shadow_rs/core/runahead.rs

use std::sync::RwLock;

use shadow_shim_helper_rs::simulation_time::SimulationTime;

/// Decides on the runahead for the next simulation round (the duration of the round).
///
/// Having a larger runahead improves performance since more hosts and more events can be run in
/// parallel during a simulation round, but if the runahead is too large then packets will be
/// delayed until the next simulation round, which is beyond their intended latency. This uses a
/// fixed runahead of the provided minimum possible latency when dynamic runahead is disabled, and
/// otherwise uses a dynamic runahead of the minimum used latency. Both runahead calculations have a
/// static lower bound.
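///
/// A minimal usage sketch (the `SimulationTime::from_millis` constructor is assumed here for
/// illustration and may not match the real `SimulationTime` API, so the example is not
/// compiled):
///
/// ```ignore
/// // smallest graph-edge latency of 2 ms, dynamic runahead enabled, and a configured
/// // runahead lower bound of 1 ms
/// let runahead = Runahead::new(
///     true,
///     SimulationTime::from_millis(2),
///     Some(SimulationTime::from_millis(1)),
/// );
///
/// // no packet latency has been observed yet, so the minimum possible latency is used
/// assert_eq!(runahead.get(), SimulationTime::from_millis(2));
///
/// // a packet used a 1 ms path; future rounds can now be shorter, but never below the
/// // configured lower bound
/// runahead.update_lowest_used_latency(SimulationTime::from_millis(1));
/// assert_eq!(runahead.get(), SimulationTime::from_millis(1));
/// ```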
#[derive(Debug)]
pub struct Runahead {
    /// The lowest packet latency that Shadow has used so far in the simulation. For performance,
    /// this is only updated if dynamic runahead is enabled for the simulation.
    min_used_latency: RwLock<Option<SimulationTime>>,
    /// The lowest latency that's possible in the simulation (the graph edge with the lowest
    /// latency).
    min_possible_latency: SimulationTime,
    /// A lower bound for the runahead as specified by the user.
    min_runahead_config: Option<SimulationTime>,
    /// Is dynamic runahead enabled?
    is_runahead_dynamic: bool,
}

impl Runahead {
    pub fn new(
        is_runahead_dynamic: bool,
        min_possible_latency: SimulationTime,
        min_runahead_config: Option<SimulationTime>,
    ) -> Self {
        assert!(!min_possible_latency.is_zero());

        Self {
            min_used_latency: RwLock::new(None),
            min_possible_latency,
            min_runahead_config,
            is_runahead_dynamic,
        }
    }

    /// Get the runahead for the next round.
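    ///
    /// For example, with a minimum possible latency of 2 ms, no packet latency observed yet,
    /// and a configured minimum runahead of 5 ms, the next round's runahead is 5 ms (the
    /// larger of the two values).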
    pub fn get(&self) -> SimulationTime {
        // If the 'min_used_latency' is None, we haven't yet been given a latency value to base our
        // runahead off of (or dynamic runahead is disabled). We use the smallest possible latency
        // to start.
        let runahead = self
            .min_used_latency
            .read()
            .unwrap()
            .unwrap_or(self.min_possible_latency);

        // the 'runahead' config option sets a lower bound for the runahead
        let runahead_config = self.min_runahead_config.unwrap_or(SimulationTime::ZERO);
        std::cmp::max(runahead, runahead_config)
    }

    /// If dynamic runahead is enabled, will compare and update the stored lowest packet latency.
    /// This may shorten the runahead for future rounds.
    pub fn update_lowest_used_latency(&self, latency: SimulationTime) {
        assert!(latency > SimulationTime::ZERO);

        // if dynamic runahead is disabled, we don't update 'min_used_latency'
        if !self.is_runahead_dynamic {
            return;
        }

        // helper function for checking if we should update the min_used_latency
        let should_update = |min_used_latency: &Option<SimulationTime>| {
            if let Some(min_used_latency) = min_used_latency {
                if latency >= *min_used_latency {
                    return false;
                }
            }
            // true if no latency was stored before, or the new latency is smaller than the stored one
            true
        };

        // an initial check with only a read lock; this avoids write-lock contention in the
        // common case where the latency is not a new minimum
        {
            let min_used_latency = self.min_used_latency.read().unwrap();

            if !should_update(&min_used_latency) {
                return;
            }
        }

        let old_runahead;
        let min_runahead_config;

        // check the same condition again, but with a write lock; another thread may have
        // stored a smaller latency after we released the read lock
        {
            let mut min_used_latency = self.min_used_latency.write().unwrap();

            if !should_update(&min_used_latency) {
                return;
            }

            // cache the values for logging
            old_runahead = *min_used_latency;
            min_runahead_config = self.min_runahead_config;

            // update the minimum used latency
            *min_used_latency = Some(latency);
        }

        // these info messages may appear out-of-order in the log
        log::info!(
            "Minimum time runahead for next scheduling round updated from {:?} \
             to {} ns; the minimum config override is {:?} ns",
            old_runahead.map(|x| x.as_nanos()),
            latency.as_nanos(),
            min_runahead_config.map(|x| x.as_nanos())
        );
    }
}
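
// The double-checked update in `update_lowest_used_latency` first rejects non-minimal
// latencies under a cheap read lock and only re-checks (and writes) under the write lock.
// The sketch below demonstrates that pattern with a plain `u64` minimum instead of
// `SimulationTime`, so it does not depend on any particular `SimulationTime` constructor;
// the `Minimum` type and its test are illustrative additions, not part of Shadow.
#[cfg(test)]
mod double_checked_minimum_sketch {
    use std::sync::RwLock;

    /// A standalone stand-in for the `min_used_latency` field, tracking a minimum `u64`.
    struct Minimum(RwLock<Option<u64>>);

    impl Minimum {
        fn update(&self, value: u64) {
            // fast path: a read lock is enough to reject values that are not a new minimum
            if let Some(current) = *self.0.read().unwrap() {
                if value >= current {
                    return;
                }
            }

            // slow path: re-check under the write lock, since another thread may have stored
            // a smaller value after the read lock was released
            let mut guard = self.0.write().unwrap();
            if let Some(current) = *guard {
                if value >= current {
                    return;
                }
            }
            *guard = Some(value);
        }
    }

    #[test]
    fn keeps_smallest_value() {
        let min = Minimum(RwLock::new(None));
        min.update(5);
        min.update(7); // a larger value is ignored
        min.update(3);
        assert_eq!(*min.0.read().unwrap(), Some(3));
    }
}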