shadow_rs/core/runahead.rs
use std::sync::RwLock;

use shadow_shim_helper_rs::simulation_time::SimulationTime;

/// Decides on the runahead for the next simulation round (the duration of the round).
///
/// A larger runahead improves performance since more hosts and more events can be run in parallel
/// during a simulation round, but if the runahead is too large then packets will be delayed until
/// the next simulation round, arriving later than their intended latency. When dynamic runahead
/// is disabled, this uses a fixed runahead equal to the provided minimum possible latency;
/// otherwise it uses a dynamic runahead equal to the lowest latency used so far. Both runahead
/// calculations are subject to a static lower bound configured by the user.
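///
/// A minimal usage sketch (assuming a `SimulationTime::from_millis` constructor is available):
///
/// ```ignore
/// // fixed runahead: dynamic runahead disabled, 2 ms minimum possible latency, no config bound
/// let runahead = Runahead::new(false, SimulationTime::from_millis(2), None);
/// assert_eq!(runahead.get(), SimulationTime::from_millis(2));
///
/// // the user-configured lower bound takes precedence when it's larger
/// let runahead = Runahead::new(
///     false,
///     SimulationTime::from_millis(2),
///     Some(SimulationTime::from_millis(5)),
/// );
/// assert_eq!(runahead.get(), SimulationTime::from_millis(5));
/// ```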
#[derive(Debug)]
pub struct Runahead {
    /// The lowest packet latency that Shadow has used so far in the simulation. For performance,
    /// this is only updated if dynamic runahead is enabled for the simulation.
    min_used_latency: RwLock<Option<SimulationTime>>,
    /// The lowest latency that's possible in the simulation (the graph edge with the lowest
    /// latency).
    min_possible_latency: SimulationTime,
    /// A lower bound for the runahead as specified by the user.
    min_runahead_config: Option<SimulationTime>,
    /// Is dynamic runahead enabled?
    is_runahead_dynamic: bool,
}

impl Runahead {
    pub fn new(
        is_runahead_dynamic: bool,
        min_possible_latency: SimulationTime,
        min_runahead_config: Option<SimulationTime>,
    ) -> Self {
        assert!(!min_possible_latency.is_zero());

        Self {
            min_used_latency: RwLock::new(None),
            min_possible_latency,
            min_runahead_config,
            is_runahead_dynamic,
        }
    }

    /// Get the runahead for the next round.
    pub fn get(&self) -> SimulationTime {
        // If the 'min_used_latency' is None, we haven't yet been given a latency value to base our
        // runahead off of (or dynamic runahead is disabled). We use the smallest possible latency
        // to start.
        let runahead = self
            .min_used_latency
            .read()
            .unwrap()
            .unwrap_or(self.min_possible_latency);

        // the 'runahead' config option sets a lower bound for the runahead
        let runahead_config = self.min_runahead_config.unwrap_or(SimulationTime::ZERO);
        std::cmp::max(runahead, runahead_config)
    }

    /// If dynamic runahead is enabled, will compare and update the stored lowest packet latency.
    /// This may shorten the runahead for future rounds.
    pub fn update_lowest_used_latency(&self, latency: SimulationTime) {
        assert!(latency > SimulationTime::ZERO);

        // if dynamic runahead is disabled, we don't update 'min_used_latency'
        if !self.is_runahead_dynamic {
            return;
        }

        // helper closure for checking if we should update the min_used_latency
        let should_update = |min_used_latency: &Option<SimulationTime>| {
            if let Some(min_used_latency) = min_used_latency
                && latency >= *min_used_latency
            {
                return false;
            }

            // true if runahead was never set before, or new latency is smaller than the old latency
            true
        };

        // an initial check with only a read lock; in the common case the latency is not a new
        // minimum, and we can bail out without blocking other readers on the write lock
        {
            let min_used_latency = self.min_used_latency.read().unwrap();

            if !should_update(&min_used_latency) {
                return;
            }
        }

        let old_runahead;
        let min_runahead_config;

        // check the same condition again, but with a write lock; another thread may have updated
        // 'min_used_latency' between releasing the read lock and acquiring the write lock
        {
            let mut min_used_latency = self.min_used_latency.write().unwrap();

            if !should_update(&min_used_latency) {
                return;
            }

            // cache the values for logging
            old_runahead = *min_used_latency;
            min_runahead_config = self.min_runahead_config;

            // update the min runahead
            *min_used_latency = Some(latency);
        }

        // these info messages may appear out-of-order in the log
        log::info!(
            "Minimum time runahead for next scheduling round updated from {:?} \
            to {} ns; the minimum config override is {:?} ns",
            old_runahead.map(|x| x.as_nanos()),
            latency.as_nanos(),
            min_runahead_config.map(|x| x.as_nanos())
        );
    }
}
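
// A minimal test sketch of the dynamic-runahead behavior; it assumes a
// `SimulationTime::from_millis` constructor is available in shadow_shim_helper_rs.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn dynamic_runahead_tracks_lowest_used_latency() {
        // dynamic runahead enabled, 1 ms minimum possible latency, no configured lower bound
        let runahead = Runahead::new(true, SimulationTime::from_millis(1), None);

        // before any latency has been reported, the minimum possible latency is used
        assert_eq!(runahead.get(), SimulationTime::from_millis(1));

        // a reported latency becomes the runahead for future rounds
        runahead.update_lowest_used_latency(SimulationTime::from_millis(10));
        assert_eq!(runahead.get(), SimulationTime::from_millis(10));

        // a smaller reported latency shortens the runahead
        runahead.update_lowest_used_latency(SimulationTime::from_millis(3));
        assert_eq!(runahead.get(), SimulationTime::from_millis(3));

        // a larger reported latency does not lengthen it again
        runahead.update_lowest_used_latency(SimulationTime::from_millis(8));
        assert_eq!(runahead.get(), SimulationTime::from_millis(3));
    }
}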