// tokio/runtime/scheduler/multi_thread/stats.rs

use crate::runtime::{Config, MetricsBatch, WorkerMetrics};

use std::time::{Duration, Instant};
/// Per-worker statistics used both for runtime metrics reporting and for
/// tuning the scheduler (see `tuned_global_queue_interval`).
pub(crate) struct Stats {
    /// Batch of metrics, flushed out to a `WorkerMetrics` via `submit`.
    batch: MetricsBatch,

    /// Instant captured by `start_processing_scheduled_tasks`; paired with
    /// `end_processing_scheduled_tasks` to time one batch of task polls.
    processing_scheduled_tasks_started_at: Instant,

    /// Number of `start_poll` calls since the current batch started
    /// (reset to zero by `start_processing_scheduled_tasks`).
    tasks_polled_in_batch: usize,

    /// Exponentially-weighted moving average of per-task poll time, in
    /// nanoseconds (same unit as `TARGET_GLOBAL_QUEUE_INTERVAL`).
    task_poll_time_ewma: f64,
}
28
/// Weight given to a new poll-time sample when folding it into the EWMA
/// (higher = adapts faster, smooths less).
const TASK_POLL_TIME_EWMA_ALPHA: f64 = 0.1;

/// Target duration, in nanoseconds, of one batch of local-queue polls
/// between global-queue checks; drives `tuned_global_queue_interval`.
const TARGET_GLOBAL_QUEUE_INTERVAL: f64 = Duration::from_micros(200).as_nanos() as f64;

/// Hard upper bound on the tuned global queue interval.
const MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 127;

/// Poll count used to seed `task_poll_time_ewma` before any samples exist
/// (see `Stats::new`).
const TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL: u32 = 61;
40
41impl Stats {
42 pub(crate) fn new(worker_metrics: &WorkerMetrics) -> Stats {
43 let task_poll_time_ewma =
45 TARGET_GLOBAL_QUEUE_INTERVAL / TARGET_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL as f64;
46
47 Stats {
48 batch: MetricsBatch::new(worker_metrics),
49 processing_scheduled_tasks_started_at: Instant::now(),
50 tasks_polled_in_batch: 0,
51 task_poll_time_ewma,
52 }
53 }
54
55 pub(crate) fn tuned_global_queue_interval(&self, config: &Config) -> u32 {
56 if let Some(configured) = config.global_queue_interval {
58 return configured;
59 }
60
61 let tasks_per_interval = (TARGET_GLOBAL_QUEUE_INTERVAL / self.task_poll_time_ewma) as u32;
63
64 tasks_per_interval.clamp(2, MAX_TASKS_POLLED_PER_GLOBAL_QUEUE_INTERVAL)
67 }
68
69 pub(crate) fn submit(&mut self, to: &WorkerMetrics) {
70 self.batch.submit(to, self.task_poll_time_ewma as u64);
71 }
72
73 pub(crate) fn about_to_park(&mut self) {
74 self.batch.about_to_park();
75 }
76
77 pub(crate) fn unparked(&mut self) {
78 self.batch.unparked();
79 }
80
81 pub(crate) fn inc_local_schedule_count(&mut self) {
82 self.batch.inc_local_schedule_count();
83 }
84
85 pub(crate) fn start_processing_scheduled_tasks(&mut self) {
86 self.batch.start_processing_scheduled_tasks();
87
88 self.processing_scheduled_tasks_started_at = Instant::now();
89 self.tasks_polled_in_batch = 0;
90 }
91
92 pub(crate) fn end_processing_scheduled_tasks(&mut self) {
93 self.batch.end_processing_scheduled_tasks();
94
95 if self.tasks_polled_in_batch > 0 {
97 let now = Instant::now();
98
99 let elapsed = (now - self.processing_scheduled_tasks_started_at).as_nanos() as f64;
102 let num_polls = self.tasks_polled_in_batch as f64;
103
104 let mean_poll_duration = elapsed / num_polls;
106
107 let weighted_alpha = 1.0 - (1.0 - TASK_POLL_TIME_EWMA_ALPHA).powf(num_polls);
109
110 self.task_poll_time_ewma = weighted_alpha * mean_poll_duration
112 + (1.0 - weighted_alpha) * self.task_poll_time_ewma;
113 }
114 }
115
116 pub(crate) fn start_poll(&mut self) {
117 self.batch.start_poll();
118
119 self.tasks_polled_in_batch += 1;
120 }
121
122 pub(crate) fn end_poll(&mut self) {
123 self.batch.end_poll();
124 }
125
126 pub(crate) fn incr_steal_count(&mut self, by: u16) {
127 self.batch.incr_steal_count(by);
128 }
129
130 pub(crate) fn incr_steal_operations(&mut self) {
131 self.batch.incr_steal_operations();
132 }
133
134 pub(crate) fn incr_overflow_count(&mut self) {
135 self.batch.incr_overflow_count();
136 }
137}