// tokio/runtime/scheduler/inject/rt_multi_thread.rs

use super::{Shared, Synced};

use crate::runtime::scheduler::Lock;
use crate::runtime::task;

use std::sync::atomic::Ordering::Release;
// An exclusive borrow of `Synced` already guarantees exclusive access for
// the lifetime `'a`, so "locking" it is a no-op: the borrow itself is the
// lock handle.
impl<'a> Lock<Synced> for &'a mut Synced {
    type Handle = &'a mut Synced;

    fn lock(self) -> Self::Handle {
        self
    }
}
15
impl AsMut<Synced> for Synced {
    /// Identity conversion. Lets generic code that holds an
    /// `AsMut<Synced>` handle (e.g. the lock handle used by
    /// `Shared::push_batch_inner`) treat a plain `Synced` uniformly.
    fn as_mut(&mut self) -> &mut Synced {
        self
    }
}
21
impl<T: 'static> Shared<T> {
    /// Pushes several values into the injection queue in one batch.
    ///
    /// The tasks are first linked into an intrusive singly-linked list
    /// *outside* the lock, so the critical section in `push_batch_inner`
    /// is a constant-time splice regardless of batch size.
    ///
    /// # Safety
    ///
    /// `shared` must lock the `Synced` instance that guards *this* queue's
    /// head/tail/len state. NOTE(review): presumably the `Synced` returned
    /// alongside this `Shared` at construction — confirm against the
    /// constructor in the parent module.
    #[inline]
    pub(crate) unsafe fn push_batch<L, I>(&self, shared: L, mut iter: I)
    where
        L: Lock<Synced>,
        I: Iterator<Item = task::Notified<T>>,
    {
        // Take ownership of the first task as a raw pointer; an empty
        // iterator means there is nothing to push.
        let first = match iter.next() {
            Some(first) => first.into_raw(),
            None => return,
        };

        // Link the remaining tasks onto `first`, tracking the running tail
        // (`prev`) and the batch length (`counter`, starts at 1 for `first`).
        let mut prev = first;
        let mut counter = 1;

        iter.for_each(|next| {
            let next = next.into_raw();

            // SAFETY: `next` was just taken from the iterator via
            // `into_raw` and has not been queued anywhere yet, so this call
            // is the only writer of `prev`'s queue link right now.
            // NOTE(review): relies on `Notified` conferring exclusive
            // access to the `queue_next` field — confirm in `task`.
            unsafe { prev.set_queue_next(Some(next)) };

            prev = next;
            counter += 1;
        });

        // `first` is the head and `prev` is now the tail of the linked
        // batch; splice the whole chain in under the lock.
        self.push_batch_inner(shared, first, prev, counter);
    }

    /// Inserts a pre-linked chain of tasks into the queue.
    ///
    /// `batch_head` and `batch_tail` may be the same task, in which case a
    /// single task is inserted. `num` is the number of tasks in the chain.
    #[inline]
    unsafe fn push_batch_inner<L>(
        &self,
        shared: L,
        batch_head: task::RawTask,
        batch_tail: task::RawTask,
        num: usize,
    ) where
        L: Lock<Synced>,
    {
        // The tail must terminate the chain; a dangling `queue_next` here
        // would splice stale links into the queue.
        debug_assert!(unsafe { batch_tail.get_queue_next().is_none() });

        let mut synced = shared.lock();

        if synced.as_mut().is_closed {
            // The queue has been closed: do not enqueue. Release the lock
            // first, then drop every task in the batch.
            drop(synced);

            let mut curr = Some(batch_head);

            while let Some(task) = curr {
                // Read the next link *before* dropping the current task.
                curr = task.get_queue_next();

                // SAFETY: every raw pointer in the chain came from
                // `Notified::into_raw` in `push_batch`, so re-wrapping it
                // reclaims ownership; dropping the `Notified` releases the
                // task.
                let _ = unsafe { task::Notified::<T>::from_raw(task) };
            }

            return;
        }

        let synced = synced.as_mut();

        // Splice the batch onto the current tail, or make it the entire
        // queue when the queue is empty.
        if let Some(tail) = synced.tail {
            unsafe {
                tail.set_queue_next(Some(batch_head));
            }
        } else {
            synced.head = Some(batch_head);
        }

        synced.tail = Some(batch_tail);

        // The lock (`synced` handle) is still held here, so the non-atomic
        // load followed by a `Release` store cannot race with this path.
        // NOTE(review): soundness also assumes every other writer of `len`
        // holds the same lock — confirm at the other call sites.
        let len = self.len.unsync_load();

        self.len.store(len + num, Release);
    }
}