// virtio_drivers/queue/owning.rs
1use super::VirtQueue;
2use crate::{transport::Transport, Error, Hal, Result};
3use alloc::boxed::Box;
4use core::convert::TryInto;
5use core::ptr::{null_mut, NonNull};
6use zerocopy::FromZeroes;
7
/// A wrapper around [`VirtQueue`] that owns all the buffers that are passed to the queue.
///
/// `SIZE` is the number of buffers (one per descriptor token) and `BUFFER_SIZE` is the length in
/// bytes of each buffer.
#[derive(Debug)]
pub struct OwningQueue<H: Hal, const SIZE: usize, const BUFFER_SIZE: usize> {
    /// The underlying virtqueue to which the owned buffers are added.
    queue: VirtQueue<H, SIZE>,
    /// Pointers to the heap-allocated buffers, indexed by descriptor token. Each pointer comes
    /// from `Box::into_raw` in `new` and is turned back into a `Box` and freed in `Drop`.
    buffers: [NonNull<[u8; BUFFER_SIZE]>; SIZE],
}
14
impl<H: Hal, const SIZE: usize, const BUFFER_SIZE: usize> OwningQueue<H, SIZE, BUFFER_SIZE> {
    /// Constructs a new `OwningQueue` wrapping around the given `VirtQueue`.
    ///
    /// This will allocate `SIZE` buffers of `BUFFER_SIZE` bytes each and add them to the queue.
    ///
    /// The caller is responsible for notifying the device if `should_notify` returns true.
    pub fn new(mut queue: VirtQueue<H, SIZE>) -> Result<Self> {
        // Start with null pointers and fill them in as the buffers are allocated; they are all
        // converted to `NonNull` in one go once the loop has finished.
        let mut buffers = [null_mut(); SIZE];
        for (i, queue_buffer) in buffers.iter_mut().enumerate() {
            // A fresh zeroed heap allocation for this slot.
            let mut buffer: Box<[u8; BUFFER_SIZE]> = FromZeroes::new_box_zeroed();
            // SAFETY: The buffer is leaked via `Box::into_raw` below, so it lives as long as the
            // queue (it is only freed in `Drop`), and we don't access it until it is popped.
            let token = unsafe { queue.add(&[], &mut [buffer.as_mut_slice()]) }?;
            // The rest of this type relies on tokens being assigned sequentially, so that
            // `buffers[token]` is exactly the buffer that was added with that token.
            assert_eq!(i, token.into());
            *queue_buffer = Box::into_raw(buffer);
        }
        // Every slot was just assigned from `Box::into_raw`, so none of the pointers is null.
        let buffers = buffers.map(|ptr| NonNull::new(ptr).unwrap());

        Ok(Self { queue, buffers })
    }

    /// Returns whether the driver should notify the device after adding a new buffer to the
    /// virtqueue.
    ///
    /// This will be false if the device has suppressed notifications.
    pub fn should_notify(&self) -> bool {
        self.queue.should_notify()
    }

    /// Tells the device whether to send used buffer notifications.
    pub fn set_dev_notify(&mut self, enable: bool) {
        self.queue.set_dev_notify(enable);
    }

    /// Adds the buffer at the given index in `buffers` back to the queue.
    ///
    /// Automatically notifies the device if required.
    ///
    /// Returns `Error::WrongToken` if `index` is out of range for `buffers`.
    ///
    /// # Safety
    ///
    /// The buffer must not currently be in the RX queue, and no other references to it must exist
    /// between when this method is called and when it is popped from the queue.
    unsafe fn add_buffer_to_queue(&mut self, index: u16, transport: &mut impl Transport) -> Result {
        // SAFETY: The buffer lives as long as the queue, and the caller guarantees that it's not
        // currently in the queue or referred to anywhere else until it is popped.
        unsafe {
            let buffer = self
                .buffers
                .get_mut(usize::from(index))
                .ok_or(Error::WrongToken)?
                .as_mut();
            let new_token = self.queue.add(&[], &mut [buffer])?;
            // If the RX buffer somehow gets assigned a different token, then our safety assumptions
            // are broken and we can't safely continue to do anything with the device.
            assert_eq!(new_token, index);
        }

        if self.queue.should_notify() {
            transport.notify(self.queue.queue_idx);
        }

        Ok(())
    }

    /// Takes the next used buffer (if any) off the queue, returning the bytes the device wrote to
    /// it together with the token identifying the buffer.
    ///
    /// Returns `Ok(None)` if the device hasn't finished with any buffer. Note that the buffer is
    /// NOT added back to the queue here; the caller must do that via `add_buffer_to_queue`.
    fn pop(&mut self) -> Result<Option<(&[u8], u16)>> {
        let Some(token) = self.queue.peek_used() else {
            return Ok(None);
        };

        // SAFETY: The device has told us it has finished using the buffer, and there are no other
        // references to it.
        let buffer = unsafe { self.buffers[usize::from(token)].as_mut() };
        // SAFETY: We maintain a consistent mapping of tokens to buffers, so we pass the same buffer
        // to `pop_used` as we previously passed to `add` for the token. Once we add the buffer back
        // to the RX queue then we don't access it again until next time it is popped.
        let len = unsafe { self.queue.pop_used(token, &[], &mut [buffer])? }
            .try_into()
            .unwrap();

        // Only the first `len` bytes of the buffer were written by the device.
        Ok(Some((&buffer[0..len], token)))
    }

    /// Checks whether there are any buffers which the device has marked as used so the driver
    /// should now process. If so, passes the first one to the `handle` function and then adds it
    /// back to the queue.
    ///
    /// Returns an error if there is an error accessing the queue or `handler` returns an error.
    /// Returns `Ok(None)` if there are no pending buffers to handle, or if `handler` returns
    /// `Ok(None)`.
    ///
    /// If `handler` panics then the buffer will not be added back to the queue, so this should be
    /// avoided.
    pub fn poll<T>(
        &mut self,
        transport: &mut impl Transport,
        handler: impl FnOnce(&[u8]) -> Result<Option<T>>,
    ) -> Result<Option<T>> {
        let Some((buffer, token)) = self.pop()? else {
            return Ok(None);
        };

        // Run the handler first but requeue the buffer even if it returned an error, so the
        // buffer isn't lost; the handler's result is returned afterwards.
        let result = handler(buffer);

        // SAFETY: The buffer was just popped from the queue so it's not in it, and there won't be
        // any other references until next time it's popped.
        unsafe {
            self.add_buffer_to_queue(token, transport)?;
        }

        result
    }
}
127
// SAFETY: The raw buffer pointers in `buffers` point to heap allocations that are only accessed
// through this queue (created in `new`, used in `pop`/`add_buffer_to_queue`, freed in `Drop`), so
// moving the queue to another thread moves exclusive access to them along with it. The wrapped
// `VirtQueue` must itself be `Send`, as required by the bound.
unsafe impl<H: Hal, const SIZE: usize, const BUFFER_SIZE: usize> Send
    for OwningQueue<H, SIZE, BUFFER_SIZE>
where
    VirtQueue<H, SIZE>: Send,
{
}
135
// SAFETY: The only method callable through an `&OwningQueue` is `should_notify`, which never
// touches the owned buffers, so sharing references across threads is sound as long as the wrapped
// `VirtQueue` is itself `Sync`, as required by the bound.
unsafe impl<H: Hal, const SIZE: usize, const BUFFER_SIZE: usize> Sync
    for OwningQueue<H, SIZE, BUFFER_SIZE>
where
    VirtQueue<H, SIZE>: Sync,
{
}
143
144impl<H: Hal, const SIZE: usize, const BUFFER_SIZE: usize> Drop
145 for OwningQueue<H, SIZE, BUFFER_SIZE>
146{
147 fn drop(&mut self) {
148 for buffer in self.buffers {
149 // Safe because we obtained the buffer pointer from Box::into_raw, and it won't be used
150 // anywhere else after the queue is destroyed.
151 unsafe { drop(Box::from_raw(buffer.as_ptr())) };
152 }
153 }
154}