//! Raw VirtIO network device driver (`virtio_drivers/device/net/dev_raw.rs`).
1use super::{Config, EthernetAddress, Features, VirtioNetHdr, VirtioNetHdrLegacy};
2use super::{MIN_BUFFER_LEN, QUEUE_RECEIVE, QUEUE_TRANSMIT, SUPPORTED_FEATURES};
3use crate::config::read_config;
4use crate::hal::Hal;
5use crate::queue::VirtQueue;
6use crate::transport::{InterruptStatus, Transport};
7use crate::{Error, Result};
8use core::mem::size_of;
9use log::{debug, info, warn};
10use zerocopy::IntoBytes;
11
/// Raw driver for a VirtIO network device.
///
/// This is a raw version of the VirtIONet driver. It provides non-blocking
/// methods for transmitting and receiving raw slices, without the buffer
/// management. For more higher-level functions such as receive buffer backing,
/// see [`VirtIONet`].
///
/// [`VirtIONet`]: super::VirtIONet
pub struct VirtIONetRaw<H: Hal, T: Transport, const QUEUE_SIZE: usize> {
    /// Underlying VirtIO transport used to talk to the device.
    transport: T,
    /// MAC address read from the device configuration space during `new`.
    mac: EthernetAddress,
    /// Virtqueue for receiving packets (queue index `QUEUE_RECEIVE`).
    recv_queue: VirtQueue<H, QUEUE_SIZE>,
    /// Virtqueue for transmitting packets (queue index `QUEUE_TRANSMIT`).
    send_queue: VirtQueue<H, QUEUE_SIZE>,
    /// Whether `num_buffers` is missing in the `virtio_net_hdr` struct.
    pub(crate) legacy_header: bool,
}
28
29impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONetRaw<H, T, QUEUE_SIZE> {
30    /// Create a new VirtIO-Net driver.
31    pub fn new(mut transport: T) -> Result<Self> {
32        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);
33        info!("negotiated_features {:?}", negotiated_features);
34
35        // Read configuration space.
36        let mac = transport.read_consistent(|| read_config!(transport, Config, mac))?;
37        let status = read_config!(transport, Config, status)?;
38        debug!("Got MAC={:02x?}, status={:?}", mac, status);
39
40        let send_queue = VirtQueue::new(
41            &mut transport,
42            QUEUE_TRANSMIT,
43            negotiated_features.contains(Features::RING_INDIRECT_DESC),
44            negotiated_features.contains(Features::RING_EVENT_IDX),
45        )?;
46        let recv_queue = VirtQueue::new(
47            &mut transport,
48            QUEUE_RECEIVE,
49            negotiated_features.contains(Features::RING_INDIRECT_DESC),
50            negotiated_features.contains(Features::RING_EVENT_IDX),
51        )?;
52
53        transport.finish_init();
54
55        Ok(VirtIONetRaw {
56            transport,
57            mac,
58            recv_queue,
59            send_queue,
60            legacy_header: !negotiated_features.contains(Features::VERSION_1)
61                && !negotiated_features.contains(Features::MRG_RXBUF),
62        })
63    }
64
    /// Acknowledge interrupt.
    ///
    /// Delegates to the transport and returns the reported interrupt status,
    /// so the caller can tell what kind of event occurred.
    pub fn ack_interrupt(&mut self) -> InterruptStatus {
        self.transport.ack_interrupt()
    }
69
70    /// Disable interrupts.
71    pub fn disable_interrupts(&mut self) {
72        self.send_queue.set_dev_notify(false);
73        self.recv_queue.set_dev_notify(false);
74    }
75
76    /// Enable interrupts.
77    pub fn enable_interrupts(&mut self) {
78        self.send_queue.set_dev_notify(true);
79        self.recv_queue.set_dev_notify(true);
80    }
81
    /// Get MAC address.
    ///
    /// Returns the address that was read from the device configuration space
    /// when the driver was created.
    pub fn mac_address(&self) -> EthernetAddress {
        self.mac
    }
86
87    /// Whether can send packet.
88    pub fn can_send(&self) -> bool {
89        self.send_queue.available_desc() >= 2
90    }
91
92    /// Whether the length of the receive buffer is valid.
93    fn check_rx_buf_len(rx_buf: &[u8]) -> Result<()> {
94        if rx_buf.len() < MIN_BUFFER_LEN {
95            warn!("Receive buffer len {} is too small", rx_buf.len());
96            Err(Error::InvalidParam)
97        } else {
98            Ok(())
99        }
100    }
101
102    /// Whether the length of the transmit buffer is valid.
103    fn check_tx_buf_len(&self, tx_buf: &[u8]) -> Result<()> {
104        let hdr_size = if self.legacy_header {
105            size_of::<VirtioNetHdrLegacy>()
106        } else {
107            size_of::<VirtioNetHdr>()
108        };
109        if tx_buf.len() < hdr_size {
110            warn!("Transmit buffer len {} is too small", tx_buf.len());
111            Err(Error::InvalidParam)
112        } else {
113            Ok(())
114        }
115    }
116
117    /// Fill the header of the `buffer` with [`VirtioNetHdr`].
118    ///
119    /// If the `buffer` is not large enough, it returns [`Error::InvalidParam`].
120    pub fn fill_buffer_header(&self, buffer: &mut [u8]) -> Result<usize> {
121        macro_rules! fill {
122            ($hdr:ty) => {{
123                if buffer.len() < size_of::<$hdr>() {
124                    return Err(Error::InvalidParam);
125                }
126                let header = <$hdr>::default();
127                buffer[..size_of::<$hdr>()].copy_from_slice(header.as_bytes());
128                Ok(size_of::<$hdr>())
129            }};
130        }
131        if self.legacy_header {
132            fill!(VirtioNetHdrLegacy)
133        } else {
134            fill!(VirtioNetHdr)
135        }
136    }
137
138    /// Submits a request to transmit a buffer immediately without waiting for
139    /// the transmission to complete.
140    ///
141    /// It will submit request to the VirtIO net device and return a token
142    /// identifying the position of the first descriptor in the chain. If there
143    /// are not enough descriptors to allocate, then it returns
144    /// [`Error::QueueFull`].
145    ///
146    /// The caller needs to fill the `tx_buf` with a header by calling
147    /// [`fill_buffer_header`] before transmission. Then it calls [`poll_transmit`]
148    /// with the returned token to check whether the device has finished handling
149    /// the request. Once it has, the caller must call [`transmit_complete`] with
150    /// the same buffer before reading the result (transmitted length).
151    ///
152    /// # Safety
153    ///
154    /// `tx_buf` is still borrowed by the underlying VirtIO net device even after
155    /// this method returns. Thus, it is the caller's responsibility to guarantee
156    /// that they are not accessed before the request is completed in order to
157    /// avoid data races.
158    ///
159    /// [`fill_buffer_header`]: Self::fill_buffer_header
160    /// [`poll_transmit`]: Self::poll_transmit
161    /// [`transmit_complete`]: Self::transmit_complete
162    pub unsafe fn transmit_begin(&mut self, tx_buf: &[u8]) -> Result<u16> {
163        self.check_tx_buf_len(tx_buf)?;
164        // SAFETY: The caller promises that `tx_buf` is not accessed before the request completes.
165        let token = unsafe { self.send_queue.add(&[tx_buf], &mut [])? };
166        if self.send_queue.should_notify() {
167            self.transport.notify(QUEUE_TRANSMIT);
168        }
169        Ok(token)
170    }
171
172    /// Fetches the token of the next completed transmission request from the
173    /// used ring and returns it, without removing it from the used ring. If
174    /// there are no pending completed requests it returns [`None`].
175    pub fn poll_transmit(&mut self) -> Option<u16> {
176        self.send_queue.peek_used()
177    }
178
179    /// Completes a transmission operation which was started by [`transmit_begin`].
180    /// Returns number of bytes transmitted.
181    ///
182    /// # Safety
183    ///
184    /// The same buffer must be passed in again as was passed to
185    /// [`transmit_begin`] when it returned the token.
186    ///
187    /// [`transmit_begin`]: Self::transmit_begin
188    pub unsafe fn transmit_complete(&mut self, token: u16, tx_buf: &[u8]) -> Result<usize> {
189        // SAFETY: The caller promises that `tx_buf` is the same one passed to the corresponding
190        // call to `transmit_begin`.
191        let len = unsafe { self.send_queue.pop_used(token, &[tx_buf], &mut [])? };
192        Ok(len as usize)
193    }
194
195    /// Submits a request to receive a buffer immediately without waiting for
196    /// the reception to complete.
197    ///
198    /// It will submit request to the VirtIO net device and return a token
199    /// identifying the position of the first descriptor in the chain. If there
200    /// are not enough descriptors to allocate, then it returns
201    /// [`Error::QueueFull`].
202    ///
203    /// The caller can then call [`poll_receive`] with the returned token to
204    /// check whether the device has finished handling the request. Once it has,
205    /// the caller must call [`receive_complete`] with the same buffer before
206    /// reading the response.
207    ///
208    /// # Safety
209    ///
210    /// `rx_buf` is still borrowed by the underlying VirtIO net device even after
211    /// this method returns. Thus, it is the caller's responsibility to guarantee
212    /// that they are not accessed before the request is completed in order to
213    /// avoid data races.
214    ///
215    /// [`poll_receive`]: Self::poll_receive
216    /// [`receive_complete`]: Self::receive_complete
217    pub unsafe fn receive_begin(&mut self, rx_buf: &mut [u8]) -> Result<u16> {
218        Self::check_rx_buf_len(rx_buf)?;
219        // SAFETY: The caller promises that `rx_buf` is not accessed before the request completes.
220        let token = unsafe { self.recv_queue.add(&[], &mut [rx_buf])? };
221        if self.recv_queue.should_notify() {
222            self.transport.notify(QUEUE_RECEIVE);
223        }
224        Ok(token)
225    }
226
    /// Fetches the token of the next completed reception request from the
    /// used ring and returns it, without removing it from the used ring. If
    /// there are no pending completed requests it returns [`None`].
    pub fn poll_receive(&self) -> Option<u16> {
        self.recv_queue.peek_used()
    }
233
    /// Completes a reception operation which was started by [`receive_begin`].
    ///
    /// After completion, the `rx_buf` will contain a header followed by the
    /// received packet. It returns the length of the header and the length of
    /// the packet.
    ///
    /// # Safety
    ///
    /// The same buffer must be passed in again as was passed to
    /// [`receive_begin`] when it returned the token.
    ///
    /// [`receive_begin`]: Self::receive_begin
    pub unsafe fn receive_complete(
        &mut self,
        token: u16,
        rx_buf: &mut [u8],
    ) -> Result<(usize, usize)> {
        // SAFETY: The caller promises that `rx_buf` is the same one passed to the corresponding
        // call to `receive_begin`.
        let len = unsafe { self.recv_queue.pop_used(token, &[], &mut [rx_buf])? } as usize;
        let hdr_size = if self.legacy_header {
            size_of::<VirtioNetHdrLegacy>()
        } else {
            size_of::<VirtioNetHdr>()
        };
        // A used length shorter than the header indicates a device error.
        let packet_len = len.checked_sub(hdr_size).ok_or(Error::IoError)?;
        Ok((hdr_size, packet_len))
    }
262
263    /// Sends a packet to the network, and blocks until the request completed.
264    pub fn send(&mut self, tx_buf: &[u8]) -> Result {
265        macro_rules! send {
266            ($header:expr) => {{
267                let header = $header;
268                if tx_buf.is_empty() {
269                    // Special case sending an empty packet, to avoid adding an empty buffer to the
270                    // virtqueue.
271                    self.send_queue.add_notify_wait_pop(
272                        &[header.as_bytes()],
273                        &mut [],
274                        &mut self.transport,
275                    )?;
276                } else {
277                    self.send_queue.add_notify_wait_pop(
278                        &[header.as_bytes(), tx_buf],
279                        &mut [],
280                        &mut self.transport,
281                    )?;
282                }
283            }};
284        }
285        if self.legacy_header {
286            send!(VirtioNetHdrLegacy::default())
287        } else {
288            send!(VirtioNetHdr::default())
289        };
290        Ok(())
291    }
292
293    /// Blocks and waits for a packet to be received.
294    ///
295    /// After completion, the `rx_buf` will contain a header followed by the
296    /// received packet. It returns the length of the header and the length of
297    /// the packet.
298    pub fn receive_wait(&mut self, rx_buf: &mut [u8]) -> Result<(usize, usize)> {
299        // SAFETY: After calling `receive_begin`, `rx_buf` is not accessed
300        // until calling `receive_complete` when the request is complete.
301        let token = unsafe { self.receive_begin(rx_buf)? };
302        while self.poll_receive().is_none() {
303            core::hint::spin_loop();
304        }
305        // SAFETY: This `rx_buf` is the same one passed to `receive_begin`.
306        unsafe { self.receive_complete(token, rx_buf) }
307    }
308}
309
310impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> Drop for VirtIONetRaw<H, T, QUEUE_SIZE> {
311    fn drop(&mut self) {
312        // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
313        // after they have been freed.
314        self.transport.queue_unset(QUEUE_RECEIVE);
315        self.transport.queue_unset(QUEUE_TRANSMIT);
316    }
317}