virtio_drivers/transport/
mmio.rs

1//! MMIO transport for VirtIO.
2
3use super::{DeviceStatus, DeviceType, DeviceTypeError, Transport};
4use crate::{
5    align_up_phys, queue::Descriptor, transport::InterruptStatus, Error, PhysAddr, PAGE_SIZE,
6    PAGE_SIZE_PHYS,
7};
8use core::{
9    convert::{TryFrom, TryInto},
10    mem::{align_of, size_of},
11    ops::Deref,
12    ptr::NonNull,
13};
14use safe_mmio::{
15    field, field_shared,
16    fields::{ReadPure, ReadPureWrite, WriteOnly},
17    UniqueMmioPointer,
18};
19use zerocopy::{FromBytes, Immutable, IntoBytes};
20
/// Expected value of the first device register: the little-endian bytes of the
/// ASCII string "virt" (spec 4.2.2.2).
const MAGIC_VALUE: u32 = 0x7472_6976;
/// `version` register value reported by a legacy (pre-1.0) MMIO device.
pub(crate) const LEGACY_VERSION: u32 = 1;
/// `version` register value reported by a modern (VirtIO 1.0+) MMIO device.
pub(crate) const MODERN_VERSION: u32 = 2;
/// Byte offset of the device-specific configuration space from the start of the
/// MMIO region, i.e. the total size of the register block below.
const CONFIG_SPACE_OFFSET: usize = 0x100;
25
/// The version of the VirtIO MMIO transport supported by a device.
// `repr(u32)` with explicit discriminants so the variants carry the raw
// `version` register values; the `TryFrom<u32>`/`From<MmioVersion>` impls
// below rely on this correspondence.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum MmioVersion {
    /// Legacy MMIO transport with page-based addressing.
    Legacy = LEGACY_VERSION,
    /// Modern MMIO transport.
    Modern = MODERN_VERSION,
}
35
36impl TryFrom<u32> for MmioVersion {
37    type Error = MmioError;
38
39    fn try_from(version: u32) -> Result<Self, Self::Error> {
40        match version {
41            LEGACY_VERSION => Ok(Self::Legacy),
42            MODERN_VERSION => Ok(Self::Modern),
43            _ => Err(MmioError::UnsupportedVersion(version)),
44        }
45    }
46}
47
48impl From<MmioVersion> for u32 {
49    fn from(version: MmioVersion) -> Self {
50        match version {
51            MmioVersion::Legacy => LEGACY_VERSION,
52            MmioVersion::Modern => MODERN_VERSION,
53        }
54    }
55}
56
/// An error encountered initialising a VirtIO MMIO transport.
// NOTE(review): `Error` in this derive list must resolve to a derive macro
// (e.g. `thiserror::Error`), not the `crate::Error` type imported above —
// derive macros live in a separate namespace. Confirm the macro is in scope
// elsewhere in the crate.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MmioError {
    /// The header doesn't start with the expected magic value 0x74726976.
    #[error("Invalid magic value {0:#010x} (expected 0x74726976)")]
    BadMagic(u32),
    /// The header reports a version number that is neither 1 (legacy) nor 2 (modern).
    #[error("Unsupported Virtio MMIO version {0}")]
    UnsupportedVersion(u32),
    /// The header reports a device ID of 0.
    #[error("Invalid or unknown device ID: {0}")]
    InvalidDeviceID(DeviceTypeError),
    /// The MMIO region size was smaller than the header size we expect.
    #[error("MMIO region too small")]
    MmioRegionTooSmall,
}
73
/// MMIO Device Register Interface, both legacy and modern.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
///
/// `repr(C)` fixes the field offsets to the register offsets mandated by the
/// spec (noted per field below); the struct is exactly 0x100 bytes, matching
/// `CONFIG_SPACE_OFFSET`.
#[derive(Debug)]
#[repr(C)]
pub struct VirtIOHeader {
    /// Magic value (offset 0x000).
    magic: ReadPure<u32>,

    /// Device version number (offset 0x004).
    ///
    /// Legacy device returns value 0x1.
    version: ReadPure<u32>,

    /// Virtio Subsystem Device ID (offset 0x008).
    device_id: ReadPure<u32>,

    /// Virtio Subsystem Vendor ID (offset 0x00c).
    vendor_id: ReadPure<u32>,

    /// Flags representing features the device supports (offset 0x010).
    device_features: ReadPure<u32>,

    /// Device (host) features word selection (offset 0x014).
    device_features_sel: WriteOnly<u32>,

    /// Reserved (offsets 0x018-0x01c).
    __r1: [u32; 2],

    /// Flags representing device features understood and activated by the driver
    /// (offset 0x020).
    driver_features: WriteOnly<u32>,

    /// Activated (guest) features word selection (offset 0x024).
    driver_features_sel: WriteOnly<u32>,

    /// Guest page size (offset 0x028, legacy interface only).
    ///
    /// The driver writes the guest page size in bytes to the register during
    /// initialization, before any queues are used. This value should be a
    /// power of 2 and is used by the device to calculate the Guest address
    /// of the first queue page (see QueuePFN).
    legacy_guest_page_size: WriteOnly<u32>,

    /// Reserved (offset 0x02c).
    __r2: u32,

    /// Virtual queue index (offset 0x030).
    ///
    /// Writing to this register selects the virtual queue that the following
    /// operations on the QueueNumMax, QueueNum, QueueAlign and QueuePFN
    /// registers apply to. The index number of the first queue is zero (0x0).
    queue_sel: WriteOnly<u32>,

    /// Maximum virtual queue size (offset 0x034).
    ///
    /// Reading from the register returns the maximum size of the queue the
    /// device is ready to process or zero (0x0) if the queue is not available.
    /// This applies to the queue selected by writing to QueueSel and is
    /// allowed only when QueuePFN is set to zero (0x0), so when the queue is
    /// not actively used.
    queue_num_max: ReadPure<u32>,

    /// Virtual queue size (offset 0x038).
    ///
    /// Queue size is the number of elements in the queue. Writing to this
    /// register notifies the device what size of the queue the driver will use.
    /// This applies to the queue selected by writing to QueueSel.
    queue_num: WriteOnly<u32>,

    /// Used Ring alignment in the virtual queue (offset 0x03c, legacy only).
    ///
    /// Writing to this register notifies the device about alignment boundary
    /// of the Used Ring in bytes. This value should be a power of 2 and
    /// applies to the queue selected by writing to QueueSel.
    legacy_queue_align: WriteOnly<u32>,

    /// Guest physical page number of the virtual queue (offset 0x040, legacy only).
    ///
    /// Writing to this register notifies the device about location of the
    /// virtual queue in the Guest’s physical address space. This value is
    /// the index number of a page starting with the queue Descriptor Table.
    /// Value zero (0x0) means physical address zero (0x00000000) and is illegal.
    /// When the driver stops using the queue it writes zero (0x0) to this
    /// register. Reading from this register returns the currently used page
    /// number of the queue, therefore a value other than zero (0x0) means that
    /// the queue is in use. Both read and write accesses apply to the queue
    /// selected by writing to QueueSel.
    legacy_queue_pfn: ReadPureWrite<u32>,

    /// new interface only (offset 0x044)
    queue_ready: ReadPureWrite<u32>,

    /// Reserved (offsets 0x048-0x04c).
    __r3: [u32; 2],

    /// Queue notifier (offset 0x050).
    queue_notify: WriteOnly<u32>,

    /// Reserved (offsets 0x054-0x05c).
    __r4: [u32; 3],

    /// Interrupt status (offset 0x060).
    interrupt_status: ReadPure<u32>,

    /// Interrupt acknowledge (offset 0x064).
    interrupt_ack: WriteOnly<u32>,

    /// Reserved (offsets 0x068-0x06c).
    __r5: [u32; 2],

    /// Device status (offset 0x070).
    ///
    /// Reading from this register returns the current device status flags.
    /// Writing non-zero values to this register sets the status flags,
    /// indicating the OS/driver progress. Writing zero (0x0) to this register
    /// triggers a device reset. The device sets QueuePFN to zero (0x0) for
    /// all queues in the device. Also see 3.1 Device Initialization.
    status: ReadPureWrite<DeviceStatus>,

    /// Reserved (offsets 0x074-0x07c).
    __r6: [u32; 3],

    // new interface only since here
    /// Descriptor table address, low/high halves (offsets 0x080/0x084).
    queue_desc_low: WriteOnly<u32>,
    queue_desc_high: WriteOnly<u32>,

    /// Reserved (offsets 0x088-0x08c).
    __r7: [u32; 2],

    /// Driver (available) area address, low/high halves (offsets 0x090/0x094).
    queue_driver_low: WriteOnly<u32>,
    queue_driver_high: WriteOnly<u32>,

    /// Reserved (offsets 0x098-0x09c).
    __r8: [u32; 2],

    /// Device (used) area address, low/high halves (offsets 0x0a0/0x0a4).
    queue_device_low: WriteOnly<u32>,
    queue_device_high: WriteOnly<u32>,

    /// Reserved (offsets 0x0a8-0x0f8).
    __r9: [u32; 21],

    /// Configuration atomicity value (offset 0x0fc).
    config_generation: ReadPure<u32>,
}
217
218impl VirtIOHeader {
219    /// Constructs a fake VirtIO header for use in unit tests.
220    #[cfg(test)]
221    pub fn make_fake_header(
222        version: u32,
223        device_id: u32,
224        vendor_id: u32,
225        device_features: u32,
226        queue_num_max: u32,
227    ) -> Self {
228        Self {
229            magic: ReadPure(MAGIC_VALUE),
230            version: ReadPure(version),
231            device_id: ReadPure(device_id),
232            vendor_id: ReadPure(vendor_id),
233            device_features: ReadPure(device_features),
234            device_features_sel: WriteOnly::default(),
235            __r1: Default::default(),
236            driver_features: Default::default(),
237            driver_features_sel: Default::default(),
238            legacy_guest_page_size: Default::default(),
239            __r2: Default::default(),
240            queue_sel: Default::default(),
241            queue_num_max: ReadPure(queue_num_max),
242            queue_num: Default::default(),
243            legacy_queue_align: Default::default(),
244            legacy_queue_pfn: Default::default(),
245            queue_ready: Default::default(),
246            __r3: Default::default(),
247            queue_notify: Default::default(),
248            __r4: Default::default(),
249            interrupt_status: Default::default(),
250            interrupt_ack: Default::default(),
251            __r5: Default::default(),
252            status: ReadPureWrite(DeviceStatus::empty()),
253            __r6: Default::default(),
254            queue_desc_low: Default::default(),
255            queue_desc_high: Default::default(),
256            __r7: Default::default(),
257            queue_driver_low: Default::default(),
258            queue_driver_high: Default::default(),
259            __r8: Default::default(),
260            queue_device_low: Default::default(),
261            queue_device_high: Default::default(),
262            __r9: Default::default(),
263            config_generation: Default::default(),
264        }
265    }
266}
267
/// MMIO Device Register Interface.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
#[derive(Debug)]
pub struct MmioTransport<'a> {
    /// The register block at the start of the MMIO region.
    header: UniqueMmioPointer<'a, VirtIOHeader>,
    /// Device-specific configuration space following the registers (may be empty).
    config_space: UniqueMmioPointer<'a, [u8]>,
    /// Transport version, cached from the `version` register at construction.
    version: MmioVersion,
    /// Device type, decoded from the `device_id` register at construction.
    device_type: DeviceType,
}
278
impl<'a> MmioTransport<'a> {
    /// Constructs a new VirtIO MMIO transport, or returns an error if the header reports an
    /// unsupported version.
    ///
    /// # Safety
    ///
    /// `header` must point to a properly aligned valid VirtIO MMIO region, which must remain valid
    /// for the lifetime `'a`. This includes the config space following the header, if any.
    pub unsafe fn new(header: NonNull<VirtIOHeader>, mmio_size: usize) -> Result<Self, MmioError> {
        // The config space is whatever lies beyond the fixed 0x100-byte register block;
        // a region smaller than the register block is rejected outright.
        let Some(config_space_size) = mmio_size.checked_sub(CONFIG_SPACE_OFFSET) else {
            return Err(MmioError::MmioRegionTooSmall);
        };
        let config_space = NonNull::slice_from_raw_parts(
            header.cast::<u8>().byte_add(CONFIG_SPACE_OFFSET),
            config_space_size,
        );
        // SAFETY: The caller promises that the config space following the header is an MMIO region
        // valid for `'a`.
        let config_space = unsafe { UniqueMmioPointer::new(config_space) };

        // SAFETY: The caller promises that `header` is a properly aligned MMIO region valid for
        // `'a`.
        let header = UniqueMmioPointer::new(header);

        Self::new_from_unique(header, config_space)
    }

    /// Constructs a new VirtIO MMIO transport, or returns an error if the header reports an
    /// unsupported version.
    pub fn new_from_unique(
        header: UniqueMmioPointer<'a, VirtIOHeader>,
        config_space: UniqueMmioPointer<'a, [u8]>,
    ) -> Result<Self, MmioError> {
        // Check the magic register first: if it doesn't match, this isn't a VirtIO MMIO
        // device at all and none of the other registers can be trusted.
        let magic = field_shared!(header, magic).read();
        if magic != MAGIC_VALUE {
            return Err(MmioError::BadMagic(magic));
        }
        let device_id = field_shared!(header, device_id).read();
        let device_type = DeviceType::try_from(device_id).map_err(MmioError::InvalidDeviceID)?;
        // `try_into` rejects any version other than 1 (legacy) or 2 (modern).
        let version = field_shared!(header, version).read().try_into()?;
        Ok(Self {
            header,
            version,
            device_type,
            config_space,
        })
    }

    /// Gets the version of the VirtIO MMIO transport.
    pub fn version(&self) -> MmioVersion {
        self.version
    }

    /// Gets the vendor ID.
    pub fn vendor_id(&self) -> u32 {
        field_shared!(self.header, vendor_id).read()
    }
}
337
// SAFETY: `&MmioTransport` only allows MMIO reads or getting the config space, both of which are
// fine to happen concurrently on different CPU cores. (All register writes go through `&mut self`
// methods, so exclusive access is still enforced for mutation.)
unsafe impl Sync for MmioTransport<'_> {}
341
impl Transport for MmioTransport<'_> {
    fn device_type(&self) -> DeviceType {
        // Cached from the `device_id` register at construction time.
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        // The 64 feature bits are exposed as two 32-bit halves; each read must be
        // preceded by writing the half index to `device_features_sel`.
        field!(self.header, device_features_sel).write(0); // device features [0, 32)
        let mut device_features_bits = field_shared!(self.header, device_features).read().into();
        field!(self.header, device_features_sel).write(1); // device features [32, 64)
        device_features_bits += (field_shared!(self.header, device_features).read() as u64) << 32;
        device_features_bits
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        // Mirror image of `read_device_features`: select each 32-bit half with
        // `driver_features_sel` before writing it.
        field!(self.header, driver_features_sel).write(0); // driver features [0, 32)
        field!(self.header, driver_features).write(driver_features as u32);
        field!(self.header, driver_features_sel).write(1); // driver features [32, 64)
        field!(self.header, driver_features).write((driver_features >> 32) as u32);
    }

    fn max_queue_size(&mut self, queue: u16) -> u32 {
        // `queue_num_max` reports on whichever queue `queue_sel` last selected.
        field!(self.header, queue_sel).write(queue.into());
        field_shared!(self.header, queue_num_max).read()
    }

    fn notify(&mut self, queue: u16) {
        field!(self.header, queue_notify).write(queue.into());
    }

    fn get_status(&self) -> DeviceStatus {
        field_shared!(self.header, status).read()
    }

    fn set_status(&mut self, status: DeviceStatus) {
        // NB: writing an empty status resets the device (see the `status` register docs).
        field!(self.header, status).write(status);
    }

    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        match self.version {
            MmioVersion::Legacy => {
                field!(self.header, legacy_guest_page_size).write(guest_page_size);
            }
            MmioVersion::Modern => {
                // No-op, modern devices don't care.
            }
        }
    }

    fn requires_legacy_layout(&self) -> bool {
        match self.version {
            MmioVersion::Legacy => true,
            MmioVersion::Modern => false,
        }
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        match self.version {
            MmioVersion::Legacy => {
                // The legacy transport is only told a single page frame number, so the
                // three queue parts must be laid out contiguously: the driver (avail)
                // area immediately after the `size` descriptors...
                assert_eq!(
                    driver_area - descriptors,
                    size_of::<Descriptor>() as u64 * u64::from(size)
                );
                // ...and the device (used) area at the aligned boundary after the avail
                // ring, which is `size + 3` u16s (flags, idx, `size` ring entries,
                // used_event).
                assert_eq!(
                    device_area - descriptors,
                    align_up_phys(
                        size_of::<Descriptor>() as u64 * u64::from(size)
                            + size_of::<u16>() as u64 * (u64::from(size) + 3)
                    )
                );
                let align = PAGE_SIZE as u32;
                // The descriptor table itself must be page-aligned, since only its page
                // frame number reaches the device; the assert catches misalignment that
                // the division would silently truncate.
                let pfn = (descriptors / PAGE_SIZE_PHYS).try_into().unwrap();
                assert_eq!(u64::from(pfn) * PAGE_SIZE_PHYS, descriptors);
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, legacy_queue_align).write(align);
                field!(self.header, legacy_queue_pfn).write(pfn);
            }
            MmioVersion::Modern => {
                // The modern transport takes full 64-bit physical addresses for each
                // part as low/high register pairs, then marks the queue ready last.
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, queue_desc_low).write(descriptors as u32);
                field!(self.header, queue_desc_high).write((descriptors >> 32) as u32);
                field!(self.header, queue_driver_low).write(driver_area as u32);
                field!(self.header, queue_driver_high).write((driver_area >> 32) as u32);
                field!(self.header, queue_device_low).write(device_area as u32);
                field!(self.header, queue_device_high).write((device_area >> 32) as u32);
                field!(self.header, queue_ready).write(1);
            }
        }
    }

    fn queue_unset(&mut self, queue: u16) {
        match self.version {
            MmioVersion::Legacy => {
                // Writing a PFN of zero tells a legacy device the queue is no longer in use.
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(0);
                field!(self.header, legacy_queue_align).write(0);
                field!(self.header, legacy_queue_pfn).write(0);
            }
            MmioVersion::Modern => {
                field!(self.header, queue_sel).write(queue.into());

                field!(self.header, queue_ready).write(0);
                // Wait until we read the same value back, to ensure synchronisation (see 4.2.2.2).
                let queue_ready = field_shared!(self.header, queue_ready);
                while queue_ready.read() != 0 {}

                // Only clear the queue registers once the device has confirmed it
                // stopped using the queue.
                field!(self.header, queue_num).write(0);
                field!(self.header, queue_desc_low).write(0);
                field!(self.header, queue_desc_high).write(0);
                field!(self.header, queue_driver_low).write(0);
                field!(self.header, queue_driver_high).write(0);
                field!(self.header, queue_device_low).write(0);
                field!(self.header, queue_device_high).write(0);
            }
        }
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        // Select the queue, then read the version-appropriate "in use" register.
        field!(self.header, queue_sel).write(queue.into());
        match self.version {
            MmioVersion::Legacy => field_shared!(self.header, legacy_queue_pfn).read() != 0,
            MmioVersion::Modern => field_shared!(self.header, queue_ready).read() != 0,
        }
    }

    fn ack_interrupt(&mut self) -> InterruptStatus {
        // Read the pending interrupt bits and acknowledge exactly those bits, so an
        // interrupt raised between the read and the ack is not lost.
        let interrupt = field_shared!(self.header, interrupt_status).read();
        if interrupt != 0 {
            field!(self.header, interrupt_ack).write(interrupt);
            InterruptStatus::from_bits_truncate(interrupt)
        } else {
            InterruptStatus::empty()
        }
    }

    fn read_config_generation(&self) -> u32 {
        field_shared!(self.header, config_generation).read()
    }

    fn read_config_space<T: FromBytes + IntoBytes>(&self, offset: usize) -> Result<T, Error> {
        // VirtIO only guarantees the config space is 4-byte aligned, so reads of
        // more strictly aligned types cannot be performed safely.
        assert!(align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>());
        assert!(offset % align_of::<T>() == 0);

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The caller of `MmioTransport::new` guaranteed that the header pointer was
            // valid, including the config space. We have checked that the value is properly aligned
            // for `T` and within the bounds of the config space. Reading the config space shouldn't
            // have side-effects.
            unsafe {
                let ptr = self.config_space.ptr().cast::<T>().byte_add(offset);
                Ok(self
                    .config_space
                    .deref()
                    .child(NonNull::new(ptr.cast_mut()).unwrap())
                    .read_unsafe())
            }
        }
    }

    fn write_config_space<T: IntoBytes + Immutable>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        // Same alignment restriction as `read_config_space`: 4 bytes is all VirtIO promises.
        assert!(align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>());
        assert!(offset % align_of::<T>() == 0);

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The caller of `MmioTransport::new` guaranteed that the header pointer was
            // valid, including the config space. We have checked that the value is properly aligned
            // for `T` and within the bounds of the config space.
            unsafe {
                let ptr = self.config_space.ptr_nonnull().cast::<T>().byte_add(offset);
                self.config_space.child(ptr).write_unsafe(value);
            }
            Ok(())
        }
    }
}
537
538impl Drop for MmioTransport<'_> {
539    fn drop(&mut self) {
540        // Reset the device when the transport is dropped.
541        self.set_status(DeviceStatus::empty())
542    }
543}