// Source: virtio_drivers/transport/mmio.rs

1//! MMIO transport for VirtIO.
2
3use super::{DeviceStatus, DeviceType, DeviceTypeError, Transport};
4use crate::{
5    Error, PAGE_SIZE, PAGE_SIZE_PHYS, PhysAddr, align_up_phys, queue::Descriptor,
6    transport::InterruptStatus,
7};
8use core::{
9    convert::{TryFrom, TryInto},
10    mem::{align_of, size_of},
11    ops::Deref,
12    ptr::NonNull,
13};
14use safe_mmio::{
15    UniqueMmioPointer, field, field_shared,
16    fields::{ReadPure, ReadPureWrite, WriteOnly},
17};
18use zerocopy::{FromBytes, Immutable, IntoBytes};
19
/// Magic value "virt" (little-endian ASCII) that the first header register must contain.
const MAGIC_VALUE: u32 = 0x7472_6976;
/// Version register value reported by legacy (pre-1.0) MMIO devices.
pub(crate) const LEGACY_VERSION: u32 = 1;
/// Version register value reported by modern MMIO devices.
pub(crate) const MODERN_VERSION: u32 = 2;
/// Byte offset of the device-specific config space from the start of the MMIO region.
const CONFIG_SPACE_OFFSET: usize = 0x100;
24
/// The version of the VirtIO MMIO transport supported by a device.
///
/// The discriminants match the raw values of the device's `version` register,
/// so conversions to/from `u32` are exact.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum MmioVersion {
    /// Legacy MMIO transport with page-based addressing (version register = 1).
    Legacy = LEGACY_VERSION,
    /// Modern MMIO transport (version register = 2).
    Modern = MODERN_VERSION,
}
34
35impl TryFrom<u32> for MmioVersion {
36    type Error = MmioError;
37
38    fn try_from(version: u32) -> Result<Self, Self::Error> {
39        match version {
40            LEGACY_VERSION => Ok(Self::Legacy),
41            MODERN_VERSION => Ok(Self::Modern),
42            _ => Err(MmioError::UnsupportedVersion(version)),
43        }
44    }
45}
46
47impl From<MmioVersion> for u32 {
48    fn from(version: MmioVersion) -> Self {
49        match version {
50            MmioVersion::Legacy => LEGACY_VERSION,
51            MmioVersion::Modern => MODERN_VERSION,
52        }
53    }
54}
55
/// An error encountered initialising a VirtIO MMIO transport.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MmioError {
    /// The header doesn't start with the expected magic value 0x74726976 ("virt").
    #[error("Invalid magic value {0:#010x} (expected 0x74726976)")]
    BadMagic(u32),
    /// The header reports a version number that is neither 1 (legacy) nor 2 (modern).
    #[error("Unsupported Virtio MMIO version {0}")]
    UnsupportedVersion(u32),
    /// The header reports a device ID of 0, or one we don't recognise.
    #[error("Invalid or unknown device ID: {0}")]
    InvalidDeviceID(DeviceTypeError),
    /// The MMIO region size was smaller than the header size we expect
    /// (i.e. smaller than `CONFIG_SPACE_OFFSET` bytes).
    #[error("MMIO region too small")]
    MmioRegionTooSmall,
}
72
/// MMIO Device Register Interface, both legacy and modern.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
///
/// The struct is `#[repr(C)]` so that field offsets match the register
/// offsets required by the spec. The `__r*` fields pad over reserved
/// offsets and must never be accessed. Fields prefixed `legacy_` exist
/// only in the legacy interface; fields documented as "new interface
/// only" exist only in the modern one.
#[derive(Debug)]
#[repr(C)]
pub struct VirtIOHeader {
    /// Magic value
    magic: ReadPure<u32>,

    /// Device version number
    ///
    /// Legacy device returns value 0x1.
    version: ReadPure<u32>,

    /// Virtio Subsystem Device ID
    device_id: ReadPure<u32>,

    /// Virtio Subsystem Vendor ID
    vendor_id: ReadPure<u32>,

    /// Flags representing features the device supports
    device_features: ReadPure<u32>,

    /// Device (host) features word selection
    device_features_sel: WriteOnly<u32>,

    /// Reserved
    __r1: [u32; 2],

    /// Flags representing device features understood and activated by the driver
    driver_features: WriteOnly<u32>,

    /// Activated (guest) features word selection
    driver_features_sel: WriteOnly<u32>,

    /// Guest page size
    ///
    /// The driver writes the guest page size in bytes to the register during
    /// initialization, before any queues are used. This value should be a
    /// power of 2 and is used by the device to calculate the Guest address
    /// of the first queue page (see QueuePFN).
    legacy_guest_page_size: WriteOnly<u32>,

    /// Reserved
    __r2: u32,

    /// Virtual queue index
    ///
    /// Writing to this register selects the virtual queue that the following
    /// operations on the QueueNumMax, QueueNum, QueueAlign and QueuePFN
    /// registers apply to. The index number of the first queue is zero (0x0).
    queue_sel: WriteOnly<u32>,

    /// Maximum virtual queue size
    ///
    /// Reading from the register returns the maximum size of the queue the
    /// device is ready to process or zero (0x0) if the queue is not available.
    /// This applies to the queue selected by writing to QueueSel and is
    /// allowed only when QueuePFN is set to zero (0x0), so when the queue is
    /// not actively used.
    queue_num_max: ReadPure<u32>,

    /// Virtual queue size
    ///
    /// Queue size is the number of elements in the queue. Writing to this
    /// register notifies the device what size of the queue the driver will use.
    /// This applies to the queue selected by writing to QueueSel.
    queue_num: WriteOnly<u32>,

    /// Used Ring alignment in the virtual queue
    ///
    /// Writing to this register notifies the device about alignment boundary
    /// of the Used Ring in bytes. This value should be a power of 2 and
    /// applies to the queue selected by writing to QueueSel.
    legacy_queue_align: WriteOnly<u32>,

    /// Guest physical page number of the virtual queue
    ///
    /// Writing to this register notifies the device about location of the
    /// virtual queue in the Guest’s physical address space. This value is
    /// the index number of a page starting with the queue Descriptor Table.
    /// Value zero (0x0) means physical address zero (0x00000000) and is illegal.
    /// When the driver stops using the queue it writes zero (0x0) to this
    /// register. Reading from this register returns the currently used page
    /// number of the queue, therefore a value other than zero (0x0) means that
    /// the queue is in use. Both read and write accesses apply to the queue
    /// selected by writing to QueueSel.
    legacy_queue_pfn: ReadPureWrite<u32>,

    /// Virtual queue ready bit (new interface only)
    ///
    /// Writing one (0x1) notifies the device it can execute requests for the
    /// queue selected by QueueSel; writing zero (0x0) stops it.
    queue_ready: ReadPureWrite<u32>,

    /// Reserved
    __r3: [u32; 2],

    /// Queue notifier
    ///
    /// Writing a queue index to this register notifies the device that there
    /// are new buffers to process in that queue.
    queue_notify: WriteOnly<u32>,

    /// Reserved
    __r4: [u32; 3],

    /// Interrupt status
    ///
    /// Reading returns a bit mask of the events that caused the interrupt.
    interrupt_status: ReadPure<u32>,

    /// Interrupt acknowledge
    ///
    /// Writing a bit mask of handled events acknowledges them to the device.
    interrupt_ack: WriteOnly<u32>,

    /// Reserved
    __r5: [u32; 2],

    /// Device status
    ///
    /// Reading from this register returns the current device status flags.
    /// Writing non-zero values to this register sets the status flags,
    /// indicating the OS/driver progress. Writing zero (0x0) to this register
    /// triggers a device reset. The device sets QueuePFN to zero (0x0) for
    /// all queues in the device. Also see 3.1 Device Initialization.
    status: ReadPureWrite<DeviceStatus>,

    /// Reserved
    __r6: [u32; 3],

    // New interface only from here on: 64-bit physical addresses of the
    // selected queue's three areas, each split into low/high 32-bit halves.
    /// Descriptor area address, low 32 bits (new interface only)
    queue_desc_low: WriteOnly<u32>,
    /// Descriptor area address, high 32 bits (new interface only)
    queue_desc_high: WriteOnly<u32>,

    /// Reserved
    __r7: [u32; 2],

    /// Driver area (available ring) address, low 32 bits (new interface only)
    queue_driver_low: WriteOnly<u32>,
    /// Driver area (available ring) address, high 32 bits (new interface only)
    queue_driver_high: WriteOnly<u32>,

    /// Reserved
    __r8: [u32; 2],

    /// Device area (used ring) address, low 32 bits (new interface only)
    queue_device_low: WriteOnly<u32>,
    /// Device area (used ring) address, high 32 bits (new interface only)
    queue_device_high: WriteOnly<u32>,

    /// Reserved
    __r9: [u32; 21],

    /// Configuration atomicity value
    ///
    /// Changes whenever the device updates its config space; used to detect
    /// torn multi-register config reads.
    config_generation: ReadPure<u32>,
}
216
217impl VirtIOHeader {
218    /// Constructs a fake VirtIO header for use in unit tests.
219    #[cfg(test)]
220    pub fn make_fake_header(
221        version: u32,
222        device_id: u32,
223        vendor_id: u32,
224        device_features: u32,
225        queue_num_max: u32,
226    ) -> Self {
227        Self {
228            magic: ReadPure(MAGIC_VALUE),
229            version: ReadPure(version),
230            device_id: ReadPure(device_id),
231            vendor_id: ReadPure(vendor_id),
232            device_features: ReadPure(device_features),
233            device_features_sel: WriteOnly::default(),
234            __r1: Default::default(),
235            driver_features: Default::default(),
236            driver_features_sel: Default::default(),
237            legacy_guest_page_size: Default::default(),
238            __r2: Default::default(),
239            queue_sel: Default::default(),
240            queue_num_max: ReadPure(queue_num_max),
241            queue_num: Default::default(),
242            legacy_queue_align: Default::default(),
243            legacy_queue_pfn: Default::default(),
244            queue_ready: Default::default(),
245            __r3: Default::default(),
246            queue_notify: Default::default(),
247            __r4: Default::default(),
248            interrupt_status: Default::default(),
249            interrupt_ack: Default::default(),
250            __r5: Default::default(),
251            status: ReadPureWrite(DeviceStatus::empty()),
252            __r6: Default::default(),
253            queue_desc_low: Default::default(),
254            queue_desc_high: Default::default(),
255            __r7: Default::default(),
256            queue_driver_low: Default::default(),
257            queue_driver_high: Default::default(),
258            __r8: Default::default(),
259            queue_device_low: Default::default(),
260            queue_device_high: Default::default(),
261            __r9: Default::default(),
262            config_generation: Default::default(),
263        }
264    }
265}
266
/// MMIO Device Register Interface.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
#[derive(Debug)]
pub struct MmioTransport<'a> {
    /// Exclusive pointer to the device's MMIO register block.
    header: UniqueMmioPointer<'a, VirtIOHeader>,
    /// Device-specific config space that follows the register block.
    config_space: UniqueMmioPointer<'a, [u8]>,
    /// Transport version reported by the device's `version` register.
    version: MmioVersion,
    /// Device type decoded from the `device_id` register at construction time.
    device_type: DeviceType,
}
277
impl<'a> MmioTransport<'a> {
    /// Constructs a new VirtIO MMIO transport, or returns an error if the header reports an
    /// unsupported version.
    ///
    /// Returns [`MmioError::MmioRegionTooSmall`] if `mmio_size` is smaller than the fixed
    /// register block, or any validation error from [`Self::new_from_unique`].
    ///
    /// # Safety
    ///
    /// `header` must point to a properly aligned valid VirtIO MMIO region, which must remain valid
    /// for the lifetime `'a`. This includes the config space following the header, if any.
    pub unsafe fn new(header: NonNull<VirtIOHeader>, mmio_size: usize) -> Result<Self, MmioError> {
        // Everything after the fixed-size register block is device-specific config space.
        let Some(config_space_size) = mmio_size.checked_sub(CONFIG_SPACE_OFFSET) else {
            return Err(MmioError::MmioRegionTooSmall);
        };
        let config_space = NonNull::slice_from_raw_parts(
            // SAFETY: CONFIG_SPACE_OFFSET is well within the range of `isize`. The memory range
            // must be within the bounds of the allocation, because our caller promised that
            // `header` was a valid VirtIO MMIO region including the config space after the header.
            unsafe { header.cast::<u8>().byte_add(CONFIG_SPACE_OFFSET) },
            config_space_size,
        );
        // SAFETY: The caller promises that the config space following the header is an MMIO region
        // valid for `'a`.
        let config_space = unsafe { UniqueMmioPointer::new(config_space) };

        // SAFETY: The caller promises that `header` is a properly aligned MMIO region valid for
        // `'a`.
        let header = unsafe { UniqueMmioPointer::new(header) };

        Self::new_from_unique(header, config_space)
    }

    /// Constructs a new VirtIO MMIO transport, or returns an error if the header reports an
    /// unsupported version.
    ///
    /// Validates the magic value, decodes the device type from the `device_id` register, and
    /// parses the `version` register before taking ownership of the pointers.
    pub fn new_from_unique(
        header: UniqueMmioPointer<'a, VirtIOHeader>,
        config_space: UniqueMmioPointer<'a, [u8]>,
    ) -> Result<Self, MmioError> {
        // The first register must read back the "virt" magic value.
        let magic = field_shared!(header, magic).read();
        if magic != MAGIC_VALUE {
            return Err(MmioError::BadMagic(magic));
        }
        let device_id = field_shared!(header, device_id).read();
        let device_type = DeviceType::try_from(device_id).map_err(MmioError::InvalidDeviceID)?;
        // `try_into` uses `TryFrom<u32> for MmioVersion`, rejecting unknown versions.
        let version = field_shared!(header, version).read().try_into()?;
        Ok(Self {
            header,
            version,
            device_type,
            config_space,
        })
    }

    /// Gets the version of the VirtIO MMIO transport.
    pub fn version(&self) -> MmioVersion {
        self.version
    }

    /// Gets the vendor ID.
    pub fn vendor_id(&self) -> u32 {
        field_shared!(self.header, vendor_id).read()
    }
}
339
// SAFETY: `&MmioTransport` only allows MMIO reads or getting the config space, both of which are
// fine to happen concurrently on different CPU cores. (All register writes go through `&mut self`
// methods, so exclusive access to them is still enforced by the borrow checker.)
unsafe impl Sync for MmioTransport<'_> {}
343
impl Transport for MmioTransport<'_> {
    /// Returns the device type decoded from the `device_id` register at construction.
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    /// Reads the full 64-bit device feature mask.
    ///
    /// The device exposes features 32 bits at a time: the driver writes the word index to
    /// `device_features_sel` and then reads `device_features`. The select-then-read order
    /// of these register accesses is part of the device protocol and must not be reordered.
    fn read_device_features(&mut self) -> u64 {
        field!(self.header, device_features_sel).write(0); // device features [0, 32)
        let mut device_features_bits = field_shared!(self.header, device_features).read().into();
        field!(self.header, device_features_sel).write(1); // device features [32, 64)
        device_features_bits += (field_shared!(self.header, device_features).read() as u64) << 32;
        device_features_bits
    }

    /// Writes the full 64-bit driver (activated) feature mask, one 32-bit word at a time,
    /// selecting each word via `driver_features_sel` first.
    fn write_driver_features(&mut self, driver_features: u64) {
        field!(self.header, driver_features_sel).write(0); // driver features [0, 32)
        field!(self.header, driver_features).write(driver_features as u32);
        field!(self.header, driver_features_sel).write(1); // driver features [32, 64)
        field!(self.header, driver_features).write((driver_features >> 32) as u32);
    }

    /// Returns the maximum size the device supports for the given queue,
    /// or 0 if the queue is unavailable.
    fn max_queue_size(&mut self, queue: u16) -> u32 {
        // Select the queue first; `queue_num_max` applies to the selected queue.
        field!(self.header, queue_sel).write(queue.into());
        field_shared!(self.header, queue_num_max).read()
    }

    /// Notifies the device that the given queue has new buffers to process.
    fn notify(&mut self, queue: u16) {
        field!(self.header, queue_notify).write(queue.into());
    }

    /// Reads the current device status flags.
    fn get_status(&self) -> DeviceStatus {
        field_shared!(self.header, status).read()
    }

    /// Writes the device status flags. Writing an empty status resets the device.
    fn set_status(&mut self, status: DeviceStatus) {
        field!(self.header, status).write(status);
    }

    /// Tells a legacy device the guest page size it should use for PFN calculations.
    /// Modern devices use explicit 64-bit queue addresses instead, so this is a no-op.
    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        match self.version {
            MmioVersion::Legacy => {
                field!(self.header, legacy_guest_page_size).write(guest_page_size);
            }
            MmioVersion::Modern => {
                // No-op, modern devices don't care.
            }
        }
    }

    /// Returns true if queues must use the legacy contiguous page-based layout.
    fn requires_legacy_layout(&self) -> bool {
        match self.version {
            MmioVersion::Legacy => true,
            MmioVersion::Modern => false,
        }
    }

    /// Configures the given queue with the physical addresses of its three areas.
    ///
    /// For a legacy device the three areas must form one contiguous, page-aligned
    /// allocation (the device only receives a single page frame number); the asserts
    /// check the caller laid the queue out that way. For a modern device each area's
    /// address is programmed separately and the queue is then marked ready.
    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        match self.version {
            MmioVersion::Legacy => {
                // The driver area (available ring) must immediately follow the
                // descriptor table.
                assert_eq!(
                    driver_area - descriptors,
                    size_of::<Descriptor>() as u64 * u64::from(size)
                );
                // The device area (used ring) must follow at the next alignment
                // boundary after the available ring (size+3 u16s: flags, idx,
                // `size` ring entries and used_event).
                assert_eq!(
                    device_area - descriptors,
                    align_up_phys(
                        size_of::<Descriptor>() as u64 * u64::from(size)
                            + size_of::<u16>() as u64 * (u64::from(size) + 3)
                    )
                );
                let align = PAGE_SIZE as u32;
                // The descriptor table must be page-aligned, as only a PFN can be
                // passed to the device.
                let pfn = (descriptors / PAGE_SIZE_PHYS).try_into().unwrap();
                assert_eq!(u64::from(pfn) * PAGE_SIZE_PHYS, descriptors);
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, legacy_queue_align).write(align);
                field!(self.header, legacy_queue_pfn).write(pfn);
            }
            MmioVersion::Modern => {
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, queue_desc_low).write(descriptors as u32);
                field!(self.header, queue_desc_high).write((descriptors >> 32) as u32);
                field!(self.header, queue_driver_low).write(driver_area as u32);
                field!(self.header, queue_driver_high).write((driver_area >> 32) as u32);
                field!(self.header, queue_device_low).write(device_area as u32);
                field!(self.header, queue_device_high).write((device_area >> 32) as u32);
                // Mark the queue ready only after all addresses are programmed.
                field!(self.header, queue_ready).write(1);
            }
        }
    }

    /// Tears down the given queue so its memory can be reclaimed.
    fn queue_unset(&mut self, queue: u16) {
        match self.version {
            MmioVersion::Legacy => {
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(0);
                field!(self.header, legacy_queue_align).write(0);
                // Writing a PFN of 0 tells the device the queue is no longer in use.
                field!(self.header, legacy_queue_pfn).write(0);
            }
            MmioVersion::Modern => {
                field!(self.header, queue_sel).write(queue.into());

                field!(self.header, queue_ready).write(0);
                // Wait until we read the same value back, to ensure synchronisation (see 4.2.2.2).
                let queue_ready = field_shared!(self.header, queue_ready);
                while queue_ready.read() != 0 {}

                field!(self.header, queue_num).write(0);
                field!(self.header, queue_desc_low).write(0);
                field!(self.header, queue_desc_high).write(0);
                field!(self.header, queue_driver_low).write(0);
                field!(self.header, queue_driver_high).write(0);
                field!(self.header, queue_device_low).write(0);
                field!(self.header, queue_device_high).write(0);
            }
        }
    }

    /// Returns whether the given queue is currently in use by the device.
    fn queue_used(&mut self, queue: u16) -> bool {
        field!(self.header, queue_sel).write(queue.into());
        match self.version {
            // Legacy: a non-zero PFN means the queue is live.
            MmioVersion::Legacy => field_shared!(self.header, legacy_queue_pfn).read() != 0,
            // Modern: the queue_ready bit tracks the queue state.
            MmioVersion::Modern => field_shared!(self.header, queue_ready).read() != 0,
        }
    }

    /// Reads and acknowledges any pending interrupt, returning the events that
    /// were pending (empty if none). Only the bits that were actually read are
    /// acknowledged, so events raised in between are not lost.
    fn ack_interrupt(&mut self) -> InterruptStatus {
        let interrupt = field_shared!(self.header, interrupt_status).read();
        if interrupt != 0 {
            field!(self.header, interrupt_ack).write(interrupt);
            InterruptStatus::from_bits_truncate(interrupt)
        } else {
            InterruptStatus::empty()
        }
    }

    /// Reads the config generation counter, used to detect concurrent config
    /// space changes by the device across multi-register reads.
    fn read_config_generation(&self) -> u32 {
        field_shared!(self.header, config_generation).read()
    }

    /// Reads a `T` from the device config space at `offset` bytes.
    ///
    /// # Errors
    ///
    /// Returns `Error::ConfigSpaceTooSmall` if `offset + size_of::<T>()`
    /// exceeds the available config space.
    ///
    /// # Panics
    ///
    /// Panics if `T` requires alignment greater than 4 bytes, or if `offset`
    /// is not a multiple of `T`'s alignment.
    fn read_config_space<T: FromBytes + IntoBytes>(&self, offset: usize) -> Result<T, Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset.is_multiple_of(align_of::<T>()));

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The caller of `MmioTransport::new` guaranteed that the header pointer was
            // valid, including the config space. We have checked that the value is properly aligned
            // for `T` and within the bounds of the config space. Reading the config space shouldn't
            // have side-effects.
            unsafe {
                let ptr = self.config_space.ptr().cast::<T>().byte_add(offset);
                Ok(self
                    .config_space
                    .deref()
                    .child(NonNull::new(ptr.cast_mut()).unwrap())
                    .read_unsafe())
            }
        }
    }

    /// Writes a `T` to the device config space at `offset` bytes.
    ///
    /// # Errors
    ///
    /// Returns `Error::ConfigSpaceTooSmall` if `offset + size_of::<T>()`
    /// exceeds the available config space.
    ///
    /// # Panics
    ///
    /// Panics if `T` requires alignment greater than 4 bytes, or if `offset`
    /// is not a multiple of `T`'s alignment.
    fn write_config_space<T: IntoBytes + Immutable>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset.is_multiple_of(align_of::<T>()));

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The caller of `MmioTransport::new` guaranteed that the header pointer was
            // valid, including the config space. We have checked that the value is properly aligned
            // for `T` and within the bounds of the config space.
            unsafe {
                let ptr = self.config_space.ptr_nonnull().cast::<T>().byte_add(offset);
                self.config_space.child(ptr).write_unsafe(value);
            }
            Ok(())
        }
    }
}
543
impl Drop for MmioTransport<'_> {
    fn drop(&mut self) {
        // Reset the device when the transport is dropped: writing an empty
        // status (0) to the status register triggers a device reset, so the
        // device stops using any queue memory the driver is about to free.
        self.set_status(DeviceStatus::empty())
    }
}