1use super::{DeviceStatus, DeviceType, DeviceTypeError, Transport};
4use crate::{
5 Error, PAGE_SIZE, PAGE_SIZE_PHYS, PhysAddr, align_up_phys, queue::Descriptor,
6 transport::InterruptStatus,
7};
8use core::{
9 convert::{TryFrom, TryInto},
10 mem::{align_of, size_of},
11 ops::Deref,
12 ptr::NonNull,
13};
14use safe_mmio::{
15 UniqueMmioPointer, field, field_shared,
16 fields::{ReadPure, ReadPureWrite, WriteOnly},
17};
18use zerocopy::{FromBytes, Immutable, IntoBytes};
19
/// "virt" as little-endian ASCII — the value a valid virtio MMIO device
/// reports in its `magic` register.
const MAGIC_VALUE: u32 = 0x7472_6976;
/// MMIO device version for the legacy (pre-1.0) virtio interface.
pub(crate) const LEGACY_VERSION: u32 = 1;
/// MMIO device version for the modern (virtio 1.0+) interface.
pub(crate) const MODERN_VERSION: u32 = 2;
/// Offset of the device-specific configuration space from the start of the
/// MMIO region; everything before it is the control register block.
const CONFIG_SPACE_OFFSET: usize = 0x100;
24
/// The MMIO transport version supported by a device.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum MmioVersion {
    /// Legacy MMIO transport (device version 1), using page-based queue
    /// addressing.
    Legacy = LEGACY_VERSION,
    /// Modern MMIO transport (device version 2), using 64-bit queue
    /// addresses.
    Modern = MODERN_VERSION,
}
34
35impl TryFrom<u32> for MmioVersion {
36 type Error = MmioError;
37
38 fn try_from(version: u32) -> Result<Self, Self::Error> {
39 match version {
40 LEGACY_VERSION => Ok(Self::Legacy),
41 MODERN_VERSION => Ok(Self::Modern),
42 _ => Err(MmioError::UnsupportedVersion(version)),
43 }
44 }
45}
46
47impl From<MmioVersion> for u32 {
48 fn from(version: MmioVersion) -> Self {
49 match version {
50 MmioVersion::Legacy => LEGACY_VERSION,
51 MmioVersion::Modern => MODERN_VERSION,
52 }
53 }
54}
55
/// An error encountered while initialising a virtio MMIO transport.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MmioError {
    /// The `magic` register held something other than the expected
    /// 0x74726976 ("virt").
    #[error("Invalid magic value {0:#010x} (expected 0x74726976)")]
    BadMagic(u32),
    /// The `version` register reported a version this driver doesn't
    /// support (neither 1 nor 2).
    #[error("Unsupported Virtio MMIO version {0}")]
    UnsupportedVersion(u32),
    /// The `device_id` register reported an ID that doesn't map to a known
    /// `DeviceType`.
    #[error("Invalid or unknown device ID: {0}")]
    InvalidDeviceID(DeviceTypeError),
    /// The caller-supplied MMIO region is smaller than the config space
    /// offset, so it can't even hold the register block.
    #[error("MMIO region too small")]
    MmioRegionTooSmall,
}
72
/// The control register block of a virtio MMIO device.
///
/// Every register is 32 bits wide; the `__rN` fields pad the reserved gaps
/// so that, with `#[repr(C)]`, each field lands at its fixed offset from
/// the start of the region (noted in brackets below). The config space
/// follows at offset 0x100.
#[derive(Debug)]
#[repr(C)]
pub struct VirtIOHeader {
    /// [0x000] Magic value: 0x74726976 ("virt" in little-endian ASCII).
    magic: ReadPure<u32>,

    /// [0x004] Device version: 1 (legacy) or 2 (modern).
    version: ReadPure<u32>,

    /// [0x008] Virtio device type ID, decoded via `DeviceType::try_from`.
    device_id: ReadPure<u32>,

    /// [0x00c] Vendor ID.
    vendor_id: ReadPure<u32>,

    /// [0x010] Device feature bits, exposed 32 at a time through the window
    /// selected by `device_features_sel`.
    device_features: ReadPure<u32>,

    /// [0x014] Selects which 32-bit window of feature bits
    /// `device_features` exposes (0 for bits 0..32, 1 for bits 32..64).
    device_features_sel: WriteOnly<u32>,

    /// [0x018] Reserved.
    __r1: [u32; 2],

    /// [0x020] Driver (guest) feature bits, written 32 at a time through
    /// the window selected by `driver_features_sel`.
    driver_features: WriteOnly<u32>,

    /// [0x024] Selects which 32-bit window `driver_features` writes to.
    driver_features_sel: WriteOnly<u32>,

    /// [0x028] Guest page size in bytes (legacy interface only).
    legacy_guest_page_size: WriteOnly<u32>,

    /// [0x02c] Reserved.
    __r2: u32,

    /// [0x030] Selects which virtqueue the queue registers below refer to.
    queue_sel: WriteOnly<u32>,

    /// [0x034] Maximum size supported for the selected queue.
    queue_num_max: ReadPure<u32>,

    /// [0x038] Size actually used for the selected queue.
    queue_num: WriteOnly<u32>,

    /// [0x03c] Alignment of the selected queue's used ring (legacy only).
    legacy_queue_align: WriteOnly<u32>,

    /// [0x040] Page frame number of the selected queue (legacy only);
    /// 0 means the queue is not in use.
    legacy_queue_pfn: ReadPureWrite<u32>,

    /// [0x044] Ready flag of the selected queue (modern only); 0 means the
    /// queue is not in use.
    queue_ready: ReadPureWrite<u32>,

    /// [0x048] Reserved.
    __r3: [u32; 2],

    /// [0x050] Queue notifier: writing a queue index notifies the device of
    /// new buffers in that queue.
    queue_notify: WriteOnly<u32>,

    /// [0x054] Reserved.
    __r4: [u32; 3],

    /// [0x060] Pending interrupt status bits.
    interrupt_status: ReadPure<u32>,

    /// [0x064] Interrupt acknowledge: writing bits read from
    /// `interrupt_status` clears them.
    interrupt_ack: WriteOnly<u32>,

    /// [0x068] Reserved.
    __r5: [u32; 2],

    /// [0x070] Device status; writing an empty status resets the device
    /// (see the `Drop` impl below).
    status: ReadPureWrite<DeviceStatus>,

    /// [0x074] Reserved.
    __r6: [u32; 3],

    /// [0x080/0x084] Low/high halves of the selected queue's descriptor
    /// table physical address (modern only).
    queue_desc_low: WriteOnly<u32>,
    queue_desc_high: WriteOnly<u32>,

    /// [0x088] Reserved.
    __r7: [u32; 2],

    /// [0x090/0x094] Low/high halves of the selected queue's driver
    /// (available ring) area physical address (modern only).
    queue_driver_low: WriteOnly<u32>,
    queue_driver_high: WriteOnly<u32>,

    /// [0x098] Reserved.
    __r8: [u32; 2],

    /// [0x0a0/0x0a4] Low/high halves of the selected queue's device
    /// (used ring) area physical address (modern only).
    queue_device_low: WriteOnly<u32>,
    queue_device_high: WriteOnly<u32>,

    /// [0x0a8] Reserved.
    __r9: [u32; 21],

    /// [0x0fc] Configuration generation counter, read via
    /// `read_config_generation` to detect config-space changes.
    config_generation: ReadPure<u32>,
}
216
impl VirtIOHeader {
    /// Constructs a fake virtio MMIO header for unit tests.
    ///
    /// The magic register is set to the expected `MAGIC_VALUE`, the
    /// identity and feature registers take the given values, and every
    /// other register starts out zeroed/default.
    #[cfg(test)]
    pub fn make_fake_header(
        version: u32,
        device_id: u32,
        vendor_id: u32,
        device_features: u32,
        queue_num_max: u32,
    ) -> Self {
        Self {
            magic: ReadPure(MAGIC_VALUE),
            version: ReadPure(version),
            device_id: ReadPure(device_id),
            vendor_id: ReadPure(vendor_id),
            device_features: ReadPure(device_features),
            device_features_sel: WriteOnly::default(),
            __r1: Default::default(),
            driver_features: Default::default(),
            driver_features_sel: Default::default(),
            legacy_guest_page_size: Default::default(),
            __r2: Default::default(),
            queue_sel: Default::default(),
            queue_num_max: ReadPure(queue_num_max),
            queue_num: Default::default(),
            legacy_queue_align: Default::default(),
            legacy_queue_pfn: Default::default(),
            queue_ready: Default::default(),
            __r3: Default::default(),
            queue_notify: Default::default(),
            __r4: Default::default(),
            interrupt_status: Default::default(),
            interrupt_ack: Default::default(),
            __r5: Default::default(),
            status: ReadPureWrite(DeviceStatus::empty()),
            __r6: Default::default(),
            queue_desc_low: Default::default(),
            queue_desc_high: Default::default(),
            __r7: Default::default(),
            queue_driver_low: Default::default(),
            queue_driver_high: Default::default(),
            __r8: Default::default(),
            queue_device_low: Default::default(),
            queue_device_high: Default::default(),
            __r9: Default::default(),
            config_generation: Default::default(),
        }
    }
}
266
/// A virtio transport over MMIO.
#[derive(Debug)]
pub struct MmioTransport<'a> {
    /// The device's control register block.
    header: UniqueMmioPointer<'a, VirtIOHeader>,
    /// The device-specific configuration space following the registers.
    config_space: UniqueMmioPointer<'a, [u8]>,
    /// Which MMIO transport version the device reported.
    version: MmioVersion,
    /// The device type decoded from the `device_id` register.
    device_type: DeviceType,
}
277
impl<'a> MmioTransport<'a> {
    /// Constructs a new virtio MMIO transport, or returns an error if the
    /// header reports a bad magic value, unsupported version or unknown
    /// device ID.
    ///
    /// `mmio_size` is the total size of the MMIO region in bytes; it must
    /// be at least `CONFIG_SPACE_OFFSET` (0x100) bytes, and everything past
    /// that offset is treated as the device-specific config space.
    ///
    /// # Safety
    ///
    /// `header` must point to a virtio MMIO region which is appropriately
    /// mapped for MMIO access, at least `mmio_size` bytes large, valid for
    /// the whole lifetime `'a`, and not accessed through any other pointer
    /// while the returned transport exists.
    pub unsafe fn new(header: NonNull<VirtIOHeader>, mmio_size: usize) -> Result<Self, MmioError> {
        let Some(config_space_size) = mmio_size.checked_sub(CONFIG_SPACE_OFFSET) else {
            return Err(MmioError::MmioRegionTooSmall);
        };
        let config_space = NonNull::slice_from_raw_parts(
            // SAFETY: The caller promises the region is at least `mmio_size`
            // bytes, and the `checked_sub` above ensures that
            // `mmio_size >= CONFIG_SPACE_OFFSET`, so the offset pointer
            // stays within the region.
            unsafe { header.cast::<u8>().byte_add(CONFIG_SPACE_OFFSET) },
            config_space_size,
        );
        // SAFETY: The caller promises the region is valid for MMIO for the
        // lifetime `'a` and not aliased by any other pointer.
        let config_space = unsafe { UniqueMmioPointer::new(config_space) };

        // SAFETY: As above, for the register block at the start of the
        // region.
        let header = unsafe { UniqueMmioPointer::new(header) };

        Self::new_from_unique(header, config_space)
    }

    /// Constructs a new virtio MMIO transport from already-wrapped MMIO
    /// pointers to the register block and config space, validating the
    /// magic, device ID and version registers.
    pub fn new_from_unique(
        header: UniqueMmioPointer<'a, VirtIOHeader>,
        config_space: UniqueMmioPointer<'a, [u8]>,
    ) -> Result<Self, MmioError> {
        let magic = field_shared!(header, magic).read();
        if magic != MAGIC_VALUE {
            return Err(MmioError::BadMagic(magic));
        }
        let device_id = field_shared!(header, device_id).read();
        let device_type = DeviceType::try_from(device_id).map_err(MmioError::InvalidDeviceID)?;
        let version = field_shared!(header, version).read().try_into()?;
        Ok(Self {
            header,
            version,
            device_type,
            config_space,
        })
    }

    /// Returns the MMIO transport version reported by the device.
    pub fn version(&self) -> MmioVersion {
        self.version
    }

    /// Reads the device's vendor ID register.
    pub fn vendor_id(&self) -> u32 {
        field_shared!(self.header, vendor_id).read()
    }
}
339
// SAFETY(review): `MmioTransport` only performs MMIO writes through
// `&mut self` methods; the `&self` methods only do `field_shared!` reads of
// `ReadPure` registers. That presumably makes shared references safe to use
// from multiple threads — confirm against `safe_mmio`'s aliasing/Sync rules
// before relying on this.
unsafe impl Sync for MmioTransport<'_> {}
343
impl Transport for MmioTransport<'_> {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        // The 64 feature bits are exposed through a 32-bit window selected
        // by `device_features_sel`.
        field!(self.header, device_features_sel).write(0); // device features bits [0, 32)
        let mut device_features_bits = field_shared!(self.header, device_features).read().into();
        field!(self.header, device_features_sel).write(1); // device features bits [32, 64)
        device_features_bits += (field_shared!(self.header, device_features).read() as u64) << 32;
        device_features_bits
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        // Mirror of `read_device_features`: select each 32-bit window, then
        // write the corresponding half of the driver's feature bits.
        field!(self.header, driver_features_sel).write(0); // driver features bits [0, 32)
        field!(self.header, driver_features).write(driver_features as u32);
        field!(self.header, driver_features_sel).write(1); // driver features bits [32, 64)
        field!(self.header, driver_features).write((driver_features >> 32) as u32);
    }

    fn max_queue_size(&mut self, queue: u16) -> u32 {
        // The queue registers are banked by `queue_sel`: select the queue
        // before reading its maximum size.
        field!(self.header, queue_sel).write(queue.into());
        field_shared!(self.header, queue_num_max).read()
    }

    fn notify(&mut self, queue: u16) {
        field!(self.header, queue_notify).write(queue.into());
    }

    fn get_status(&self) -> DeviceStatus {
        field_shared!(self.header, status).read()
    }

    fn set_status(&mut self, status: DeviceStatus) {
        field!(self.header, status).write(status);
    }

    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        match self.version {
            MmioVersion::Legacy => {
                field!(self.header, legacy_guest_page_size).write(guest_page_size);
            }
            MmioVersion::Modern => {
                // The guest page size register only exists on the legacy
                // interface; this is a no-op for modern devices.
            }
        }
    }

    fn requires_legacy_layout(&self) -> bool {
        match self.version {
            MmioVersion::Legacy => true,
            MmioVersion::Modern => false,
        }
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        match self.version {
            MmioVersion::Legacy => {
                // The legacy interface locates the whole queue from a single
                // page frame number, so the caller must have laid the three
                // areas out contiguously: descriptor table, then the driver
                // (available) ring, then the device (used) ring at the next
                // `align_up_phys` boundary. Verify that layout before
                // programming the device.
                assert_eq!(
                    driver_area - descriptors,
                    size_of::<Descriptor>() as u64 * u64::from(size)
                );
                assert_eq!(
                    device_area - descriptors,
                    align_up_phys(
                        size_of::<Descriptor>() as u64 * u64::from(size)
                            + size_of::<u16>() as u64 * (u64::from(size) + 3)
                    )
                );
                let align = PAGE_SIZE as u32;
                // Only a page frame number can be programmed, so the
                // descriptor table must itself be page-aligned.
                let pfn = (descriptors / PAGE_SIZE_PHYS).try_into().unwrap();
                assert_eq!(u64::from(pfn) * PAGE_SIZE_PHYS, descriptors);
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, legacy_queue_align).write(align);
                field!(self.header, legacy_queue_pfn).write(pfn);
            }
            MmioVersion::Modern => {
                // The modern interface takes full 64-bit physical addresses
                // for each area, split across low/high register pairs.
                // `queue_ready` is written last so the queue only becomes
                // active once fully configured.
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, queue_desc_low).write(descriptors as u32);
                field!(self.header, queue_desc_high).write((descriptors >> 32) as u32);
                field!(self.header, queue_driver_low).write(driver_area as u32);
                field!(self.header, queue_driver_high).write((driver_area >> 32) as u32);
                field!(self.header, queue_device_low).write(device_area as u32);
                field!(self.header, queue_device_high).write((device_area >> 32) as u32);
                field!(self.header, queue_ready).write(1);
            }
        }
    }

    fn queue_unset(&mut self, queue: u16) {
        match self.version {
            MmioVersion::Legacy => {
                // Writing a PFN of 0 disables the queue on the legacy
                // interface.
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(0);
                field!(self.header, legacy_queue_align).write(0);
                field!(self.header, legacy_queue_pfn).write(0);
            }
            MmioVersion::Modern => {
                field!(self.header, queue_sel).write(queue.into());

                field!(self.header, queue_ready).write(0);
                // Wait until the device acknowledges the disable by reading
                // 0 back from `queue_ready`, before tearing down the rest of
                // the queue configuration.
                let queue_ready = field_shared!(self.header, queue_ready);
                while queue_ready.read() != 0 {}

                field!(self.header, queue_num).write(0);
                field!(self.header, queue_desc_low).write(0);
                field!(self.header, queue_desc_high).write(0);
                field!(self.header, queue_driver_low).write(0);
                field!(self.header, queue_driver_high).write(0);
                field!(self.header, queue_device_low).write(0);
                field!(self.header, queue_device_high).write(0);
            }
        }
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        // A queue counts as in use if its PFN (legacy) or ready flag
        // (modern) is non-zero.
        field!(self.header, queue_sel).write(queue.into());
        match self.version {
            MmioVersion::Legacy => field_shared!(self.header, legacy_queue_pfn).read() != 0,
            MmioVersion::Modern => field_shared!(self.header, queue_ready).read() != 0,
        }
    }

    fn ack_interrupt(&mut self) -> InterruptStatus {
        let interrupt = field_shared!(self.header, interrupt_status).read();
        if interrupt != 0 {
            // Acknowledge exactly the bits that were observed pending, and
            // report them to the caller.
            field!(self.header, interrupt_ack).write(interrupt);
            InterruptStatus::from_bits_truncate(interrupt)
        } else {
            InterruptStatus::empty()
        }
    }

    fn read_config_generation(&self) -> u32 {
        field_shared!(self.header, config_generation).read()
    }

    fn read_config_space<T: FromBytes + IntoBytes>(&self, offset: usize) -> Result<T, Error> {
        // VirtIO only guarantees 4-byte alignment for the config space, so
        // reject types that need more, and require the offset to keep the
        // access naturally aligned for `T`.
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset.is_multiple_of(align_of::<T>()));

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The bounds check above guarantees that
            // `offset + size_of::<T>()` fits within `config_space`, so the
            // derived pointer is an in-bounds child of the config space.
            // NOTE(review): `read_unsafe` presumably reflects that
            // multi-word reads may tear while the device updates the config
            // space; callers appear expected to detect this via
            // `read_config_generation` — confirm.
            unsafe {
                let ptr = self.config_space.ptr().cast::<T>().byte_add(offset);
                Ok(self
                    .config_space
                    .deref()
                    .child(NonNull::new(ptr.cast_mut()).unwrap())
                    .read_unsafe())
            }
        }
    }

    fn write_config_space<T: IntoBytes + Immutable>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        // Same alignment constraints as `read_config_space`.
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset.is_multiple_of(align_of::<T>()));

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The bounds check above guarantees that
            // `offset + size_of::<T>()` fits within `config_space`, so the
            // derived pointer is an in-bounds child of the config space.
            unsafe {
                let ptr = self.config_space.ptr_nonnull().cast::<T>().byte_add(offset);
                self.config_space.child(ptr).write_unsafe(value);
            }
            Ok(())
        }
    }
}
543
544impl Drop for MmioTransport<'_> {
545 fn drop(&mut self) {
546 self.set_status(DeviceStatus::empty())
548 }
549}