use super::{DeviceStatus, DeviceType, DeviceTypeError, Transport};
use crate::{
    align_up_phys, queue::Descriptor, transport::InterruptStatus, Error, PhysAddr, PAGE_SIZE,
    PAGE_SIZE_PHYS,
};
use core::{
    convert::{TryFrom, TryInto},
    mem::{align_of, size_of},
    ops::Deref,
    ptr::NonNull,
};
use safe_mmio::{
    field, field_shared,
    fields::{ReadPure, ReadPureWrite, WriteOnly},
    UniqueMmioPointer,
};
use thiserror::Error;
use zerocopy::{FromBytes, Immutable, IntoBytes};

const MAGIC_VALUE: u32 = 0x7472_6976;
pub(crate) const LEGACY_VERSION: u32 = 1;
pub(crate) const MODERN_VERSION: u32 = 2;
const CONFIG_SPACE_OFFSET: usize = 0x100;

/// The version of the virtio-mmio transport offered by a device.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum MmioVersion {
    /// Legacy MMIO transport, with page-based queue configuration.
    Legacy = LEGACY_VERSION,
    /// Modern (virtio 1.0 and later) MMIO transport.
    Modern = MODERN_VERSION,
}

impl TryFrom<u32> for MmioVersion {
    type Error = MmioError;

    fn try_from(version: u32) -> Result<Self, Self::Error> {
        match version {
            LEGACY_VERSION => Ok(Self::Legacy),
            MODERN_VERSION => Ok(Self::Modern),
            _ => Err(MmioError::UnsupportedVersion(version)),
        }
    }
}

impl From<MmioVersion> for u32 {
    fn from(version: MmioVersion) -> Self {
        match version {
            MmioVersion::Legacy => LEGACY_VERSION,
            MmioVersion::Modern => MODERN_VERSION,
        }
    }
}

/// An error encountered while initialising a virtio-mmio transport.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MmioError {
    /// The header doesn't start with the expected magic value.
    #[error("Invalid magic value {0:#010x} (expected 0x74726976)")]
    BadMagic(u32),
    /// The header reports a virtio-mmio version which we don't support.
    #[error("Unsupported Virtio MMIO version {0}")]
    UnsupportedVersion(u32),
    /// The device ID in the header is not a known virtio device type.
    #[error("Invalid or unknown device ID: {0}")]
    InvalidDeviceID(DeviceTypeError),
    /// The supplied MMIO region is too small even to contain the header.
    #[error("MMIO region too small")]
    MmioRegionTooSmall,
}

/// MMIO device register layout, as defined in section 4.2.2 of the virtio specification.
///
/// The device's configuration space follows at `CONFIG_SPACE_OFFSET` (0x100).
#[derive(Debug)]
#[repr(C)]
pub struct VirtIOHeader {
    /// Magic value: 0x74726976 (the little-endian string "virt").
    magic: ReadPure<u32>,

    /// Device version number: 1 for legacy devices, 2 for modern devices.
    version: ReadPure<u32>,

    /// Virtio subsystem device ID.
    device_id: ReadPure<u32>,

    /// Virtio subsystem vendor ID.
    vendor_id: ReadPure<u32>,

    /// Flags representing features the device supports, for the bank selected by
    /// `device_features_sel`.
    device_features: ReadPure<u32>,

    /// Selects which 32-bit bank of device features is visible in `device_features`.
    device_features_sel: WriteOnly<u32>,

    __r1: [u32; 2],

    /// Flags representing features understood and activated by the driver, for the bank selected
    /// by `driver_features_sel`.
    driver_features: WriteOnly<u32>,

    /// Selects which 32-bit bank of driver features `driver_features` writes to.
    driver_features_sel: WriteOnly<u32>,

    /// Guest page size in bytes (legacy interface only).
    legacy_guest_page_size: WriteOnly<u32>,

    __r2: u32,

    /// Selects the virtual queue that the following queue registers apply to.
    queue_sel: WriteOnly<u32>,

    /// Maximum queue size supported by the device for the selected queue.
    queue_num_max: ReadPure<u32>,

    /// Queue size chosen by the driver for the selected queue.
    queue_num: WriteOnly<u32>,

    /// Used ring alignment of the selected queue (legacy interface only).
    legacy_queue_align: WriteOnly<u32>,

    /// Guest physical page number of the selected queue (legacy interface only).
    legacy_queue_pfn: ReadPureWrite<u32>,

    /// Ready bit of the selected queue (modern interface only).
    queue_ready: ReadPureWrite<u32>,

    __r3: [u32; 2],

    /// Writing a queue index notifies the device of new buffers in that queue.
    queue_notify: WriteOnly<u32>,

    __r4: [u32; 3],

    /// Pending interrupt status.
    interrupt_status: ReadPure<u32>,

    /// Writing back the bits read from `interrupt_status` acknowledges those interrupts.
    interrupt_ack: WriteOnly<u32>,

    __r5: [u32; 2],

    /// Device status.
    status: ReadPureWrite<DeviceStatus>,

    __r6: [u32; 3],

    /// Low and high halves of the 64-bit physical address of the selected queue's descriptor area
    /// (modern interface only).
    queue_desc_low: WriteOnly<u32>,
    queue_desc_high: WriteOnly<u32>,

    __r7: [u32; 2],

    /// Low and high halves of the 64-bit physical address of the selected queue's driver
    /// (available ring) area (modern interface only).
    queue_driver_low: WriteOnly<u32>,
    queue_driver_high: WriteOnly<u32>,

    __r8: [u32; 2],

    /// Low and high halves of the 64-bit physical address of the selected queue's device
    /// (used ring) area (modern interface only).
    queue_device_low: WriteOnly<u32>,
    queue_device_high: WriteOnly<u32>,

    __r9: [u32; 21],

    /// Configuration atomicity value; changes whenever the device updates its config space.
    config_generation: ReadPure<u32>,
}

impl VirtIOHeader {
    /// Constructs a fake virtio-mmio register block for use in unit tests.
    #[cfg(test)]
    pub fn make_fake_header(
        version: u32,
        device_id: u32,
        vendor_id: u32,
        device_features: u32,
        queue_num_max: u32,
    ) -> Self {
        Self {
            magic: ReadPure(MAGIC_VALUE),
            version: ReadPure(version),
            device_id: ReadPure(device_id),
            vendor_id: ReadPure(vendor_id),
            device_features: ReadPure(device_features),
            device_features_sel: WriteOnly::default(),
            __r1: Default::default(),
            driver_features: Default::default(),
            driver_features_sel: Default::default(),
            legacy_guest_page_size: Default::default(),
            __r2: Default::default(),
            queue_sel: Default::default(),
            queue_num_max: ReadPure(queue_num_max),
            queue_num: Default::default(),
            legacy_queue_align: Default::default(),
            legacy_queue_pfn: Default::default(),
            queue_ready: Default::default(),
            __r3: Default::default(),
            queue_notify: Default::default(),
            __r4: Default::default(),
            interrupt_status: Default::default(),
            interrupt_ack: Default::default(),
            __r5: Default::default(),
            status: ReadPureWrite(DeviceStatus::empty()),
            __r6: Default::default(),
            queue_desc_low: Default::default(),
            queue_desc_high: Default::default(),
            __r7: Default::default(),
            queue_driver_low: Default::default(),
            queue_driver_high: Default::default(),
            __r8: Default::default(),
            queue_device_low: Default::default(),
            queue_device_high: Default::default(),
            __r9: Default::default(),
            config_generation: Default::default(),
        }
    }
}

/// A virtio transport over MMIO, as described in section 4.2 of the virtio specification.
#[derive(Debug)]
pub struct MmioTransport<'a> {
    header: UniqueMmioPointer<'a, VirtIOHeader>,
    config_space: UniqueMmioPointer<'a, [u8]>,
    version: MmioVersion,
    device_type: DeviceType,
}

impl<'a> MmioTransport<'a> {
    /// Constructs a new VirtIO MMIO transport for the device whose register region starts at
    /// `header` and is `mmio_size` bytes long.
    ///
    /// # Safety
    ///
    /// `header` must point to a properly aligned virtio-mmio register region of at least
    /// `mmio_size` bytes, which must be mapped for MMIO access and not aliased or accessed in any
    /// other way for the lifetime of the returned transport.
    pub unsafe fn new(header: NonNull<VirtIOHeader>, mmio_size: usize) -> Result<Self, MmioError> {
        let Some(config_space_size) = mmio_size.checked_sub(CONFIG_SPACE_OFFSET) else {
            return Err(MmioError::MmioRegionTooSmall);
        };
        let config_space = NonNull::slice_from_raw_parts(
            // SAFETY: The caller promises that the region is at least `mmio_size` bytes long, so
            // the config space starts `CONFIG_SPACE_OFFSET` bytes into it.
            unsafe { header.cast::<u8>().byte_add(CONFIG_SPACE_OFFSET) },
            config_space_size,
        );
        // SAFETY: The caller promises that the region is a valid, unaliased virtio-mmio mapping
        // for the lifetime of the returned transport.
        let config_space = unsafe { UniqueMmioPointer::new(config_space) };
        let header = unsafe { UniqueMmioPointer::new(header) };

        Self::new_from_unique(header, config_space)
    }

    /// Constructs a new VirtIO MMIO transport from unique pointers to the device's register
    /// header and config space.
    pub fn new_from_unique(
        header: UniqueMmioPointer<'a, VirtIOHeader>,
        config_space: UniqueMmioPointer<'a, [u8]>,
    ) -> Result<Self, MmioError> {
        let magic = field_shared!(header, magic).read();
        if magic != MAGIC_VALUE {
            return Err(MmioError::BadMagic(magic));
        }
        let device_id = field_shared!(header, device_id).read();
        let device_type = DeviceType::try_from(device_id).map_err(MmioError::InvalidDeviceID)?;
        let version = field_shared!(header, version).read().try_into()?;
        Ok(Self {
            header,
            version,
            device_type,
            config_space,
        })
    }

    /// Gets the version of the virtio-mmio transport offered by the device.
    pub fn version(&self) -> MmioVersion {
        self.version
    }

    /// Gets the vendor ID reported by the device.
    pub fn vendor_id(&self) -> u32 {
        field_shared!(self.header, vendor_id).read()
    }
}
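
// A minimal probe sketch (illustrative only, not part of the driver): a kernel which has
// discovered a virtio-mmio node, for example from its device tree, could construct a transport
// roughly like this. The address `0x0a00_0000` and region size `0x200` are hypothetical values.
//
//     let header = NonNull::new(0x0a00_0000 as *mut VirtIOHeader).unwrap();
//     // SAFETY: assumes the region is mapped for MMIO access and not accessed in any other way.
//     let transport = unsafe { MmioTransport::new(header, 0x200) }.expect("not a virtio device");
//     let device_type = transport.device_type();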

// SAFETY: The only MMIO accesses possible through a shared `MmioTransport` reference are reads of
// header fields marked `ReadPure` and of the device's config space, none of which have side
// effects, so the transport may be shared between threads.
unsafe impl Sync for MmioTransport<'_> {}

impl Transport for MmioTransport<'_> {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        // Select and read the low then the high 32 bits of the device feature flags.
        field!(self.header, device_features_sel).write(0);
        let mut device_features_bits = field_shared!(self.header, device_features).read().into();
        field!(self.header, device_features_sel).write(1);
        device_features_bits += (field_shared!(self.header, device_features).read() as u64) << 32;
        device_features_bits
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        // Select and write the low then the high 32 bits of the driver feature flags.
        field!(self.header, driver_features_sel).write(0);
        field!(self.header, driver_features).write(driver_features as u32);
        field!(self.header, driver_features_sel).write(1);
        field!(self.header, driver_features).write((driver_features >> 32) as u32);
    }

    fn max_queue_size(&mut self, queue: u16) -> u32 {
        field!(self.header, queue_sel).write(queue.into());
        field_shared!(self.header, queue_num_max).read()
    }

    fn notify(&mut self, queue: u16) {
        field!(self.header, queue_notify).write(queue.into());
    }

    fn get_status(&self) -> DeviceStatus {
        field_shared!(self.header, status).read()
    }

    fn set_status(&mut self, status: DeviceStatus) {
        field!(self.header, status).write(status);
    }

    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        match self.version {
            MmioVersion::Legacy => {
                field!(self.header, legacy_guest_page_size).write(guest_page_size);
            }
            MmioVersion::Modern => {
                // The guest page size register only exists on the legacy interface; modern
                // devices don't need to be told the page size.
            }
        }
    }

    fn requires_legacy_layout(&self) -> bool {
        match self.version {
            MmioVersion::Legacy => true,
            MmioVersion::Modern => false,
        }
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        match self.version {
            MmioVersion::Legacy => {
                // The legacy interface expects the descriptor table, available ring and used ring
                // to be laid out contiguously in a single page-aligned region, so check that the
                // caller has done this before programming the queue's page frame number.
                assert_eq!(
                    driver_area - descriptors,
                    size_of::<Descriptor>() as u64 * u64::from(size)
                );
                assert_eq!(
                    device_area - descriptors,
                    align_up_phys(
                        size_of::<Descriptor>() as u64 * u64::from(size)
                            + size_of::<u16>() as u64 * (u64::from(size) + 3)
                    )
                );
                let align = PAGE_SIZE as u32;
                let pfn = (descriptors / PAGE_SIZE_PHYS).try_into().unwrap();
                assert_eq!(u64::from(pfn) * PAGE_SIZE_PHYS, descriptors);
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, legacy_queue_align).write(align);
                field!(self.header, legacy_queue_pfn).write(pfn);
            }
            MmioVersion::Modern => {
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(size);
                field!(self.header, queue_desc_low).write(descriptors as u32);
                field!(self.header, queue_desc_high).write((descriptors >> 32) as u32);
                field!(self.header, queue_driver_low).write(driver_area as u32);
                field!(self.header, queue_driver_high).write((driver_area >> 32) as u32);
                field!(self.header, queue_device_low).write(device_area as u32);
                field!(self.header, queue_device_high).write((device_area >> 32) as u32);
                field!(self.header, queue_ready).write(1);
            }
        }
    }

    fn queue_unset(&mut self, queue: u16) {
        match self.version {
            MmioVersion::Legacy => {
                field!(self.header, queue_sel).write(queue.into());
                field!(self.header, queue_num).write(0);
                field!(self.header, legacy_queue_align).write(0);
                field!(self.header, legacy_queue_pfn).write(0);
            }
            MmioVersion::Modern => {
                field!(self.header, queue_sel).write(queue.into());

                field!(self.header, queue_ready).write(0);
                // Wait for the device to acknowledge that the queue has been disabled, i.e. until
                // the queue ready register reads back as 0.
                let queue_ready = field_shared!(self.header, queue_ready);
                while queue_ready.read() != 0 {}

                field!(self.header, queue_num).write(0);
                field!(self.header, queue_desc_low).write(0);
                field!(self.header, queue_desc_high).write(0);
                field!(self.header, queue_driver_low).write(0);
                field!(self.header, queue_driver_high).write(0);
                field!(self.header, queue_device_low).write(0);
                field!(self.header, queue_device_high).write(0);
            }
        }
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        field!(self.header, queue_sel).write(queue.into());
        match self.version {
            MmioVersion::Legacy => field_shared!(self.header, legacy_queue_pfn).read() != 0,
            MmioVersion::Modern => field_shared!(self.header, queue_ready).read() != 0,
        }
    }

    fn ack_interrupt(&mut self) -> InterruptStatus {
        let interrupt = field_shared!(self.header, interrupt_status).read();
        if interrupt != 0 {
            field!(self.header, interrupt_ack).write(interrupt);
            InterruptStatus::from_bits_truncate(interrupt)
        } else {
            InterruptStatus::empty()
        }
    }

    fn read_config_generation(&self) -> u32 {
        field_shared!(self.header, config_generation).read()
    }

    fn read_config_space<T: FromBytes + IntoBytes>(&self, offset: usize) -> Result<T, Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset % align_of::<T>() == 0);

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The bounds check above ensures that the value read lies entirely within the
            // config space, which is valid MMIO memory for the lifetime of `self`.
            unsafe {
                let ptr = self.config_space.ptr().cast::<T>().byte_add(offset);
                Ok(self
                    .config_space
                    .deref()
                    .child(NonNull::new(ptr.cast_mut()).unwrap())
                    .read_unsafe())
            }
        }
    }

    fn write_config_space<T: IntoBytes + Immutable>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset % align_of::<T>() == 0);

        if self.config_space.len() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: The bounds check above ensures that the value written lies entirely within
            // the config space, which is valid MMIO memory for the lifetime of `self`.
            unsafe {
                let ptr = self.config_space.ptr_nonnull().cast::<T>().byte_add(offset);
                self.config_space.child(ptr).write_unsafe(value);
            }
            Ok(())
        }
    }
}

impl Drop for MmioTransport<'_> {
    fn drop(&mut self) {
        // Reset the device when the transport is dropped, so that it stops using any memory
        // (such as queue buffers) that the driver may free or reuse afterwards.
        self.set_status(DeviceStatus::empty())
    }
}
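
/// A minimal sketch of how the transport can be exercised against the fake header above. The
/// device ID (2, a block device in the virtio spec, assumed here to be accepted by
/// `DeviceType::try_from`), the feature bits and the queue size are illustrative values.
#[cfg(test)]
mod mmio_transport_sketch {
    use super::*;

    #[test]
    fn probe_fake_modern_device() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 2, 0, 0b10, 4);
        // The header is exactly `CONFIG_SPACE_OFFSET` (0x100) bytes, so passing its size as the
        // MMIO region size leaves an empty config space.
        let mut transport = unsafe {
            MmioTransport::new(NonNull::from(&mut header), size_of::<VirtIOHeader>())
        }
        .unwrap();
        assert_eq!(transport.version(), MmioVersion::Modern);
        assert_eq!(transport.max_queue_size(0), 4);
        // The fake header has a single feature register, so both 32-bit banks read back the same
        // value and it appears in both halves of the combined 64-bit result.
        assert_eq!(transport.read_device_features(), 0b10_u64 | (0b10_u64 << 32));
    }
}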