1use crate::config::{read_config, ReadOnly};
4use crate::hal::Hal;
5use crate::queue::VirtQueue;
6use crate::transport::{InterruptStatus, Transport};
7use crate::{Error, Result};
8use bitflags::bitflags;
9use log::info;
10use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11
/// Index of the single request virtqueue used by the block device.
const QUEUE: u16 = 0;
/// Number of descriptors in the request virtqueue.
const QUEUE_SIZE: u16 = 16;
/// The set of feature bits this driver is willing to accept during negotiation.
const SUPPORTED_FEATURES: BlkFeature = BlkFeature::RO
    .union(BlkFeature::FLUSH)
    .union(BlkFeature::RING_INDIRECT_DESC)
    .union(BlkFeature::RING_EVENT_IDX)
    .union(BlkFeature::VERSION_1);
19
/// Driver for a VirtIO block device.
pub struct VirtIOBlk<H: Hal, T: Transport> {
    /// Transport used to talk to the device.
    transport: T,
    /// The single request virtqueue shared by all operations.
    queue: VirtQueue<H, { QUEUE_SIZE as usize }>,
    /// Device capacity in 512-byte sectors, read once during init.
    capacity: u64,
    /// Feature bits accepted by both driver and device during init.
    negotiated_features: BlkFeature,
}
52
impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
    /// Creates a new driver instance for the given transport.
    ///
    /// Negotiates features, reads the device capacity from the config space,
    /// and sets up the request virtqueue.
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);

        // The 64-bit capacity is split across two 32-bit config fields;
        // `read_consistent` re-reads until both halves come from the same
        // config-space generation.
        let capacity = transport.read_consistent(|| {
            Ok((read_config!(transport, BlkConfig, capacity_low)? as u64)
                | ((read_config!(transport, BlkConfig, capacity_high)? as u64) << 32))
        })?;
        // Capacity is in 512-byte sectors, so two sectors make one KiB.
        info!("found a block device of size {}KB", capacity / 2);

        let queue = VirtQueue::new(
            &mut transport,
            QUEUE,
            negotiated_features.contains(BlkFeature::RING_INDIRECT_DESC),
            negotiated_features.contains(BlkFeature::RING_EVENT_IDX),
        )?;
        transport.finish_init();

        Ok(VirtIOBlk {
            transport,
            queue,
            capacity,
            negotiated_features,
        })
    }

    /// Returns the capacity of the block device, in 512-byte sectors.
    pub fn capacity(&self) -> u64 {
        self.capacity
    }

    /// Returns true if the device negotiated the read-only (`RO`) feature.
    pub fn readonly(&self) -> bool {
        self.negotiated_features.contains(BlkFeature::RO)
    }

    /// Acknowledges any pending interrupt and returns its status.
    pub fn ack_interrupt(&mut self) -> InterruptStatus {
        self.transport.ack_interrupt()
    }

    /// Enables used-buffer notifications from the device.
    pub fn enable_interrupts(&mut self) {
        self.queue.set_dev_notify(true);
    }

    /// Disables used-buffer notifications from the device.
    pub fn disable_interrupts(&mut self) {
        self.queue.set_dev_notify(false);
    }

    /// Sends a request with no data payload and blocks until the device
    /// writes a response, returning its status.
    fn request(&mut self, request: BlkReq) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends a request and blocks until the device has filled `data` and
    /// written a response, returning its status.
    fn request_read(&mut self, request: BlkReq, data: &mut [u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            // The device writes the payload first, then the status byte.
            &mut [data, resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends a request carrying `data` to the device and blocks until it
    /// writes a response, returning its status.
    fn request_write(&mut self, request: BlkReq, data: &[u8]) -> Result {
        let mut resp = BlkResp::default();
        self.queue.add_notify_wait_pop(
            &[request.as_bytes(), data],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Flushes any cached writes to storage.
    ///
    /// If the `FLUSH` feature was not negotiated this is a no-op that
    /// returns success immediately.
    pub fn flush(&mut self) -> Result {
        if self.negotiated_features.contains(BlkFeature::FLUSH) {
            self.request(BlkReq {
                type_: ReqType::Flush,
                ..Default::default()
            })
        } else {
            Ok(())
        }
    }

    /// Reads the device ID into `id`, and returns its length up to (but not
    /// including) the first NUL byte, or 20 if the whole buffer is used.
    pub fn device_id(&mut self, id: &mut [u8; 20]) -> Result<usize> {
        self.request_read(
            BlkReq {
                type_: ReqType::GetId,
                ..Default::default()
            },
            id,
        )?;

        let length = id.iter().position(|&x| x == 0).unwrap_or(20);
        Ok(length)
    }

    /// Reads one or more whole blocks starting at `block_id` into `buf`,
    /// blocking until the request completes.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// `SECTOR_SIZE`.
    pub fn read_blocks(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_read(
            BlkReq {
                type_: ReqType::In,
                reserved: 0,
                sector: block_id as u64,
            },
            buf,
        )
    }

    /// Submits a non-blocking read request and returns the virtqueue token
    /// identifying it. Poll for completion with `peek_used` and finish with
    /// `complete_read_blocks`.
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are handed to the device via the virtqueue,
    /// so they must remain valid (and must not be accessed) until the
    /// request is completed by `complete_read_blocks` with the same buffers
    /// and the returned token.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// `SECTOR_SIZE`.
    pub unsafe fn read_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self
            .queue
            .add(&[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        // Only kick the device if it has not suppressed notifications.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a read started by `read_blocks_nb`, returning the device's
    /// status for the request.
    ///
    /// # Safety
    ///
    /// `token` must have been returned by `read_blocks_nb`, and `req`, `buf`
    /// and `resp` must be the same buffers that were passed to that call.
    pub unsafe fn complete_read_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        self.queue
            .pop_used(token, &[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        resp.status.into()
    }

    /// Writes one or more whole blocks from `buf` starting at `block_id`,
    /// blocking until the request completes.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// `SECTOR_SIZE`.
    pub fn write_blocks(&mut self, block_id: usize, buf: &[u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_write(
            BlkReq {
                type_: ReqType::Out,
                sector: block_id as u64,
                ..Default::default()
            },
            buf,
        )
    }

    /// Submits a non-blocking write request and returns the virtqueue token
    /// identifying it. Poll for completion with `peek_used` and finish with
    /// `complete_write_blocks`.
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are handed to the device via the virtqueue,
    /// so they must remain valid (and must not be accessed) until the
    /// request is completed by `complete_write_blocks` with the same buffers
    /// and the returned token.
    ///
    /// # Panics
    ///
    /// Panics if `buf` is empty or its length is not a multiple of
    /// `SECTOR_SIZE`.
    pub unsafe fn write_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self
            .queue
            .add(&[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        // Only kick the device if it has not suppressed notifications.
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a write started by `write_blocks_nb`, returning the
    /// device's status for the request.
    ///
    /// # Safety
    ///
    /// `token` must have been returned by `write_blocks_nb`, and `req`,
    /// `buf` and `resp` must be the same buffers that were passed to that
    /// call.
    pub unsafe fn complete_write_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        self.queue
            .pop_used(token, &[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        resp.status.into()
    }

    /// Returns the token of a completed request, if any, without removing
    /// it from the used ring.
    pub fn peek_used(&mut self) -> Option<u16> {
        self.queue.peek_used()
    }

    /// Returns the size (number of descriptors) of the request virtqueue.
    pub fn virt_queue_size(&self) -> u16 {
        QUEUE_SIZE
    }
}
383
impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
    fn drop(&mut self) {
        // Tell the device the queue is no longer valid before the `queue`
        // field (and the memory backing it) is dropped.
        self.transport.queue_unset(QUEUE);
    }
}
391
/// Layout of the virtio block device's config space. `#[repr(C)]`: field
/// order defines the device-visible offsets, so it must not be changed.
#[derive(FromBytes, Immutable, IntoBytes)]
#[repr(C)]
struct BlkConfig {
    /// Low 32 bits of the capacity, in 512-byte sectors.
    capacity_low: ReadOnly<u32>,
    /// High 32 bits of the capacity, in 512-byte sectors.
    capacity_high: ReadOnly<u32>,
    /// Maximum size of any single segment.
    size_max: ReadOnly<u32>,
    /// Maximum number of segments in a request.
    seg_max: ReadOnly<u32>,
    // Geometry fields (only meaningful if the GEOMETRY feature is offered).
    cylinders: ReadOnly<u16>,
    heads: ReadOnly<u8>,
    sectors: ReadOnly<u8>,
    /// Block size of the disk.
    blk_size: ReadOnly<u32>,
    // Topology fields (only meaningful if the TOPOLOGY feature is offered).
    physical_block_exp: ReadOnly<u8>,
    alignment_offset: ReadOnly<u8>,
    min_io_size: ReadOnly<u16>,
    opt_io_size: ReadOnly<u32>,
}
410
/// A VirtIO block device request header, sent to the device at the start of
/// every request.
#[repr(C)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
pub struct BlkReq {
    /// The type of request (read, write, flush, get-id, ...).
    type_: ReqType,
    /// Reserved; always written as 0 by this driver.
    reserved: u32,
    /// The first 512-byte sector the request applies to.
    sector: u64,
}
419
420impl Default for BlkReq {
421 fn default() -> Self {
422 Self {
423 type_: ReqType::In,
424 reserved: 0,
425 sector: 0,
426 }
427 }
428}
429
/// The response written by the device after processing a request.
#[repr(C)]
#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct BlkResp {
    /// Status byte written by the device; see [`RespStatus`].
    status: RespStatus,
}
436
impl BlkResp {
    /// Returns the status of the completed request.
    pub fn status(&self) -> RespStatus {
        self.status
    }
}
443
/// Request types, with the discriminant values defined by the virtio
/// specification.
#[repr(u32)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
enum ReqType {
    /// Read sectors from the device.
    In = 0,
    /// Write sectors to the device.
    Out = 1,
    /// Flush cached writes to storage.
    Flush = 4,
    /// Get the device ID string.
    GetId = 8,
    /// Get storage lifetime information.
    GetLifetime = 10,
    /// Discard sectors.
    Discard = 11,
    /// Write zeroes to sectors.
    WriteZeroes = 13,
    /// Securely erase sectors.
    SecureErase = 14,
}
456
/// Status byte written by a VirtIO block device into a [`BlkResp`].
#[repr(transparent)]
#[derive(Copy, Clone, Debug, Eq, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq)]
pub struct RespStatus(u8);
461
impl RespStatus {
    /// The request completed successfully.
    pub const OK: RespStatus = RespStatus(0);
    /// The request failed with an I/O error.
    pub const IO_ERR: RespStatus = RespStatus(1);
    /// The request type is not supported by the device.
    pub const UNSUPPORTED: RespStatus = RespStatus(2);
    /// Driver-side initial value: no response has been written yet
    /// (see `BlkResp::default`).
    pub const NOT_READY: RespStatus = RespStatus(3);
}
472
473impl From<RespStatus> for Result {
474 fn from(status: RespStatus) -> Self {
475 match status {
476 RespStatus::OK => Ok(()),
477 RespStatus::IO_ERR => Err(Error::IoError),
478 RespStatus::UNSUPPORTED => Err(Error::Unsupported),
479 RespStatus::NOT_READY => Err(Error::NotReady),
480 _ => Err(Error::IoError),
481 }
482 }
483}
484
485impl Default for BlkResp {
486 fn default() -> Self {
487 BlkResp {
488 status: RespStatus::NOT_READY,
489 }
490 }
491}
492
/// The size in bytes of a sector — the unit in which capacities and request
/// offsets are expressed.
pub const SECTOR_SIZE: usize = 512;
496
bitflags! {
    /// Feature bits for a virtio block device: device-specific bits plus
    /// the ring/transport bits shared by all virtio devices.
    #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
    struct BlkFeature: u64 {
        /// Device supports request barriers. (legacy)
        const BARRIER = 1 << 0;
        /// Maximum size of any single segment is in `size_max`.
        const SIZE_MAX = 1 << 1;
        /// Maximum number of segments in a request is in `seg_max`.
        const SEG_MAX = 1 << 2;
        /// Disk-style geometry specified in the config space.
        const GEOMETRY = 1 << 4;
        /// Device is read-only.
        const RO = 1 << 5;
        /// Block size of disk is in `blk_size`.
        const BLK_SIZE = 1 << 6;
        /// Device supports SCSI packet commands. (legacy)
        const SCSI = 1 << 7;
        /// Cache flush command support.
        const FLUSH = 1 << 9;
        /// Device exports information on optimal I/O alignment.
        const TOPOLOGY = 1 << 10;
        /// Device can toggle its cache between writeback and writethrough.
        const CONFIG_WCE = 1 << 11;
        /// Device supports multiple queues.
        const MQ = 1 << 12;
        /// Device can support the discard command.
        const DISCARD = 1 << 13;
        /// Device can support the write-zeroes command.
        const WRITE_ZEROES = 1 << 14;
        /// Device supports providing storage lifetime information.
        const LIFETIME = 1 << 15;
        /// Device supports the secure-erase command.
        const SECURE_ERASE = 1 << 16;

        // Ring/transport feature bits common to all virtio devices.
        const NOTIFY_ON_EMPTY = 1 << 24;
        const ANY_LAYOUT = 1 << 27;
        const RING_INDIRECT_DESC = 1 << 28;
        const RING_EVENT_IDX = 1 << 29;
        const UNUSED = 1 << 30;
        const VERSION_1 = 1 << 32;
        const ACCESS_PLATFORM = 1 << 33;
        const RING_PACKED = 1 << 34;
        const IN_ORDER = 1 << 35;
        const ORDER_PLATFORM = 1 << 36;
        const SR_IOV = 1 << 37;
        const NOTIFICATION_DATA = 1 << 38;
    }
}
552
553#[cfg(test)]
554mod tests {
555 use super::*;
556 use crate::{
557 hal::fake::FakeHal,
558 transport::{
559 fake::{FakeTransport, QueueStatus, State},
560 DeviceType,
561 },
562 };
563 use alloc::{sync::Arc, vec};
564 use core::mem::size_of;
565 use std::{sync::Mutex, thread};
566
567 #[test]
568 fn config() {
569 let config_space = BlkConfig {
570 capacity_low: ReadOnly::new(0x42),
571 capacity_high: ReadOnly::new(0x02),
572 size_max: ReadOnly::new(0),
573 seg_max: ReadOnly::new(0),
574 cylinders: ReadOnly::new(0),
575 heads: ReadOnly::new(0),
576 sectors: ReadOnly::new(0),
577 blk_size: ReadOnly::new(0),
578 physical_block_exp: ReadOnly::new(0),
579 alignment_offset: ReadOnly::new(0),
580 min_io_size: ReadOnly::new(0),
581 opt_io_size: ReadOnly::new(0),
582 };
583 let state = Arc::new(Mutex::new(State::new(
584 vec![QueueStatus::default()],
585 config_space,
586 )));
587 let transport = FakeTransport {
588 device_type: DeviceType::Block,
589 max_queue_size: QUEUE_SIZE.into(),
590 device_features: BlkFeature::RO.bits(),
591 state: state.clone(),
592 };
593 let blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();
594
595 assert_eq!(blk.capacity(), 0x02_0000_0042);
596 assert_eq!(blk.readonly(), true);
597 }
598
    // Drives a read of block 42 against a fake device running on a second
    // thread, which serves exactly one request.
    #[test]
    fn read() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Fake device: wait for the driver's notification, check the request
        // header, then reply with one sector of data followed by an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::In,
                            reserved: 0,
                            sector: 42
                        }
                        .as_bytes()
                    );

                    let mut response = vec![0; SECTOR_SIZE];
                    response[0..9].copy_from_slice(b"Test data");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        // Driver side: read block 42 and check the payload came through.
        let mut buffer = [0; 512];
        blk.read_blocks(42, &mut buffer).unwrap();
        assert_eq!(&buffer[0..9], b"Test data");

        handle.join().unwrap();
    }
667
668 #[test]
669 fn write() {
670 let config_space = BlkConfig {
671 capacity_low: ReadOnly::new(66),
672 capacity_high: ReadOnly::new(0),
673 size_max: ReadOnly::new(0),
674 seg_max: ReadOnly::new(0),
675 cylinders: ReadOnly::new(0),
676 heads: ReadOnly::new(0),
677 sectors: ReadOnly::new(0),
678 blk_size: ReadOnly::new(0),
679 physical_block_exp: ReadOnly::new(0),
680 alignment_offset: ReadOnly::new(0),
681 min_io_size: ReadOnly::new(0),
682 opt_io_size: ReadOnly::new(0),
683 };
684 let state = Arc::new(Mutex::new(State::new(
685 vec![QueueStatus::default()],
686 config_space,
687 )));
688 let transport = FakeTransport {
689 device_type: DeviceType::Block,
690 max_queue_size: QUEUE_SIZE.into(),
691 device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
692 state: state.clone(),
693 };
694 let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();
695
696 let handle = thread::spawn(move || {
698 println!("Device waiting for a request.");
699 State::wait_until_queue_notified(&state, QUEUE);
700 println!("Transmit queue was notified.");
701
702 assert!(state
703 .lock()
704 .unwrap()
705 .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
706 assert_eq!(
707 &request[0..size_of::<BlkReq>()],
708 BlkReq {
709 type_: ReqType::Out,
710 reserved: 0,
711 sector: 42
712 }
713 .as_bytes()
714 );
715 let data = &request[size_of::<BlkReq>()..];
716 assert_eq!(data.len(), SECTOR_SIZE);
717 assert_eq!(&data[0..9], b"Test data");
718
719 let mut response = Vec::new();
720 response.extend_from_slice(
721 BlkResp {
722 status: RespStatus::OK,
723 }
724 .as_bytes(),
725 );
726
727 response
728 }));
729 });
730
731 let mut buffer = [0; 512];
733 buffer[0..9].copy_from_slice(b"Test data");
734 blk.write_blocks(42, &mut buffer).unwrap();
735
736 blk.flush().unwrap();
738
739 handle.join().unwrap();
740 }
741
    // With the FLUSH feature negotiated, `flush` must send a Flush request
    // to the (fake) device instead of short-circuiting.
    #[test]
    fn flush() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            // FLUSH must be offered so the driver actually sends a request.
            device_features: (BlkFeature::RING_INDIRECT_DESC | BlkFeature::FLUSH).bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Fake device: expect a Flush request header and reply with OK.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::Flush,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    let mut response = Vec::new();
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        blk.flush().unwrap();

        handle.join().unwrap();
    }
807
    // `device_id` must issue a GetId request and return the length of the
    // NUL-terminated ID string the device writes back.
    #[test]
    fn device_id() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Fake device: expect a GetId request header and reply with a
        // 20-byte, NUL-padded ID followed by an OK status.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::GetId,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    let mut response = Vec::new();
                    response.extend_from_slice(b"device_id\0\0\0\0\0\0\0\0\0\0\0");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        let mut id = [0; 20];
        let length = blk.device_id(&mut id).unwrap();
        // Length stops at the first NUL, so only "device_id" is reported.
        assert_eq!(&id[0..length], b"device_id");

        handle.join().unwrap();
    }
875}