// virtio_drivers/device/blk.rs

1//! Driver for VirtIO block devices.
2
3use crate::config::{read_config, ReadOnly};
4use crate::hal::Hal;
5use crate::queue::VirtQueue;
6use crate::transport::{InterruptStatus, Transport};
7use crate::{Error, Result};
8use bitflags::bitflags;
9use log::info;
10use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11
/// Index of the single request virtqueue used by this driver.
const QUEUE: u16 = 0;
/// Number of descriptors in the request virtqueue.
const QUEUE_SIZE: u16 = 16;
/// Feature bits this driver is prepared to negotiate with the device.
const SUPPORTED_FEATURES: BlkFeature = BlkFeature::RO
    .union(BlkFeature::FLUSH)
    .union(BlkFeature::RING_INDIRECT_DESC)
    .union(BlkFeature::RING_EVENT_IDX)
    .union(BlkFeature::VERSION_1);
19
/// Driver for a VirtIO block device.
///
/// This is a simple virtual block device, e.g. disk.
///
/// Read and write requests (and other exotic requests) are placed in the queue and serviced
/// (probably out of order) by the device except where noted.
///
/// # Example
///
/// ```
/// # use virtio_drivers::{Error, Hal};
/// # use virtio_drivers::transport::Transport;
/// use virtio_drivers::device::blk::{VirtIOBlk, SECTOR_SIZE};
///
/// # fn example<HalImpl: Hal, T: Transport>(transport: T) -> Result<(), Error> {
/// let mut disk = VirtIOBlk::<HalImpl, _>::new(transport)?;
///
/// // `capacity` is in 512-byte sectors, so multiply by the sector size and divide by 1024 for kB.
/// println!("VirtIO block device: {} kB", disk.capacity() * SECTOR_SIZE as u64 / 1024);
///
/// // Read sector 0 and then copy it to sector 1.
/// let mut buf = [0; SECTOR_SIZE];
/// disk.read_blocks(0, &mut buf)?;
/// disk.write_blocks(1, &buf)?;
/// # Ok(())
/// # }
/// ```
pub struct VirtIOBlk<H: Hal, T: Transport> {
    /// Transport used to communicate with the device.
    transport: T,
    /// The single request/response virtqueue.
    queue: VirtQueue<H, { QUEUE_SIZE as usize }>,
    /// Device capacity in 512-byte sectors, read from config space at init time.
    capacity: u64,
    /// Feature bits agreed with the device during initialisation.
    negotiated_features: BlkFeature,
}
52
53impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
    /// Create a new VirtIO-Blk driver.
    pub fn new(mut transport: T) -> Result<Self> {
        // Start device initialisation and negotiate features; the device masks
        // SUPPORTED_FEATURES down to what it actually offers.
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);

        // Read configuration space. The 64-bit capacity is split across two
        // 32-bit fields; `read_consistent` presumably guards against a torn
        // read between them — see the `Transport` docs to confirm.
        let capacity = transport.read_consistent(|| {
            Ok((read_config!(transport, BlkConfig, capacity_low)? as u64)
                | ((read_config!(transport, BlkConfig, capacity_high)? as u64) << 32))
        })?;
        info!("found a block device of size {}KB", capacity / 2);

        let queue = VirtQueue::new(
            &mut transport,
            QUEUE,
            negotiated_features.contains(BlkFeature::RING_INDIRECT_DESC),
            negotiated_features.contains(BlkFeature::RING_EVENT_IDX),
        )?;
        // Mark the driver ready; done only after the queue is set up.
        transport.finish_init();

        Ok(VirtIOBlk {
            transport,
            queue,
            capacity,
            negotiated_features,
        })
    }
80
81    /// Gets the capacity of the block device, in 512 byte ([`SECTOR_SIZE`]) sectors.
82    pub fn capacity(&self) -> u64 {
83        self.capacity
84    }
85
86    /// Returns true if the block device is read-only, or false if it allows writes.
87    pub fn readonly(&self) -> bool {
88        self.negotiated_features.contains(BlkFeature::RO)
89    }
90
    /// Acknowledges a pending interrupt, if any, and returns the interrupt
    /// status reported by the transport.
    pub fn ack_interrupt(&mut self) -> InterruptStatus {
        self.transport.ack_interrupt()
    }
97
98    /// Enables interrupts from the device.
99    pub fn enable_interrupts(&mut self) {
100        self.queue.set_dev_notify(true);
101    }
102
103    /// Disables interrupts from the device.
104    pub fn disable_interrupts(&mut self) {
105        self.queue.set_dev_notify(false);
106    }
107
108    /// Sends the given request to the device and waits for a response, with no extra data.
109    fn request(&mut self, request: BlkReq) -> Result {
110        let mut resp = BlkResp::default();
111        self.queue.add_notify_wait_pop(
112            &[request.as_bytes()],
113            &mut [resp.as_mut_bytes()],
114            &mut self.transport,
115        )?;
116        resp.status.into()
117    }
118
119    /// Sends the given request to the device and waits for a response, including the given data.
120    fn request_read(&mut self, request: BlkReq, data: &mut [u8]) -> Result {
121        let mut resp = BlkResp::default();
122        self.queue.add_notify_wait_pop(
123            &[request.as_bytes()],
124            &mut [data, resp.as_mut_bytes()],
125            &mut self.transport,
126        )?;
127        resp.status.into()
128    }
129
130    /// Sends the given request and data to the device and waits for a response.
131    fn request_write(&mut self, request: BlkReq, data: &[u8]) -> Result {
132        let mut resp = BlkResp::default();
133        self.queue.add_notify_wait_pop(
134            &[request.as_bytes(), data],
135            &mut [resp.as_mut_bytes()],
136            &mut self.transport,
137        )?;
138        resp.status.into()
139    }
140
141    /// Requests the device to flush any pending writes to storage.
142    ///
143    /// This will be ignored if the device doesn't support the `VIRTIO_BLK_F_FLUSH` feature.
144    pub fn flush(&mut self) -> Result {
145        if self.negotiated_features.contains(BlkFeature::FLUSH) {
146            self.request(BlkReq {
147                type_: ReqType::Flush,
148                ..Default::default()
149            })
150        } else {
151            Ok(())
152        }
153    }
154
155    /// Gets the device ID.
156    ///
157    /// The ID is written as ASCII into the given buffer, which must be 20 bytes long, and the used
158    /// length returned.
159    pub fn device_id(&mut self, id: &mut [u8; 20]) -> Result<usize> {
160        self.request_read(
161            BlkReq {
162                type_: ReqType::GetId,
163                ..Default::default()
164            },
165            id,
166        )?;
167
168        let length = id.iter().position(|&x| x == 0).unwrap_or(20);
169        Ok(length)
170    }
171
172    /// Reads one or more blocks into the given buffer.
173    ///
174    /// The buffer length must be a non-zero multiple of [`SECTOR_SIZE`].
175    ///
176    /// Blocks until the read completes or there is an error.
177    pub fn read_blocks(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
178        assert_ne!(buf.len(), 0);
179        assert_eq!(buf.len() % SECTOR_SIZE, 0);
180        self.request_read(
181            BlkReq {
182                type_: ReqType::In,
183                reserved: 0,
184                sector: block_id as u64,
185            },
186            buf,
187        )
188    }
189
190    /// Submits a request to read one or more blocks, but returns immediately without waiting for
191    /// the read to complete.
192    ///
193    /// # Arguments
194    ///
195    /// * `block_id` - The identifier of the first block to read.
196    /// * `req` - A buffer which the driver can use for the request to send to the device. The
197    ///   contents don't matter as `read_blocks_nb` will initialise it, but like the other buffers
198    ///   it needs to be valid (and not otherwise used) until the corresponding
199    ///   `complete_read_blocks` call. Its length must be a non-zero multiple of [`SECTOR_SIZE`].
200    /// * `buf` - The buffer in memory into which the block should be read.
201    /// * `resp` - A mutable reference to a variable provided by the caller
202    ///   to contain the status of the request. The caller can safely
203    ///   read the variable only after the request is complete.
204    ///
205    /// # Usage
206    ///
207    /// It will submit request to the VirtIO block device and return a token identifying
208    /// the position of the first Descriptor in the chain. If there are not enough
209    /// Descriptors to allocate, then it returns [`Error::QueueFull`].
210    ///
211    /// The caller can then call `peek_used` with the returned token to check whether the device has
212    /// finished handling the request. Once it has, the caller must call `complete_read_blocks` with
213    /// the same buffers before reading the response.
214    ///
215    /// ```
216    /// # use virtio_drivers::{Error, Hal};
217    /// # use virtio_drivers::device::blk::VirtIOBlk;
218    /// # use virtio_drivers::transport::Transport;
219    /// use virtio_drivers::device::blk::{BlkReq, BlkResp, RespStatus};
220    ///
221    /// # fn example<H: Hal, T: Transport>(blk: &mut VirtIOBlk<H, T>) -> Result<(), Error> {
222    /// let mut request = BlkReq::default();
223    /// let mut buffer = [0; 512];
224    /// let mut response = BlkResp::default();
225    /// let token = unsafe { blk.read_blocks_nb(42, &mut request, &mut buffer, &mut response) }?;
226    ///
227    /// // Wait for an interrupt to tell us that the request completed...
228    /// assert_eq!(blk.peek_used(), Some(token));
229    ///
230    /// unsafe {
231    ///   blk.complete_read_blocks(token, &request, &mut buffer, &mut response)?;
232    /// }
233    /// if response.status() == RespStatus::OK {
234    ///   println!("Successfully read block.");
235    /// } else {
236    ///   println!("Error {:?} reading block.", response.status());
237    /// }
238    /// # Ok(())
239    /// # }
240    /// ```
241    ///
242    /// # Safety
243    ///
244    /// `req`, `buf` and `resp` are still borrowed by the underlying VirtIO block device even after
245    /// this method returns. Thus, it is the caller's responsibility to guarantee that they are not
246    /// accessed before the request is completed in order to avoid data races.
247    pub unsafe fn read_blocks_nb(
248        &mut self,
249        block_id: usize,
250        req: &mut BlkReq,
251        buf: &mut [u8],
252        resp: &mut BlkResp,
253    ) -> Result<u16> {
254        assert_ne!(buf.len(), 0);
255        assert_eq!(buf.len() % SECTOR_SIZE, 0);
256        *req = BlkReq {
257            type_: ReqType::In,
258            reserved: 0,
259            sector: block_id as u64,
260        };
261        let token = self
262            .queue
263            .add(&[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
264        if self.queue.should_notify() {
265            self.transport.notify(QUEUE);
266        }
267        Ok(token)
268    }
269
270    /// Completes a read operation which was started by `read_blocks_nb`.
271    ///
272    /// # Safety
273    ///
274    /// The same buffers must be passed in again as were passed to `read_blocks_nb` when it returned
275    /// the token.
276    pub unsafe fn complete_read_blocks(
277        &mut self,
278        token: u16,
279        req: &BlkReq,
280        buf: &mut [u8],
281        resp: &mut BlkResp,
282    ) -> Result<()> {
283        self.queue
284            .pop_used(token, &[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
285        resp.status.into()
286    }
287
288    /// Writes the contents of the given buffer to a block or blocks.
289    ///
290    /// The buffer length must be a non-zero multiple of [`SECTOR_SIZE`].
291    ///
292    /// Blocks until the write is complete or there is an error.
293    pub fn write_blocks(&mut self, block_id: usize, buf: &[u8]) -> Result {
294        assert_ne!(buf.len(), 0);
295        assert_eq!(buf.len() % SECTOR_SIZE, 0);
296        self.request_write(
297            BlkReq {
298                type_: ReqType::Out,
299                sector: block_id as u64,
300                ..Default::default()
301            },
302            buf,
303        )
304    }
305
    /// Submits a request to write one or more blocks, but returns immediately without waiting for
    /// the write to complete.
    ///
    /// # Arguments
    ///
    /// * `block_id` - The identifier of the first block to write.
    /// * `req` - A buffer which the driver can use for the request to send to the device. The
    ///   contents don't matter as `write_blocks_nb` will initialise it, but like the other buffers
    ///   it needs to be valid (and not otherwise used) until the corresponding
    ///   `complete_write_blocks` call.
    /// * `buf` - The buffer in memory containing the data to write to the blocks. Its length must
    ///   be a non-zero multiple of [`SECTOR_SIZE`].
    /// * `resp` - A mutable reference to a variable provided by the caller
    ///   to contain the status of the request. The caller can safely
    ///   read the variable only after the request is complete.
    ///
    /// # Usage
    ///
    /// See [VirtIOBlk::read_blocks_nb].
    ///
    /// # Safety
    ///
    /// See [VirtIOBlk::read_blocks_nb].
    pub unsafe fn write_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        // Queue the chain first, then notify the device only if needed.
        let token = self
            .queue
            .add(&[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }
351
352    /// Completes a write operation which was started by `write_blocks_nb`.
353    ///
354    /// # Safety
355    ///
356    /// The same buffers must be passed in again as were passed to `write_blocks_nb` when it
357    /// returned the token.
358    pub unsafe fn complete_write_blocks(
359        &mut self,
360        token: u16,
361        req: &BlkReq,
362        buf: &[u8],
363        resp: &mut BlkResp,
364    ) -> Result<()> {
365        self.queue
366            .pop_used(token, &[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
367        resp.status.into()
368    }
369
370    /// Fetches the token of the next completed request from the used ring and returns it, without
371    /// removing it from the used ring. If there are no pending completed requests returns `None`.
372    pub fn peek_used(&mut self) -> Option<u16> {
373        self.queue.peek_used()
374    }
375
376    /// Returns the size of the device's VirtQueue.
377    ///
378    /// This can be used to tell the caller how many channels to monitor on.
379    pub fn virt_queue_size(&self) -> u16 {
380        QUEUE_SIZE
381    }
382}
383
impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
    fn drop(&mut self) {
        // Tell the device to forget the queue's DMA addresses before the queue
        // memory is freed, so the device can't access freed memory.
        self.transport.queue_unset(QUEUE);
    }
}
391
/// Layout of the virtio-blk device configuration space.
///
/// Only the capacity fields are read by this driver; the rest are declared to
/// keep field offsets correct.
#[derive(FromBytes, Immutable, IntoBytes)]
#[repr(C)]
struct BlkConfig {
    /// Number of 512 Bytes sectors (low 32 bits).
    capacity_low: ReadOnly<u32>,
    /// Number of 512 Bytes sectors (high 32 bits).
    capacity_high: ReadOnly<u32>,
    /// Maximum size of any single segment (with SIZE_MAX feature).
    size_max: ReadOnly<u32>,
    /// Maximum number of segments in a request (with SEG_MAX feature).
    seg_max: ReadOnly<u32>,
    // Disk-style geometry (with GEOMETRY feature).
    cylinders: ReadOnly<u16>,
    heads: ReadOnly<u8>,
    sectors: ReadOnly<u8>,
    /// Block size of the disk (with BLK_SIZE feature).
    blk_size: ReadOnly<u32>,
    // Topology fields (with TOPOLOGY feature).
    physical_block_exp: ReadOnly<u8>,
    alignment_offset: ReadOnly<u8>,
    min_io_size: ReadOnly<u16>,
    opt_io_size: ReadOnly<u32>,
    // ... ignored
}
410
/// A VirtIO block device request.
#[repr(C)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
pub struct BlkReq {
    /// The kind of request (read, write, flush, ...).
    type_: ReqType,
    /// Reserved field; always written as 0 by this driver.
    reserved: u32,
    /// The first 512-byte sector the request operates on.
    sector: u64,
}
419
420impl Default for BlkReq {
421    fn default() -> Self {
422        Self {
423            type_: ReqType::In,
424            reserved: 0,
425            sector: 0,
426        }
427    }
428}
429
/// Response of a VirtIOBlk request.
#[repr(C)]
#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct BlkResp {
    /// Status byte written by the device when the request completes.
    status: RespStatus,
}
436
437impl BlkResp {
438    /// Return the status of a VirtIOBlk request.
439    pub fn status(&self) -> RespStatus {
440        self.status
441    }
442}
443
/// Request type codes for the first field of a [`BlkReq`]
/// (the `VIRTIO_BLK_T_*` values from the virtio specification).
#[repr(u32)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
enum ReqType {
    /// Read sectors from the device.
    In = 0,
    /// Write sectors to the device.
    Out = 1,
    /// Flush pending writes to storage.
    Flush = 4,
    /// Fetch the device ID string.
    GetId = 8,
    /// Query storage lifetime information.
    GetLifetime = 10,
    /// Discard (trim) sectors.
    Discard = 11,
    /// Write zeroes to sectors.
    WriteZeroes = 13,
    /// Securely erase sectors.
    SecureErase = 14,
}
456
/// Status of a VirtIOBlk request.
///
/// A newtype over the raw status byte the device writes into a [`BlkResp`].
#[repr(transparent)]
#[derive(Copy, Clone, Debug, Eq, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq)]
pub struct RespStatus(u8);
461
impl RespStatus {
    /// The request completed successfully.
    pub const OK: RespStatus = RespStatus(0);
    /// The request failed with an I/O error.
    pub const IO_ERR: RespStatus = RespStatus(1);
    /// The request type is not supported by the device.
    pub const UNSUPPORTED: RespStatus = RespStatus(2);
    /// Not ready: the initial value of a [`BlkResp`] before the device has
    /// written a real status.
    pub const NOT_READY: RespStatus = RespStatus(3);
}
472
473impl From<RespStatus> for Result {
474    fn from(status: RespStatus) -> Self {
475        match status {
476            RespStatus::OK => Ok(()),
477            RespStatus::IO_ERR => Err(Error::IoError),
478            RespStatus::UNSUPPORTED => Err(Error::Unsupported),
479            RespStatus::NOT_READY => Err(Error::NotReady),
480            _ => Err(Error::IoError),
481        }
482    }
483}
484
485impl Default for BlkResp {
486    fn default() -> Self {
487        BlkResp {
488            status: RespStatus::NOT_READY,
489        }
490    }
491}
492
/// The standard sector size of a VirtIO block device. Data is read and written in multiples of this
/// size, and [`VirtIOBlk::capacity`] is reported in units of it.
pub const SECTOR_SIZE: usize = 512;
496
bitflags! {
    /// Feature bits for a virtio block device, including the
    /// device-independent transport features (bits 24 and up).
    #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
    struct BlkFeature: u64 {
        /// Device supports request barriers. (legacy)
        const BARRIER       = 1 << 0;
        /// Maximum size of any single segment is in `size_max`.
        const SIZE_MAX      = 1 << 1;
        /// Maximum number of segments in a request is in `seg_max`.
        const SEG_MAX       = 1 << 2;
        /// Disk-style geometry specified in geometry.
        const GEOMETRY      = 1 << 4;
        /// Device is read-only.
        const RO            = 1 << 5;
        /// Block size of disk is in `blk_size`.
        const BLK_SIZE      = 1 << 6;
        /// Device supports scsi packet commands. (legacy)
        const SCSI          = 1 << 7;
        /// Cache flush command support.
        const FLUSH         = 1 << 9;
        /// Device exports information on optimal I/O alignment.
        const TOPOLOGY      = 1 << 10;
        /// Device can toggle its cache between writeback and writethrough modes.
        const CONFIG_WCE    = 1 << 11;
        /// Device supports multiqueue.
        const MQ            = 1 << 12;
        /// Device can support discard command, maximum discard sectors size in
        /// `max_discard_sectors` and maximum discard segment number in
        /// `max_discard_seg`.
        const DISCARD       = 1 << 13;
        /// Device can support write zeroes command, maximum write zeroes sectors
        /// size in `max_write_zeroes_sectors` and maximum write zeroes segment
        /// number in `max_write_zeroes_seg`.
        const WRITE_ZEROES  = 1 << 14;
        /// Device supports providing storage lifetime information.
        const LIFETIME      = 1 << 15;
        /// Device can support the secure erase command.
        const SECURE_ERASE  = 1 << 16;

        // device independent
        const NOTIFY_ON_EMPTY       = 1 << 24; // legacy
        const ANY_LAYOUT            = 1 << 27; // legacy
        const RING_INDIRECT_DESC    = 1 << 28;
        const RING_EVENT_IDX        = 1 << 29;
        const UNUSED                = 1 << 30; // legacy
        const VERSION_1             = 1 << 32; // detect legacy

        // the following since virtio v1.1
        const ACCESS_PLATFORM       = 1 << 33;
        const RING_PACKED           = 1 << 34;
        const IN_ORDER              = 1 << 35;
        const ORDER_PLATFORM        = 1 << 36;
        const SR_IOV                = 1 << 37;
        const NOTIFICATION_DATA     = 1 << 38;
    }
}
552
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::{
            fake::{FakeTransport, QueueStatus, State},
            DeviceType,
        },
    };
    use alloc::{sync::Arc, vec};
    use core::mem::size_of;
    use std::{sync::Mutex, thread};

    #[test]
    fn config() {
        // Capacity split across the two 32-bit halves: 0x02_0000_0042 sectors.
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(0x42),
            capacity_high: ReadOnly::new(0x02),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RO.bits(),
            state: state.clone(),
        };
        let blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        assert_eq!(blk.capacity(), 0x02_0000_0042);
        assert_eq!(blk.readonly(), true);
    }

    #[test]
    fn read() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a read request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    // The driver should have sent a read request for sector 42.
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::In,
                            reserved: 0,
                            sector: 42
                        }
                        .as_bytes()
                    );

                    // Reply with one sector of data followed by an OK status.
                    let mut response = vec![0; SECTOR_SIZE];
                    response[0..9].copy_from_slice(b"Test data");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        // Read a block from the device.
        let mut buffer = [0; 512];
        blk.read_blocks(42, &mut buffer).unwrap();
        assert_eq!(&buffer[0..9], b"Test data");

        handle.join().unwrap();
    }

    #[test]
    fn write() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a write request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    // The request should be a write header for sector 42...
                    assert_eq!(
                        &request[0..size_of::<BlkReq>()],
                        BlkReq {
                            type_: ReqType::Out,
                            reserved: 0,
                            sector: 42
                        }
                        .as_bytes()
                    );
                    // ...followed by exactly one sector of payload.
                    let data = &request[size_of::<BlkReq>()..];
                    assert_eq!(data.len(), SECTOR_SIZE);
                    assert_eq!(&data[0..9], b"Test data");

                    // Reply with just an OK status (writes return no data).
                    let mut response = Vec::new();
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        // Write a block to the device.
        let mut buffer = [0; 512];
        buffer[0..9].copy_from_slice(b"Test data");
        blk.write_blocks(42, &mut buffer).unwrap();

        // Request to flush should be ignored as the device doesn't support it.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    #[test]
    fn flush() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        // Unlike the `write` test, the FLUSH feature is offered here.
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: (BlkFeature::RING_INDIRECT_DESC | BlkFeature::FLUSH).bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a flush request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::Flush,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    let mut response = Vec::new();
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        // Request to flush.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    #[test]
    fn device_id() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a device ID request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::GetId,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    // Reply with a NUL-padded 20-byte ID plus an OK status.
                    let mut response = Vec::new();
                    response.extend_from_slice(b"device_id\0\0\0\0\0\0\0\0\0\0\0");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        let mut id = [0; 20];
        let length = blk.device_id(&mut id).unwrap();
        assert_eq!(&id[0..length], b"device_id");

        handle.join().unwrap();
    }
}