sel4_async_block_io/
lib.rs

//
// Copyright 2023, Colias Group, LLC
//
// SPDX-License-Identifier: BSD-2-Clause
//

#![no_std]

#[cfg(feature = "alloc")]
extern crate alloc;

use core::cell::RefCell;
use core::convert::Infallible;
use core::fmt;
use core::ops::Range;

use futures::future;

pub mod access;
pub mod disk;

mod operation;

pub use operation::{Operation, OperationType};

use access::{Access, ReadAccess, ReadOnly, ReadWrite, WriteAccess};

#[cfg(feature = "alloc")]
mod when_alloc;

#[cfg(feature = "alloc")]
pub use when_alloc::{CachedBlockIO, DynamicBlockSize};

pub trait BlockIOLayout {
    type Error: fmt::Debug;

    type BlockSize: BlockSize;

    fn block_size(&self) -> Self::BlockSize;

    fn num_blocks(&self) -> u64;
}

pub trait BlockIO<A: Access>: BlockIOLayout {
    #[allow(async_fn_in_trait)]
    async fn read_or_write_blocks(
        &self,
        start_block_idx: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), Self::Error>;

    #[allow(async_fn_in_trait)]
    async fn read_blocks(&self, start_block_idx: u64, buf: &mut [u8]) -> Result<(), Self::Error>
    where
        A: ReadAccess,
    {
        self.read_or_write_blocks(
            start_block_idx,
            Operation::Read {
                buf,
                witness: A::READ_WITNESS,
            },
        )
        .await
    }

    #[allow(async_fn_in_trait)]
    async fn write_blocks(&self, start_block_idx: u64, buf: &[u8]) -> Result<(), Self::Error>
    where
        A: WriteAccess,
    {
        self.read_or_write_blocks(
            start_block_idx,
            Operation::Write {
                buf,
                witness: A::WRITE_WITNESS,
            },
        )
        .await
    }
}
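// Illustrative usage sketch (not part of the upstream crate): given some
// `dev: impl BlockIO<ReadWrite>` (an assumed binding) with at least two
// blocks, copying block 0 over block 1 might look like this:
//
//     let mut block = dev.block_size().zeroed_block();
//     dev.read_blocks(0, block.as_mut()).await?;
//     dev.write_blocks(1, block.as_ref()).await?;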

pub trait BlockSize {
    type Block: AsRef<[u8]> + AsMut<[u8]>;

    fn bytes(&self) -> usize;

    fn bytes_u64(&self) -> u64 {
        self.bytes().try_into().unwrap()
    }

    fn zeroed_block(&self) -> Self::Block;
}

pub trait ConstantBlockSize: BlockSize {
    const BLOCK_SIZE: Self;

    const BYTES: usize;
}

pub trait HasNextBlockSize: ConstantBlockSize {
    type NextBlockSize: ConstantBlockSize;
}

pub trait HasPrevBlockSize: ConstantBlockSize {
    type PrevBlockSize: ConstantBlockSize;
}

pub mod constant_block_sizes {
    use super::{BlockSize, ConstantBlockSize, HasNextBlockSize, HasPrevBlockSize};

    macro_rules! declare_block_size {
        ($ident:ident, $n:literal) => {
            #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
            pub struct $ident;

            impl BlockSize for $ident {
                type Block = [u8; $n];

                fn bytes(&self) -> usize {
                    Self::BYTES
                }

                fn zeroed_block(&self) -> Self::Block {
                    [0; $n]
                }
            }

            impl ConstantBlockSize for $ident {
                const BLOCK_SIZE: Self = $ident;

                const BYTES: usize = $n;
            }
        };
    }

    macro_rules! declare_next_block_size {
        ($cur:ident, $next:ident) => {
            impl HasNextBlockSize for $cur {
                type NextBlockSize = $next;
            }

            impl HasPrevBlockSize for $next {
                type PrevBlockSize = $cur;
            }
        };
    }

    declare_block_size!(BlockSize512, 512);
    declare_block_size!(BlockSize1024, 1024);
    declare_block_size!(BlockSize2048, 2048);
    declare_block_size!(BlockSize4096, 4096);
    declare_block_size!(BlockSize8192, 8192);

    declare_next_block_size!(BlockSize512, BlockSize1024);
    declare_next_block_size!(BlockSize1024, BlockSize2048);
    declare_next_block_size!(BlockSize2048, BlockSize4096);
    declare_next_block_size!(BlockSize4096, BlockSize8192);
}
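// Illustrative sketch of the generated marker types (names taken from the
// declarations above):
//
//     use constant_block_sizes::BlockSize512;
//     let bs = BlockSize512;
//     assert_eq!(bs.bytes(), BlockSize512::BYTES); // both are 512
//     let block: [u8; 512] = bs.zeroed_block();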

impl<T: BlockIOLayout> BlockIOLayout for &T {
    type Error = T::Error;

    type BlockSize = T::BlockSize;

    fn block_size(&self) -> Self::BlockSize {
        T::block_size(self)
    }

    fn num_blocks(&self) -> u64 {
        T::num_blocks(self)
    }
}

impl<T: BlockIO<A>, A: Access> BlockIO<A> for &T {
    async fn read_or_write_blocks(
        &self,
        start_block_idx: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), <&T as BlockIOLayout>::Error> {
        T::read_or_write_blocks(self, start_block_idx, operation).await
    }
}

macro_rules! wrapper_methods {
    ($inner:path) => {
        pub fn into_inner(self) -> $inner {
            self.inner
        }

        pub const fn inner(&self) -> &$inner {
            &self.inner
        }

        pub fn inner_mut(&mut self) -> &mut $inner {
            &mut self.inner
        }
    };
}

use wrapper_methods;

#[derive(Clone, Debug)]
pub struct NextBlockSizeAdapter<T> {
    inner: T,
}

impl<T> NextBlockSizeAdapter<T> {
    pub fn new(inner: T) -> Self {
        Self { inner }
    }

    wrapper_methods!(T);
}

impl<T: BlockIOLayout<BlockSize: HasNextBlockSize>> BlockIOLayout for NextBlockSizeAdapter<T> {
    type Error = T::Error;

    type BlockSize = <T::BlockSize as HasNextBlockSize>::NextBlockSize;

    fn block_size(&self) -> Self::BlockSize {
        Self::BlockSize::BLOCK_SIZE
    }

    fn num_blocks(&self) -> u64 {
        let inner_num_blocks = self.inner().num_blocks();
        assert_eq!(inner_num_blocks % 2, 0);
        inner_num_blocks / 2
    }
}

impl<T: BlockIO<A, BlockSize: HasNextBlockSize>, A: Access> BlockIO<A> for NextBlockSizeAdapter<T> {
    async fn read_or_write_blocks(
        &self,
        start_block_idx: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), Self::Error> {
        let inner_start_block_idx = start_block_idx.checked_mul(2).unwrap();
        self.inner()
            .read_or_write_blocks(inner_start_block_idx, operation)
            .await
    }
}
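// Illustrative sketch (assumed `dev_512` implements `BlockIO` with
// `BlockSize512` blocks): the adapter presents 1024-byte blocks, so block `i`
// covers inner blocks `2 * i` and `2 * i + 1`, and the block count halves.
//
//     let doubled = NextBlockSizeAdapter::new(&dev_512);
//     assert_eq!(doubled.num_blocks(), dev_512.num_blocks() / 2);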

#[derive(Clone, Debug)]
pub struct PrevBlockSizeAdapter<T> {
    inner: T,
}

impl<T> PrevBlockSizeAdapter<T> {
    pub fn new(inner: T) -> Self {
        Self { inner }
    }

    wrapper_methods!(T);
}

impl<T: BlockIOLayout<BlockSize: HasPrevBlockSize>> BlockIOLayout for PrevBlockSizeAdapter<T> {
    type Error = T::Error;

    type BlockSize = <T::BlockSize as HasPrevBlockSize>::PrevBlockSize;

    fn block_size(&self) -> Self::BlockSize {
        Self::BlockSize::BLOCK_SIZE
    }

    fn num_blocks(&self) -> u64 {
        self.inner().num_blocks().checked_mul(2).unwrap()
    }
}

impl<T: BlockIO<A, BlockSize: HasPrevBlockSize>, A: ReadAccess> BlockIO<A>
    for PrevBlockSizeAdapter<T>
{
    async fn read_or_write_blocks(
        &self,
        start_block_idx: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), Self::Error> {
        let block_size = Self::BlockSize::BYTES.try_into().unwrap();
        let start_byte_idx = start_block_idx.checked_mul(block_size).unwrap();
        read_or_write_bytes(self.inner(), start_byte_idx, operation).await
    }
}
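// Note the `A: ReadAccess` bound above: with the block size halved, writing a
// single small block only covers part of an inner block, so the adapter goes
// through `read_or_write_bytes`, which read-modify-writes partial blocks.
// Illustrative sketch (assumed `dev_1024` has `BlockSize1024` blocks):
//
//     let halved = PrevBlockSizeAdapter::new(&dev_1024);
//     assert_eq!(halved.num_blocks(), dev_1024.num_blocks() * 2);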

#[derive(Clone, Debug)]
pub struct Partition<T> {
    inner: T,
    range: Range<u64>,
}

impl<T: BlockIO<ReadOnly>> Partition<T> {
    pub fn new(inner: T, range: Range<u64>) -> Self {
        assert!(range.start <= range.end);
        assert!(range.end <= inner.num_blocks());
        Self { inner, range }
    }
}

impl<T> Partition<T> {
    wrapper_methods!(T);
}

impl<T: BlockIOLayout> BlockIOLayout for Partition<T> {
    type Error = T::Error;

    type BlockSize = T::BlockSize;

    fn block_size(&self) -> Self::BlockSize {
        self.inner().block_size()
    }

    fn num_blocks(&self) -> u64 {
        self.range.end - self.range.start
    }
}

impl<T: BlockIO<A>, A: Access> BlockIO<A> for Partition<T> {
    async fn read_or_write_blocks(
        &self,
        start_block_idx: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), Self::Error> {
        assert!(
            start_block_idx
                + u64::try_from(operation.len()).unwrap() / self.block_size().bytes_u64()
                <= self.num_blocks()
        );
        let inner_block_idx = self.range.start + start_block_idx;
        self.inner()
            .read_or_write_blocks(inner_block_idx, operation)
            .await
    }
}
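// Illustrative sketch (assumed `dev: impl BlockIO<ReadOnly>` with at least
// 4096 blocks, and an assumed one-block `buf`): expose blocks 2048..4096 as a
// standalone device whose block 0 maps to inner block 2048.
//
//     let part = Partition::new(&dev, 2048..4096);
//     assert_eq!(part.num_blocks(), 2048);
//     part.read_blocks(0, buf.as_mut()).await?;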

pub trait ByteIOLayout {
    type Error: fmt::Debug;

    fn size(&self) -> u64;
}

pub trait ByteIO<A: Access>: ByteIOLayout {
    #[allow(async_fn_in_trait)]
    async fn read_or_write(
        &self,
        offset: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), Self::Error>;

    #[allow(async_fn_in_trait)]
    async fn read(&self, offset: u64, buf: &mut [u8]) -> Result<(), Self::Error>
    where
        A: ReadAccess,
    {
        self.read_or_write(offset, Operation::read(buf)).await
    }

    #[allow(async_fn_in_trait)]
    async fn write(&self, offset: u64, buf: &[u8]) -> Result<(), Self::Error>
    where
        A: WriteAccess,
    {
        self.read_or_write(offset, Operation::write(buf)).await
    }
}

#[derive(Clone, Debug)]
pub struct ByteIOAdapter<T> {
    inner: T,
}

impl<T> ByteIOAdapter<T> {
    pub fn new(inner: T) -> Self {
        Self { inner }
    }

    wrapper_methods!(T);
}

impl<T: BlockIOLayout> ByteIOLayout for ByteIOAdapter<T> {
    type Error = T::Error;

    fn size(&self) -> u64 {
        self.inner().num_blocks() * self.inner().block_size().bytes_u64()
    }
}

impl<T: BlockIO<A>, A: ReadAccess> ByteIO<A> for ByteIOAdapter<T> {
    async fn read_or_write(
        &self,
        offset: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), Self::Error> {
        read_or_write_bytes(self.inner(), offset, operation).await
    }
}
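// Illustrative sketch (assumed `dev: impl BlockIO<ReadOnly>`): view a block
// device as a flat byte range; offsets need not be block-aligned, since
// `read_or_write_bytes` below splits accesses into whole and partial blocks.
//
//     let bytes = ByteIOAdapter::new(&dev);
//     let mut header = [0u8; 16];
//     bytes.read(10, &mut header).await?;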

#[derive(Clone, Debug)]
pub struct BlockIOAdapter<T, N> {
    inner: T,
    block_size: N,
}

impl<T, N> BlockIOAdapter<T, N> {
    pub fn new(inner: T, block_size: N) -> Self {
        Self { inner, block_size }
    }

    wrapper_methods!(T);
}

impl<T: ByteIOLayout, N: BlockSize + Copy> BlockIOLayout for BlockIOAdapter<T, N> {
    type Error = T::Error;

    type BlockSize = N;

    fn block_size(&self) -> Self::BlockSize {
        self.block_size
    }

    fn num_blocks(&self) -> u64 {
        self.inner().size() / self.block_size().bytes_u64()
    }
}

impl<T: ByteIO<A>, A: Access, N: BlockSize + Copy> BlockIO<A> for BlockIOAdapter<T, N> {
    async fn read_or_write_blocks(
        &self,
        start_block_idx: u64,
        operation: Operation<'_, A>,
    ) -> Result<(), Self::Error> {
        let start_byte_idx = start_block_idx
            .checked_mul(self.block_size().bytes_u64())
            .unwrap();
        self.inner().read_or_write(start_byte_idx, operation).await
    }
}
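// Illustrative sketch (assumed `image: &[u8]` whose length is a multiple of
// 512): the inverse adapter imposes a block structure on any `ByteIO`, here an
// in-memory image.
//
//     use constant_block_sizes::BlockSize512;
//     let blocks = BlockIOAdapter::new(SliceByteIO::new(image), BlockSize512);
//     assert_eq!(blocks.num_blocks(), image.len() as u64 / 512);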

pub struct SliceByteIO<T> {
    inner: T,
}

impl<T> SliceByteIO<T> {
    pub fn new(inner: T) -> Self {
        Self { inner }
    }

    wrapper_methods!(T);
}

impl<T: AsRef<[u8]>> ByteIOLayout for SliceByteIO<T> {
    type Error = Infallible;

    fn size(&self) -> u64 {
        self.inner().as_ref().len().try_into().unwrap()
    }
}

impl<T: AsRef<[u8]>> ByteIO<ReadOnly> for SliceByteIO<T> {
    async fn read_or_write(
        &self,
        offset: u64,
        operation: Operation<'_, ReadOnly>,
    ) -> Result<(), Self::Error> {
        let offset = offset.try_into().unwrap();
        match operation {
            Operation::Read { buf, .. } => {
                buf.copy_from_slice(&self.inner().as_ref()[offset..][..buf.len()]);
            }
            #[allow(unreachable_patterns)]
            Operation::Write { witness, .. } => witness.absurd(),
        }
        Ok(())
    }
}

impl<T: AsRef<[u8]> + AsMut<[u8]>> ByteIOLayout for RefCell<SliceByteIO<T>> {
    type Error = Infallible;

    fn size(&self) -> u64 {
        self.borrow().size()
    }
}

impl<T: AsRef<[u8]> + AsMut<[u8]>> ByteIO<ReadWrite> for RefCell<SliceByteIO<T>> {
    async fn read_or_write(
        &self,
        offset: u64,
        operation: Operation<'_, ReadWrite>,
    ) -> Result<(), Self::Error> {
        let offset = offset.try_into().unwrap();
        match operation {
            Operation::Read { buf, .. } => {
                buf.copy_from_slice(&self.borrow().inner().as_ref()[offset..][..buf.len()]);
            }
            Operation::Write { buf, .. } => {
                self.borrow_mut().inner_mut().as_mut()[offset..][..buf.len()].copy_from_slice(buf);
            }
        }
        Ok(())
    }
}
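// Illustrative sketch: a bare `SliceByteIO` is read-only, while wrapping it in
// `RefCell` provides the interior mutability needed for the `ReadWrite`
// implementation above. The buffers here are assumptions for the example only.
//
//     let mut backing = [0u8; 1024];
//     let rw = RefCell::new(SliceByteIO::new(&mut backing[..]));
//     rw.write(100, b"hello").await?;
//     let mut readback = [0u8; 5];
//     rw.read(100, &mut readback).await?;
//     assert_eq!(&readback, b"hello");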

async fn read_or_write_partial_block<T: BlockIO<A>, A: ReadAccess>(
    io: &T,
    block_idx: u64,
    offset_into_block: usize,
    operation: Operation<'_, A>,
) -> Result<(), T::Error> {
    assert!(offset_into_block + operation.len() <= io.block_size().bytes());
    let mut block_buf = io.block_size().zeroed_block();
    io.read_blocks(block_idx, block_buf.as_mut()).await?;
    match operation {
        Operation::Read { buf, .. } => {
            buf.copy_from_slice(&block_buf.as_ref()[offset_into_block..][..buf.len()]);
        }
        Operation::Write { buf, witness } => {
            block_buf.as_mut()[offset_into_block..][..buf.len()].copy_from_slice(buf);
            io.read_or_write_blocks(
                block_idx,
                Operation::Write {
                    buf: block_buf.as_ref(),
                    witness,
                },
            )
            .await?;
        }
    }
    Ok(())
}

async fn read_or_write_bytes<T: BlockIO<A>, A: ReadAccess>(
    io: &T,
    offset: u64,
    mut operation: Operation<'_, A>,
) -> Result<(), T::Error> {
    let block_size = io.block_size().bytes();
    let block_size_u64 = io.block_size().bytes_u64();
    let byte_offset_of_first_full_block = offset.next_multiple_of(block_size_u64);
    let byte_offset_of_first_full_block_in_buf =
        usize::try_from(byte_offset_of_first_full_block - offset).unwrap();
    let first_full_block_idx = byte_offset_of_first_full_block / block_size_u64;
    if byte_offset_of_first_full_block > offset + u64::try_from(operation.len()).unwrap() {
        // The operation lies entirely within a single block and does not reach
        // its end, so it reduces to one partial-block access.
        let block_idx = first_full_block_idx - 1;
        let offset_into_block = offset - block_idx * block_size_u64;
        read_or_write_partial_block(
            io,
            block_idx,
            offset_into_block.try_into().unwrap(),
            operation,
        )
        .await?;
    } else {
        // Computed here rather than before the branch above, where the
        // subtraction would underflow for operations that end before the
        // first block boundary.
        let num_full_blocks =
            (operation.len() - byte_offset_of_first_full_block_in_buf) / block_size;
        let (left_partial_block, mut rest) =
            operation.split_at(byte_offset_of_first_full_block_in_buf);
        let (full_blocks, right_partial_block) = rest.split_at(num_full_blocks * block_size);
        future::try_join3(
            async {
                io.read_or_write_blocks(first_full_block_idx, full_blocks)
                    .await
            },
            async {
                if !left_partial_block.is_empty() {
                    let block_idx = first_full_block_idx - 1;
                    let offset_into_block = block_size - left_partial_block.len();
                    read_or_write_partial_block(
                        io,
                        block_idx,
                        offset_into_block,
                        left_partial_block,
                    )
                    .await?;
                }
                Ok(())
            },
            async {
                if !right_partial_block.is_empty() {
                    let block_idx = first_full_block_idx + u64::try_from(num_full_blocks).unwrap();
                    let offset_into_block = 0;
                    read_or_write_partial_block(
                        io,
                        block_idx,
                        offset_into_block,
                        right_partial_block,
                    )
                    .await?;
                }
                Ok(())
            },
        )
        .await?;
    }
    Ok(())
}
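// Worked example of the decomposition above (512-byte blocks, a 2000-byte
// operation at byte offset 700):
//
//   - left partial block:  block 1, bytes 700..1024 (324 bytes),
//   - full blocks:         blocks 2..=4, bytes 1024..2560 (1536 bytes),
//   - right partial block: block 5, bytes 2560..2700 (140 bytes),
//
// with the three parts issued concurrently via `future::try_join3`.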

pub async fn read_bytes<T: BlockIO<A>, A: ReadAccess>(
    io: &T,
    offset: u64,
    buf: &mut [u8],
) -> Result<(), T::Error> {
    read_or_write_bytes(io, offset, Operation::read(buf)).await
}

pub async fn write_bytes<T: BlockIO<A>, A: ReadAccess + WriteAccess>(
    io: &T,
    offset: u64,
    buf: &[u8],
) -> Result<(), T::Error> {
    read_or_write_bytes(io, offset, Operation::write(buf)).await
}
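// Illustrative sketch (assumed `dev: impl BlockIO<ReadWrite>` plus `payload`
// and `check` buffers of equal length): byte-granularity access to a block
// device. `write_bytes` requires `ReadAccess + WriteAccess` because partial
// blocks are handled by read-modify-write.
//
//     write_bytes(&dev, 700, &payload).await?;
//     read_bytes(&dev, 700, &mut check).await?;
//     assert_eq!(&check[..], &payload[..]);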