sel4_async_block_io/
when_alloc.rs
1use alloc::rc::Rc;
8use alloc::vec;
9use alloc::vec::Vec;
10use core::cell::RefCell;
11use core::num::NonZeroUsize;
12use core::ops::Deref;
13
14use futures::future;
15use lru::LruCache;
16
17use crate::{wrapper_methods, Access, BlockIO, BlockIOLayout, BlockSize, Operation};
18
/// A block size chosen at runtime, always a power of two.
pub struct DynamicBlockSize {
    // log2 of the block size in bytes; `bytes()` computes `1 << bits`.
    bits: usize,
}
22
23impl DynamicBlockSize {
24 pub fn new(bits: usize) -> Self {
25 Self { bits }
26 }
27}
28
29impl BlockSize for DynamicBlockSize {
30 type Block = Vec<u8>;
31
32 fn bytes(&self) -> usize {
33 1 << self.bits
34 }
35
36 fn zeroed_block(&self) -> Self::Block {
37 vec![0; self.bytes()]
38 }
39}
40
41impl<T: BlockIOLayout> BlockIOLayout for Rc<T> {
42 type Error = T::Error;
43
44 type BlockSize = T::BlockSize;
45
46 fn block_size(&self) -> Self::BlockSize {
47 self.deref().block_size()
48 }
49
50 fn num_blocks(&self) -> u64 {
51 self.deref().num_blocks()
52 }
53}
54
55impl<T: BlockIO<A>, A: Access> BlockIO<A> for Rc<T> {
56 async fn read_or_write_blocks(
57 &self,
58 start_block_idx: u64,
59 operation: Operation<'_, A>,
60 ) -> Result<(), Self::Error> {
61 self.deref()
62 .read_or_write_blocks(start_block_idx, operation)
63 .await
64 }
65}
66
/// A [`BlockIO`] wrapper that adds an in-memory LRU cache of whole blocks.
#[derive(Debug)]
pub struct CachedBlockIO<T: BlockIOLayout> {
    // The underlying block device.
    inner: T,
    // Cache of whole blocks keyed by block index. RefCell provides interior
    // mutability so cache updates can happen through `&self`.
    lru: RefCell<LruCache<u64, <T::BlockSize as BlockSize>::Block>>,
}
72
73impl<T: BlockIOLayout> CachedBlockIO<T> {
74 pub fn new(inner: T, cache_size_in_blocks: usize) -> Self {
75 Self {
76 inner,
77 lru: RefCell::new(LruCache::new(
78 NonZeroUsize::new(cache_size_in_blocks).unwrap(),
79 )),
80 }
81 }
82
83 wrapper_methods!(T);
84}
85
86impl<T: BlockIOLayout> BlockIOLayout for CachedBlockIO<T> {
87 type Error = <T as BlockIOLayout>::Error;
88
89 type BlockSize = <T as BlockIOLayout>::BlockSize;
90
91 fn block_size(&self) -> Self::BlockSize {
92 <T as BlockIOLayout>::block_size(self.inner())
93 }
94
95 fn num_blocks(&self) -> u64 {
96 <T as BlockIOLayout>::num_blocks(self.inner())
97 }
98}
99
impl<T: BlockIO<A>, A: Access> BlockIO<A> for CachedBlockIO<T> {
    /// Services a multi-block read/write by splitting it into one operation
    /// per block, running them concurrently, and consulting/updating the LRU
    /// cache per block.
    ///
    /// Reads are served from the cache when possible; misses fetch the whole
    /// block from `inner` and populate the cache. Writes are write-through:
    /// the device is updated first, then the cache.
    ///
    /// Note: each `borrow_mut()` temporary is dropped at the end of its own
    /// statement, before any `.await`, so the RefCell is never held across a
    /// suspension point.
    async fn read_or_write_blocks(
        &self,
        start_block_idx: u64,
        mut operation: Operation<'_, A>,
    ) -> Result<(), Self::Error> {
        // The operation must cover a whole number of blocks.
        assert_eq!(operation.len() % self.block_size().bytes(), 0);
        // One future per block-sized chunk; try_join_all runs them
        // concurrently and short-circuits on the first error.
        future::try_join_all(operation.chunks(self.block_size().bytes()).enumerate().map(
            |(i, block_operation)| async move {
                // Absolute block index for this chunk; panics on u64 overflow.
                let block_idx = start_block_idx.checked_add(i.try_into().unwrap()).unwrap();
                match block_operation {
                    Operation::Read { buf, witness } => {
                        // Cache hit: copy the cached block into the caller's
                        // buffer (get() also bumps LRU recency).
                        let cached = self
                            .lru
                            .borrow_mut()
                            .get(&block_idx)
                            .map(|block| {
                                buf.copy_from_slice(block.as_ref());
                            })
                            .is_some();
                        if !cached {
                            // Miss: read the whole block from the device into
                            // a scratch block, serve the caller, then cache it.
                            let mut block = self.block_size().zeroed_block();
                            self.inner
                                .read_or_write_blocks(
                                    block_idx,
                                    Operation::Read {
                                        buf: block.as_mut(),
                                        witness,
                                    },
                                )
                                .await?;
                            buf.copy_from_slice(block.as_ref());
                            // Ignore the evicted entry, if any.
                            let _ = self.lru.borrow_mut().put(block_idx, block);
                        }
                    }
                    Operation::Write { buf, witness } => {
                        // Write-through: update the device first, then mirror
                        // the written bytes into the cache.
                        self.inner
                            .read_or_write_blocks(block_idx, Operation::Write { buf, witness })
                            .await?;
                        let mut block = self.block_size().zeroed_block();
                        block.as_mut().copy_from_slice(buf);
                        let _ = self.lru.borrow_mut().put(block_idx, block);
                    }
                }
                Ok(())
            },
        ))
        .await?;
        Ok(())
    }
}