sel4_shared_ring_buffer_smoltcp/inner.rs

//
// Copyright 2023, Colias Group, LLC
//
// SPDX-License-Identifier: BSD-2-Clause
//

use core::alloc::Layout;
use core::marker::PhantomData;
use core::ptr::NonNull;

use smoltcp::phy::DeviceCapabilities;

use sel4_abstract_allocator::{AbstractAllocator, AbstractAllocatorAllocation};
use sel4_shared_memory::SharedMemoryRef;
use sel4_shared_ring_buffer::{
    roles::Provide, Descriptor, PeerMisbehaviorError as SharedRingBuffersPeerMisbehaviorError,
    RingBuffers,
};
use sel4_shared_ring_buffer_bookkeeping::slot_tracker::*;

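// Shared state behind this crate's smoltcp device adapter: the DMA region shared with
// the peer, a bounce buffer allocator whose allocations are ranges within that region,
// the RX and TX shared ring buffers (this side plays the `Provide` role), slot trackers
// for the per-buffer state machines, and the `DeviceCapabilities` exposed via `caps()`.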
pub(crate) struct Inner<A: AbstractAllocator> {
    dma_region: SharedMemoryRef<'static, [u8]>,
    bounce_buffer_allocator: A,
    rx_ring_buffers: RingBuffers<'static, Provide, fn()>,
    tx_ring_buffers: RingBuffers<'static, Provide, fn()>,
    rx_buffers: SlotTracker<RxStateTypesImpl>,
    tx_buffers: SlotTracker<TxStateTypesImpl<A>>,
    caps: DeviceCapabilities,
}

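// Index of a slot in `rx_buffers`. It doubles as the cookie carried by RX descriptors,
// so a descriptor coming back on the used ring can be mapped to its slot.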
pub(crate) type RxBufferIndex = usize;

enum RxStateTypesImpl {}

impl SlotStateTypes for RxStateTypesImpl {
    type Common = Descriptor;
    type Free = RxFree;
    type Occupied = RxOccupied;
}

struct RxFree {
    len: usize,
}

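// RX slot lifecycle: a slot starts `Occupied(Waiting)` with its descriptor lent to the
// peer on the RX free ring; when the descriptor comes back on the used ring, `poll`
// moves the slot to `Free(RxFree { len })`, recording the length reported by the peer;
// claiming the buffer for a receive moves it to `Occupied(Claimed { len })`, and
// `drop_rx` returns it to `Waiting` and re-enqueues its descriptor.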
enum RxOccupied {
    Waiting,
    Claimed { len: usize },
}

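// Index of a slot in `tx_buffers`, carried as the cookie on TX descriptors.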
pub(crate) type TxBufferIndex = usize;

struct TxStateTypesImpl<A> {
    _phantom: PhantomData<A>,
}

impl<A: AbstractAllocator> SlotStateTypes for TxStateTypesImpl<A> {
    type Common = ();
    type Free = ();
    type Occupied = TxOccupied<A>;
}

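// TX slot lifecycle: `Claimed` marks a slot reserved for a transmit that has not been
// written yet; `Sent` holds the bounce buffer allocation backing a frame that has been
// enqueued to the peer, kept so it can be deallocated once the descriptor returns on
// the used ring.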
enum TxOccupied<A: AbstractAllocator> {
    Claimed,
    Sent { allocation: A::Allocation },
}

impl<A: AbstractAllocator> Inner<A> {
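    // Allocates `num_rx_buffers` bounce buffers of `rx_buffer_size` bytes, marks the
    // corresponding RX slots as `Waiting`, and hands their descriptors to the peer on
    // the RX free ring. The TX slot tracker is sized to the capacity of the TX free
    // ring.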
    pub(crate) fn new(
        dma_region: SharedMemoryRef<'static, [u8]>,
        mut bounce_buffer_allocator: A,
        mut rx_ring_buffers: RingBuffers<'static, Provide, fn()>,
        tx_ring_buffers: RingBuffers<'static, Provide, fn()>,
        num_rx_buffers: usize,
        rx_buffer_size: usize,
        caps: DeviceCapabilities,
    ) -> Result<Self, Error> {
        let rx_buffers = SlotTracker::new_occupied((0..num_rx_buffers).map(|i| {
            let allocation = bounce_buffer_allocator
                .allocate(Layout::from_size_align(rx_buffer_size, 1).unwrap())
                .map_err(|_| Error::BounceBufferAllocationError)
                .unwrap();
            let desc = Descriptor::from_encoded_addr_range(allocation.range(), i);
            rx_ring_buffers
                .free_mut()
                .enqueue_and_commit(desc)
                .unwrap()
                .unwrap();
            (desc, RxOccupied::Waiting)
        }));

        let tx_buffers = SlotTracker::new_with_capacity((), (), tx_ring_buffers.free().capacity());

        Ok(Self {
            dma_region,
            bounce_buffer_allocator,
            rx_ring_buffers,
            tx_ring_buffers,
            rx_buffers,
            tx_buffers,
            caps,
        })
    }

    pub(crate) fn caps(&self) -> &DeviceCapabilities {
        &self.caps
    }

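    // Drains both used rings. RX descriptors returned by the peer are validated against
    // the descriptor originally provided for that slot, and the slot is moved to the
    // free state with the reported length; TX descriptors release the bounce buffer
    // allocation recorded in `Sent`. Returns Ok(true) if any descriptor was processed,
    // and notifies the corresponding ring when progress was made.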
    pub(crate) fn poll(&mut self) -> Result<bool, PeerMisbehaviorError> {
        let mut notify_rx = false;

        while let Some(desc) = self.rx_ring_buffers.used_mut().dequeue()? {
            let ix = desc.cookie();
            if ix >= self.rx_buffers.capacity() {
                return Err(PeerMisbehaviorError::OutOfBoundsCookie);
            }

            let provided_desc = self.rx_buffers.get_common_value(ix).unwrap();
            if desc.encoded_addr() != provided_desc.encoded_addr()
                || desc.len() > provided_desc.len()
            {
                return Err(PeerMisbehaviorError::DescriptorMismatch);
            }

            if !matches!(
                self.rx_buffers.get_state_value(ix).unwrap(),
                SlotStateValueRef::Occupied(RxOccupied::Waiting)
            ) {
                return Err(PeerMisbehaviorError::StateMismatch);
            }

            self.rx_buffers
                .free(
                    ix,
                    RxFree {
                        len: desc.encoded_addr_range().len(),
                    },
                )
                .unwrap();

            notify_rx = true;
        }

        if notify_rx {
            self.rx_ring_buffers.notify();
        }

        let mut notify_tx = false;

        while let Some(desc) = self.tx_ring_buffers.used_mut().dequeue()? {
            let ix = desc.cookie();

            match self.tx_buffers.free(ix, ()) {
                Ok(TxOccupied::Sent { allocation }) => {
                    self.bounce_buffer_allocator.deallocate(allocation);
                }
                Err(err) => {
                    return Err(match err {
                        SlotTrackerError::OutOfBounds => PeerMisbehaviorError::OutOfBoundsCookie,
                        SlotTrackerError::StateMismatch => PeerMisbehaviorError::StateMismatch,
                    })
                }
                _ => {
                    return Err(PeerMisbehaviorError::StateMismatch);
                }
            }

            notify_tx = true;
        }

        if notify_tx {
            self.tx_ring_buffers.notify();
        }

        Ok(notify_rx || notify_tx)
    }

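    // Receiving requires both a filled RX buffer and a free TX slot, because smoltcp
    // hands out receive and transmit tokens as a pair.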
    pub(crate) fn can_receive(&mut self) -> bool {
        self.can_claim_rx_buffer() && self.can_claim_tx_buffer()
    }

    pub(crate) fn can_transmit(&mut self) -> bool {
        self.can_claim_tx_buffer()
    }

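    // Claims one RX buffer and one TX slot together, for use as an (RX, TX) token pair.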
    pub(crate) fn receive(&mut self) -> Option<(RxBufferIndex, TxBufferIndex)> {
        if self.can_receive() {
            let rx = self.claim_rx_buffer().unwrap();
            let tx = self.claim_tx_buffer().unwrap();
            Some((rx, tx))
        } else {
            None
        }
    }

    pub(crate) fn transmit(&mut self) -> Option<TxBufferIndex> {
        self.claim_tx_buffer()
    }

    fn can_claim_rx_buffer(&self) -> bool {
        self.rx_buffers.num_free() > 0
    }

    fn claim_rx_buffer(&mut self) -> Option<RxBufferIndex> {
        let len = self.rx_buffers.peek_next_free_value()?.len;
        let (ix, _) = self.rx_buffers.occupy(RxOccupied::Claimed { len }).unwrap();
        Some(ix)
    }

    fn can_claim_tx_buffer(&self) -> bool {
        self.tx_buffers.num_free() > 0
    }

    fn claim_tx_buffer(&mut self) -> Option<TxBufferIndex> {
        let (ix, _) = self.tx_buffers.occupy(TxOccupied::Claimed)?;
        Some(ix)
    }

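    // Returns a pointer to the claimed RX buffer's bytes within the DMA region, sized
    // to the length recorded when the buffer was claimed.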
    pub(crate) fn consume_rx_start(&mut self, index: RxBufferIndex) -> NonNull<[u8]> {
        let desc = self.rx_buffers.get_common_value(index).unwrap();
        let start = desc.encoded_addr_range().start;
        let len = match self
            .rx_buffers
            .get_state_value(index)
            .unwrap()
            .as_occupied()
            .unwrap()
        {
            RxOccupied::Claimed { len } => len,
            _ => panic!(),
        };
        self.dma_region
            .as_mut_ptr()
            .index(start..(start + len))
            .as_raw_ptr()
    }

    pub(crate) fn consume_rx_finish(&mut self, _index: RxBufferIndex) {
        // nothing to do, for now
    }

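    // Hands a claimed RX buffer back to the peer: resets its slot to `Waiting`,
    // re-enqueues its descriptor on the RX free ring, and notifies the peer.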
    pub(crate) fn drop_rx(&mut self, index: RxBufferIndex) -> Result<(), PeerMisbehaviorError> {
        let occupied = self
            .rx_buffers
            .get_state_value_mut(index)
            .unwrap()
            .as_occupied()
            .unwrap();
        assert!(matches!(occupied, RxOccupied::Claimed { .. }));
        *occupied = RxOccupied::Waiting;
        let desc = self.rx_buffers.get_common_value(index).unwrap();
        self.rx_ring_buffers
            .free_mut()
            .enqueue_and_commit(*desc)?
            .unwrap();
        self.rx_ring_buffers.notify();
        Ok(())
    }

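    // Transmits a frame of `len` bytes from the claimed TX slot: allocates a bounce
    // buffer in the DMA region, lets `f` write the frame into it, records the
    // allocation in `Sent`, then enqueues the descriptor on the TX free ring and
    // notifies the peer.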
    pub(crate) fn consume_tx<F, R>(
        &mut self,
        index: TxBufferIndex,
        len: usize,
        f: F,
    ) -> Result<R, Error>
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        let allocation = self
            .bounce_buffer_allocator
            .allocate(Layout::from_size_align(len, 1).unwrap())
            .map_err(|_| Error::BounceBufferAllocationError)?;

        let range = allocation.range();

        let occupied = self
            .tx_buffers
            .get_state_value_mut(index)
            .unwrap()
            .as_occupied()
            .unwrap();
        assert!(matches!(occupied, TxOccupied::Claimed));
        *occupied = TxOccupied::Sent { allocation };

        let mut ptr = self
            .dma_region
            .as_mut_ptr()
            .index(range.clone())
            .as_raw_ptr();
        let r = f(unsafe { ptr.as_mut() });

        let desc = Descriptor::from_encoded_addr_range(range, index);
        self.tx_ring_buffers
            .free_mut()
            .enqueue_and_commit(desc)?
            .unwrap();
        self.tx_ring_buffers.notify();

        Ok(r)
    }

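    // Releases a TX slot that was claimed but never transmitted. Slots already in
    // `Sent` are left alone; `poll` reclaims them when the peer returns the descriptor.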
    pub(crate) fn drop_tx(&mut self, index: TxBufferIndex) {
        let occupied = self
            .tx_buffers
            .get_state_value(index)
            .unwrap()
            .as_occupied()
            .unwrap();
        match occupied {
            TxOccupied::Claimed => {
                self.tx_buffers.free(index, ()).unwrap();
            }
            TxOccupied::Sent { .. } => {}
        }
    }
}

// // //

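// Errors surfaced to callers of this module.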
#[derive(Debug, Clone)]
pub enum Error {
    BounceBufferAllocationError,
    PeerMisbehaviorError(PeerMisbehaviorError),
}

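// Ways in which the peer can violate the shared ring buffer protocol, detected while
// processing the descriptors it returns.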
#[derive(Debug, Clone)]
pub enum PeerMisbehaviorError {
    DescriptorMismatch,
    OutOfBoundsCookie,
    StateMismatch,
    SharedRingBuffersPeerMisbehaviorError(SharedRingBuffersPeerMisbehaviorError),
}

impl From<PeerMisbehaviorError> for Error {
    fn from(err: PeerMisbehaviorError) -> Self {
        Self::PeerMisbehaviorError(err)
    }
}

impl From<SharedRingBuffersPeerMisbehaviorError> for PeerMisbehaviorError {
    fn from(err: SharedRingBuffersPeerMisbehaviorError) -> Self {
        Self::SharedRingBuffersPeerMisbehaviorError(err)
    }
}

impl From<SharedRingBuffersPeerMisbehaviorError> for Error {
    fn from(err: SharedRingBuffersPeerMisbehaviorError) -> Self {
        PeerMisbehaviorError::from(err).into()
    }
}