// sel4_shared_ring_buffer_smoltcp/inner.rs

//
// Copyright 2023, Colias Group, LLC
//
// SPDX-License-Identifier: BSD-2-Clause
//
6
7use core::alloc::Layout;
8use core::marker::PhantomData;
9use core::ptr::NonNull;
10
11use smoltcp::phy::DeviceCapabilities;
12
13use sel4_abstract_allocator::{AbstractAllocator, AbstractAllocatorAllocation};
14use sel4_shared_memory::SharedMemoryRef;
15use sel4_shared_ring_buffer::{
16    Descriptor, PeerMisbehaviorError as SharedRingBuffersPeerMisbehaviorError, RingBuffers,
17    roles::Provide,
18};
19use sel4_shared_ring_buffer_bookkeeping::slot_tracker::*;
20
/// Shared state backing the smoltcp device: bounce buffers carved out of a DMA
/// region, plus RX/TX descriptor rings shared with the peer and per-slot
/// bookkeeping for both directions.
pub(crate) struct Inner<A: AbstractAllocator> {
    /// Shared memory region that all bounce buffers live inside.
    dma_region: SharedMemoryRef<'static, [u8]>,
    /// Allocator handing out bounce-buffer ranges.
    // NOTE(review): allocation ranges are used directly to index `dma_region`,
    // so the allocator is presumably configured to cover exactly that region —
    // confirm at the construction site.
    bounce_buffer_allocator: A,
    /// Receive-side free/used rings shared with the peer.
    rx_ring_buffers: RingBuffers<'static, Provide, fn()>,
    /// Transmit-side free/used rings shared with the peer.
    tx_ring_buffers: RingBuffers<'static, Provide, fn()>,
    /// Per-RX-buffer state, indexed by descriptor cookie.
    rx_buffers: SlotTracker<RxStateTypesImpl>,
    /// Per-TX-slot state, indexed by descriptor cookie.
    tx_buffers: SlotTracker<TxStateTypesImpl<A>>,
    /// Capabilities reported to smoltcp via `caps()`.
    caps: DeviceCapabilities,
}
30
/// Index of an RX buffer slot in `Inner::rx_buffers` (also used as the
/// descriptor cookie).
pub(crate) type RxBufferIndex = usize;

/// Uninhabited marker type selecting the RX slot-state types.
enum RxStateTypesImpl {}

impl SlotStateTypes for RxStateTypesImpl {
    /// The descriptor originally provided to the peer for this slot.
    type Common = Descriptor;
    type Free = RxFree;
    type Occupied = RxOccupied;
}

/// A slot holding received data that has not yet been claimed.
struct RxFree {
    // Length recorded from the peer's used-ring descriptor when the buffer
    // came back in `poll`.
    len: usize,
}

/// A slot currently lent out.
enum RxOccupied {
    /// Buffer is with the peer on the free ring, waiting for data.
    Waiting,
    /// Buffer has been claimed for reading; `len` is the received length.
    Claimed { len: usize },
}
49
/// Index of a TX slot in `Inner::tx_buffers` (also used as the descriptor
/// cookie).
pub(crate) type TxBufferIndex = usize;

/// Marker type selecting the TX slot-state types (never instantiated).
struct TxStateTypesImpl<A> {
    _phantom: PhantomData<A>,
}

impl<A: AbstractAllocator> SlotStateTypes for TxStateTypesImpl<A> {
    type Common = ();
    type Free = ();
    type Occupied = TxOccupied<A>;
}

/// A TX slot currently in use.
enum TxOccupied<A: AbstractAllocator> {
    /// Claimed by the device but not yet submitted; no bounce buffer yet.
    Claimed,
    /// Submitted to the peer; holds the bounce-buffer allocation to be
    /// returned to the allocator when the peer hands the slot back.
    Sent { allocation: A::Allocation },
}
66
impl<A: AbstractAllocator> Inner<A> {
    /// Builds the device state and primes the RX free ring.
    ///
    /// Allocates `num_rx_buffers` bounce buffers of `rx_buffer_size` bytes
    /// each and enqueues a descriptor for every one on the RX free ring, using
    /// the slot index as the descriptor cookie; all RX slots start as
    /// `Occupied(Waiting)`. The TX tracker is sized to the TX free ring's
    /// capacity, with all slots free.
    ///
    /// NOTE(review): allocation/enqueue failure inside the closure panics
    /// (`.unwrap()` after `map_err`) rather than propagating, so the `Result`
    /// return is currently always `Ok`.
    pub(crate) fn new(
        dma_region: SharedMemoryRef<'static, [u8]>,
        mut bounce_buffer_allocator: A,
        mut rx_ring_buffers: RingBuffers<'static, Provide, fn()>,
        tx_ring_buffers: RingBuffers<'static, Provide, fn()>,
        num_rx_buffers: usize,
        rx_buffer_size: usize,
        caps: DeviceCapabilities,
    ) -> Result<Self, Error> {
        let rx_buffers = SlotTracker::new_occupied((0..num_rx_buffers).map(|i| {
            let allocation = bounce_buffer_allocator
                .allocate(Layout::from_size_align(rx_buffer_size, 1).unwrap())
                .map_err(|_| Error::BounceBufferAllocationError)
                .unwrap();
            // Cookie `i` lets `poll` map the returned descriptor back to its slot.
            let desc = Descriptor::from_encoded_addr_range(allocation.range(), i);
            rx_ring_buffers
                .free_mut()
                .enqueue_and_commit(desc)
                .unwrap()
                .unwrap();
            (desc, RxOccupied::Waiting)
        }));

        let tx_buffers = SlotTracker::new_with_capacity((), (), tx_ring_buffers.free().capacity());

        Ok(Self {
            dma_region,
            bounce_buffer_allocator,
            rx_ring_buffers,
            tx_ring_buffers,
            rx_buffers,
            tx_buffers,
            caps,
        })
    }

    /// Device capabilities reported to smoltcp.
    pub(crate) fn caps(&self) -> &DeviceCapabilities {
        &self.caps
    }

    /// Drains both used rings, reclaiming completed buffers.
    ///
    /// RX: each returned descriptor is validated (cookie in bounds, same
    /// encoded address as the descriptor we provided, length not exceeding the
    /// provided length), then its slot moves `Occupied(Waiting) -> Free`,
    /// recording the length for a later claim.
    ///
    /// TX: each returned slot moves `Occupied(Sent) -> Free` and its bounce
    /// buffer is returned to the allocator; any other state is misbehavior.
    ///
    /// Each peer is notified only if its ring yielded at least one descriptor.
    /// Returns `Ok(true)` when any progress was made.
    pub(crate) fn poll(&mut self) -> Result<bool, PeerMisbehaviorError> {
        let mut notify_rx = false;

        while let Some(desc) = self.rx_ring_buffers.used_mut().dequeue()? {
            let ix = desc.cookie();
            if ix >= self.rx_buffers.capacity() {
                return Err(PeerMisbehaviorError::OutOfBoundsCookie);
            }

            // The descriptor must refer to the buffer we provided for this slot
            // and must not report more data than that buffer can hold.
            let provided_desc = self.rx_buffers.get_common_value(ix).unwrap();
            if desc.encoded_addr() != provided_desc.encoded_addr()
                || desc.len() > provided_desc.len()
            {
                return Err(PeerMisbehaviorError::DescriptorMismatch);
            }

            // Only buffers we are actually waiting on may come back.
            if !matches!(
                self.rx_buffers.get_state_value(ix).unwrap(),
                SlotStateValueRef::Occupied(RxOccupied::Waiting)
            ) {
                return Err(PeerMisbehaviorError::StateMismatch);
            }

            // Occupied(Waiting) -> Free: received data is now claimable.
            self.rx_buffers
                .free(
                    ix,
                    RxFree {
                        len: desc.encoded_addr_range().len(),
                    },
                )
                .unwrap();

            notify_rx = true;
        }

        if notify_rx {
            self.rx_ring_buffers.notify();
        }

        let mut notify_tx = false;

        while let Some(desc) = self.tx_ring_buffers.used_mut().dequeue()? {
            let ix = desc.cookie();

            match self.tx_buffers.free(ix, ()) {
                Ok(TxOccupied::Sent { allocation }) => {
                    // Transmission complete: recycle the bounce buffer.
                    self.bounce_buffer_allocator.deallocate(allocation);
                }
                Err(err) => {
                    return Err(match err {
                        SlotTrackerError::OutOfBounds => PeerMisbehaviorError::OutOfBoundsCookie,
                        SlotTrackerError::StateMismatch => PeerMisbehaviorError::StateMismatch,
                    });
                }
                _ => {
                    // Ok(TxOccupied::Claimed): peer returned a slot we never sent.
                    return Err(PeerMisbehaviorError::StateMismatch);
                }
            }

            notify_tx = true;
        }

        if notify_tx {
            self.tx_ring_buffers.notify();
        }

        Ok(notify_rx || notify_tx)
    }

    /// Whether a receive can proceed: needs both a filled RX buffer and a free
    /// TX slot.
    // NOTE(review): presumably because the device's receive token is paired
    // with a transmit token (see `receive`) — confirm against the Device impl.
    pub(crate) fn can_receive(&mut self) -> bool {
        self.can_claim_rx_buffer() && self.can_claim_tx_buffer()
    }

    /// Whether a transmit can proceed (a free TX slot exists).
    pub(crate) fn can_transmit(&mut self) -> bool {
        self.can_claim_tx_buffer()
    }

    /// Claims one filled RX buffer together with one TX slot, or `None` if
    /// either is unavailable.
    pub(crate) fn receive(&mut self) -> Option<(RxBufferIndex, TxBufferIndex)> {
        if self.can_receive() {
            let rx = self.claim_rx_buffer().unwrap();
            let tx = self.claim_tx_buffer().unwrap();
            Some((rx, tx))
        } else {
            None
        }
    }

    /// Claims a TX slot for an outgoing frame, or `None` if none are free.
    pub(crate) fn transmit(&mut self) -> Option<TxBufferIndex> {
        self.claim_tx_buffer()
    }

    fn can_claim_rx_buffer(&self) -> bool {
        self.rx_buffers.num_free() > 0
    }

    // Free -> Occupied(Claimed { len }), carrying the received length across.
    fn claim_rx_buffer(&mut self) -> Option<RxBufferIndex> {
        let len = self.rx_buffers.peek_next_free_value()?.len;
        let (ix, _) = self.rx_buffers.occupy(RxOccupied::Claimed { len }).unwrap();
        Some(ix)
    }

    fn can_claim_tx_buffer(&self) -> bool {
        self.tx_buffers.num_free() > 0
    }

    // Free -> Occupied(Claimed); the bounce buffer is allocated later, in
    // `consume_tx`.
    fn claim_tx_buffer(&mut self) -> Option<TxBufferIndex> {
        let (ix, _) = self.tx_buffers.occupy(TxOccupied::Claimed)?;
        Some(ix)
    }

    /// Returns a pointer to the claimed RX buffer's received bytes within the
    /// DMA region.
    ///
    /// Panics if the slot is not `Occupied(Claimed)`.
    // NOTE(review): assumes descriptor encoded addresses are offsets into
    // `dma_region` — confirm against the Descriptor encoding used by the peer.
    pub(crate) fn consume_rx_start(&mut self, index: RxBufferIndex) -> NonNull<[u8]> {
        let desc = self.rx_buffers.get_common_value(index).unwrap();
        let start = desc.encoded_addr_range().start;
        let RxOccupied::Claimed { len } = self
            .rx_buffers
            .get_state_value(index)
            .unwrap()
            .as_occupied()
            .unwrap()
        else {
            panic!()
        };
        self.dma_region
            .as_mut_ptr()
            .index(start..(start + len))
            .as_raw_ptr()
    }

    /// Hook for when smoltcp has finished reading a received frame.
    pub(crate) fn consume_rx_finish(&mut self, _index: RxBufferIndex) {
        // nothing to do, for now
    }

    /// Returns a claimed RX buffer to the peer without consuming it:
    /// `Occupied(Claimed) -> Occupied(Waiting)`, re-enqueuing the original
    /// descriptor on the RX free ring and notifying the peer.
    ///
    /// Panics if the slot is not occupied and `Claimed`.
    pub(crate) fn drop_rx(&mut self, index: RxBufferIndex) -> Result<(), PeerMisbehaviorError> {
        let occupied = self
            .rx_buffers
            .get_state_value_mut(index)
            .unwrap()
            .as_occupied()
            .unwrap();
        assert!(matches!(occupied, RxOccupied::Claimed { .. }));
        *occupied = RxOccupied::Waiting;
        let desc = self.rx_buffers.get_common_value(index).unwrap();
        self.rx_ring_buffers
            .free_mut()
            .enqueue_and_commit(*desc)?
            .unwrap();
        self.rx_ring_buffers.notify();
        Ok(())
    }

    /// Fills and submits a TX frame of `len` bytes.
    ///
    /// Allocates a bounce buffer, transitions the slot
    /// `Occupied(Claimed) -> Occupied(Sent)`, lets `f` write the frame into
    /// the buffer, then enqueues a descriptor (cookie = `index`) on the TX
    /// free ring and notifies the peer. The bounce buffer is reclaimed by
    /// `poll` once the peer returns the slot.
    ///
    /// Panics if the slot is not `Occupied(Claimed)`.
    ///
    /// NOTE(review): if `enqueue_and_commit` fails after the slot was marked
    /// `Sent`, the error propagates while the slot (and its allocation)
    /// remains `Sent` with nothing submitted — verify this cannot strand the
    /// slot.
    pub(crate) fn consume_tx<F, R>(
        &mut self,
        index: TxBufferIndex,
        len: usize,
        f: F,
    ) -> Result<R, Error>
    where
        F: FnOnce(&mut [u8]) -> R,
    {
        let allocation = self
            .bounce_buffer_allocator
            .allocate(Layout::from_size_align(len, 1).unwrap())
            .map_err(|_| Error::BounceBufferAllocationError)?;

        let range = allocation.range();

        let occupied = self
            .tx_buffers
            .get_state_value_mut(index)
            .unwrap()
            .as_occupied()
            .unwrap();
        assert!(matches!(occupied, TxOccupied::Claimed));
        *occupied = TxOccupied::Sent { allocation };

        let mut ptr = self
            .dma_region
            .as_mut_ptr()
            .index(range.clone())
            .as_raw_ptr();
        // NOTE(review): soundness of this dereference rests on SharedMemoryRef's
        // pointer semantics and on `range` lying within `dma_region` — neither
        // is visible here; confirm against sel4_shared_memory.
        let r = f(unsafe { ptr.as_mut() });

        let desc = Descriptor::from_encoded_addr_range(range, index);
        self.tx_ring_buffers
            .free_mut()
            .enqueue_and_commit(desc)?
            .unwrap();
        self.tx_ring_buffers.notify();

        Ok(r)
    }

    /// Releases a claimed TX slot that will not be used.
    ///
    /// A slot still in `Claimed` is freed immediately; a `Sent` slot is left
    /// alone, since `poll` frees it (and its bounce buffer) when the peer
    /// returns it.
    pub(crate) fn drop_tx(&mut self, index: TxBufferIndex) {
        let occupied = self
            .tx_buffers
            .get_state_value(index)
            .unwrap()
            .as_occupied()
            .unwrap();
        match occupied {
            TxOccupied::Claimed => {
                self.tx_buffers.free(index, ()).unwrap();
            }
            TxOccupied::Sent { .. } => {}
        }
    }
}
315
// // //
317
/// Errors surfaced by this crate's device implementation.
#[derive(Debug, Clone)]
pub enum Error {
    /// The bounce-buffer allocator could not satisfy an allocation request.
    BounceBufferAllocationError,
    /// The peer violated the shared ring buffer protocol.
    PeerMisbehaviorError(PeerMisbehaviorError),
}
323
/// Ways in which the ring buffer peer can violate the protocol.
#[derive(Debug, Clone)]
pub enum PeerMisbehaviorError {
    /// A used-ring descriptor did not match the descriptor we provided.
    DescriptorMismatch,
    /// A descriptor cookie did not index a valid slot.
    OutOfBoundsCookie,
    /// A descriptor referred to a slot that was not in the expected state.
    StateMismatch,
    /// A violation detected by the underlying ring buffer layer.
    SharedRingBuffersPeerMisbehaviorError(SharedRingBuffersPeerMisbehaviorError),
}
331
332impl From<PeerMisbehaviorError> for Error {
333    fn from(err: PeerMisbehaviorError) -> Self {
334        Self::PeerMisbehaviorError(err)
335    }
336}
337
338impl From<SharedRingBuffersPeerMisbehaviorError> for PeerMisbehaviorError {
339    fn from(err: SharedRingBuffersPeerMisbehaviorError) -> Self {
340        Self::SharedRingBuffersPeerMisbehaviorError(err)
341    }
342}
343
344impl From<SharedRingBuffersPeerMisbehaviorError> for Error {
345    fn from(err: SharedRingBuffersPeerMisbehaviorError) -> Self {
346        PeerMisbehaviorError::from(err).into()
347    }
348}