heapless/pool/boxed.rs

//! `std::boxed::Box`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
//!
//! box_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut BoxBlock<u128> = unsafe {
//!     static mut B: BoxBlock<u128> = BoxBlock::new();
//!     &mut B
//! };
//!
//! // give block of memory to the pool
//! P.manage(block);
//!
//! // it's now possible to allocate
//! let mut boxed = P.alloc(1).unwrap();
//!
//! // mutation is possible
//! *boxed += 1;
//! assert_eq!(2, *boxed);
//!
//! // number of boxes is limited to the number of blocks managed by the pool
//! let res = P.alloc(3);
//! assert!(res.is_err());
//!
//! // give another memory block to the pool
//! P.manage(unsafe {
//!     static mut B: BoxBlock<u128> = BoxBlock::new();
//!     &mut B
//! });
//!
//! // cloning also consumes a memory block from the pool
//! let mut separate_box = boxed.clone();
//! *separate_box += 1;
//! assert_eq!(3, *separate_box);
//!
//! // after the clone it's not possible to allocate again
//! let res = P.alloc(4);
//! assert!(res.is_err());
//!
//! // `boxed`'s destructor returns the memory block to the pool
//! drop(boxed);
//!
//! // it's possible to allocate again
//! let res = P.alloc(5);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `BoxPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{box_pool, pool::boxed::BoxBlock};
//!
//! box_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [BoxBlock<u128>] = {
//!     const BLOCK: BoxBlock<u128> = BoxBlock::new(); // <=
//!     static mut BLOCKS: [BoxBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//!     unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//!     P.manage(block);
//! }
//! ```

use core::{
    fmt,
    hash::{Hash, Hasher},
    mem::{ManuallyDrop, MaybeUninit},
    ops, ptr,
};

use stable_deref_trait::StableDeref;

use super::treiber::{NonNullPtr, Stack, UnionNode};

/// Creates a new `BoxPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::boxed)
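///
/// # Example
///
/// A minimal sketch, assuming the pool name `MyPool` and the element type `u8` (both are purely
/// illustrative; any name and any `'static` data type work):
///
/// ```
/// use heapless::box_pool;
///
/// // declares a zero-sized singleton type `MyPool` whose pool hands out boxed `u8` values
/// box_pool!(MyPool: u8);
///
/// // no memory blocks have been given to `MyPool` yet, so allocation fails
/// assert!(MyPool.alloc(42).is_err());
/// ```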
#[macro_export]
macro_rules! box_pool {
    ($name:ident: $data_type:ty) => {
        pub struct $name;

        impl $crate::pool::boxed::BoxPool for $name {
            type Data = $data_type;

            fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
                static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
                    $crate::pool::boxed::BoxPoolImpl::new();

                &$name
            }
        }

        impl $name {
            /// Inherent method version of `BoxPool::alloc`
            #[allow(dead_code)]
            pub fn alloc(
                &self,
                value: $data_type,
            ) -> Result<$crate::pool::boxed::Box<$name>, $data_type> {
                <$name as $crate::pool::boxed::BoxPool>::alloc(value)
            }

            /// Inherent method version of `BoxPool::manage`
            #[allow(dead_code)]
            pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) {
                <$name as $crate::pool::boxed::BoxPool>::manage(block)
            }
        }
    };
}

/// A singleton that manages `pool::boxed::Box`es
///
/// # Usage
///
/// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that
/// implements this trait.
///
/// # Semver guarantees
///
/// *Implementing* this trait is exempt from semver guarantees.
/// That is, a new patch release is allowed to break downstream `BoxPool` implementations.
///
/// *Using* the trait, e.g. in generic code, does fall under semver guarantees.
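///
/// A sketch of such generic code, assuming an illustrative helper `into_data` that is not part of
/// this crate:
///
/// ```
/// use heapless::{box_pool, pool::boxed::{Box, BoxBlock, BoxPool}};
///
/// // generic over whatever concrete pool type `box_pool!` created
/// fn into_data<P>(boxed: Box<P>) -> P::Data
/// where
///     P: BoxPool,
///     P::Data: Clone,
/// {
///     // `Box<P>` derefs to `P::Data`; the memory block returns to the pool when `boxed` drops
///     (*boxed).clone()
/// }
///
/// box_pool!(U8Pool: u8);
///
/// U8Pool.manage(unsafe {
///     static mut B: BoxBlock<u8> = BoxBlock::new();
///     &mut B
/// });
///
/// assert_eq!(42, into_data(U8Pool.alloc(42).unwrap()));
/// ```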
pub trait BoxPool: Sized {
    /// The data type managed by the memory pool
    type Data: 'static;

    /// `box_pool!` implementation detail
    #[doc(hidden)]
    fn singleton() -> &'static BoxPoolImpl<Self::Data>;

    /// Allocate a new `Box` initialized to the given `value`
    ///
    /// `manage` should be called at least once before calling `alloc`
    ///
    /// # Errors
    ///
    /// The `Err` variant, carrying the original `value`, is returned when the memory pool has run out of memory blocks
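    ///
    /// A small sketch of the error path (the pool name `P` is illustrative):
    ///
    /// ```
    /// use heapless::box_pool;
    ///
    /// box_pool!(P: i32);
    ///
    /// // the pool has no managed blocks yet, so the value comes back in the `Err` variant
    /// assert_eq!(Err(42), P.alloc(42));
    /// ```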
    fn alloc(value: Self::Data) -> Result<Box<Self>, Self::Data> {
        Ok(Box {
            node_ptr: Self::singleton().alloc(value)?,
        })
    }

    /// Add a statically allocated memory block to the memory pool
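    ///
    /// A short sketch (the pool name `P` is illustrative):
    ///
    /// ```
    /// use heapless::{box_pool, pool::boxed::BoxBlock};
    ///
    /// box_pool!(P: u8);
    ///
    /// // hand one static block to the pool; afterwards one allocation can succeed
    /// P.manage(unsafe {
    ///     static mut B: BoxBlock<u8> = BoxBlock::new();
    ///     &mut B
    /// });
    ///
    /// assert!(P.alloc(0).is_ok());
    /// ```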
    fn manage(block: &'static mut BoxBlock<Self::Data>) {
        Self::singleton().manage(block)
    }
}

/// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]`
pub struct Box<P>
where
    P: BoxPool,
{
    node_ptr: NonNullPtr<UnionNode<MaybeUninit<P::Data>>>,
}
176
impl<A> Clone for Box<A>
where
    A: BoxPool,
    A::Data: Clone,
{
    fn clone(&self) -> Self {
        // the clone is allocated from the same pool; panics ("OOM") if no free blocks are left
        A::alloc((**self).clone()).ok().expect("OOM")
    }
}

impl<A> fmt::Debug for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> ops::Deref for Box<P>
where
    P: BoxPool,
{
    type Target = P::Data;

    fn deref(&self) -> &Self::Target {
        unsafe { &*self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}

impl<P> ops::DerefMut for Box<P>
where
    P: BoxPool,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}

// the boxed value lives in a static memory block, so its address stays stable even if the `Box`
// handle itself is moved
unsafe impl<P> StableDeref for Box<P> where P: BoxPool {}

impl<A> fmt::Display for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> Drop for Box<P>
where
    P: BoxPool,
{
    fn drop(&mut self) {
        let node = self.node_ptr;

        // run the destructor of the boxed value in place ...
        unsafe { ptr::drop_in_place(node.as_ptr().cast::<P::Data>()) }

        // ... then return the memory block to the pool's free list
        unsafe { P::singleton().stack.push(node) }
    }
}

impl<A> Eq for Box<A>
where
    A: BoxPool,
    A::Data: Eq,
{
}

impl<A> Hash for Box<A>
where
    A: BoxPool,
    A::Data: Hash,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state)
    }
}

impl<A> Ord for Box<A>
where
    A: BoxPool,
    A::Data: Ord,
{
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        A::Data::cmp(self, other)
    }
}

impl<A, B> PartialEq<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialEq<B::Data>,
{
    fn eq(&self, other: &Box<B>) -> bool {
        A::Data::eq(self, other)
    }
}

impl<A, B> PartialOrd<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialOrd<B::Data>,
{
    fn partial_cmp(&self, other: &Box<B>) -> Option<core::cmp::Ordering> {
        A::Data::partial_cmp(self, other)
    }
}

unsafe impl<P> Send for Box<P>
where
    P: BoxPool,
    P::Data: Send,
{
}

unsafe impl<P> Sync for Box<P>
where
    P: BoxPool,
    P::Data: Sync,
{
}

/// `box_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct BoxPoolImpl<T> {
    stack: Stack<UnionNode<MaybeUninit<T>>>,
}

impl<T> BoxPoolImpl<T> {
    pub const fn new() -> Self {
        Self {
            stack: Stack::new(),
        }
    }

    fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<T>>>, T> {
        if let Some(node_ptr) = self.stack.try_pop() {
            // move `value` into the free block that was just popped off the stack
            unsafe { node_ptr.as_ptr().cast::<T>().write(value) }

            Ok(node_ptr)
        } else {
            // pool exhausted: hand ownership of `value` back to the caller
            Err(value)
        }
    }

    fn manage(&self, block: &'static mut BoxBlock<T>) {
        let node: &'static mut _ = &mut block.node;

        unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
    }
}

// all shared state is kept in the lock-free (Treiber) stack, which supports concurrent access
unsafe impl<T> Sync for BoxPoolImpl<T> {}

/// A chunk of memory that a `BoxPool` singleton can manage
pub struct BoxBlock<T> {
    node: UnionNode<MaybeUninit<T>>,
}

impl<T> BoxBlock<T> {
    /// Creates a new memory block
    pub const fn new() -> Self {
        Self {
            node: UnionNode {
                data: ManuallyDrop::new(MaybeUninit::uninit()),
            },
        }
    }
}

#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::thread;

    use super::*;

    #[test]
    fn cannot_alloc_if_empty() {
        box_pool!(P: i32);

        assert_eq!(Err(42), P.alloc(42));
    }

    #[test]
    fn can_alloc_if_pool_manages_one_block() {
        box_pool!(P: i32);

        let block = unsafe {
            static mut B: BoxBlock<i32> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        assert_eq!(42, *P.alloc(42).unwrap());
    }

    #[test]
    fn alloc_drop_alloc() {
        box_pool!(P: i32);

        let block = unsafe {
            static mut B: BoxBlock<i32> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(1).unwrap();

        drop(boxed);

        assert_eq!(2, *P.alloc(2).unwrap());
    }

    #[test]
    fn runs_destructor_exactly_once_on_drop() {
        static COUNT: AtomicUsize = AtomicUsize::new(0);

        pub struct S;

        impl Drop for S {
            fn drop(&mut self) {
                COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        box_pool!(P: S);

        let block = unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(S).ok().unwrap();

        assert_eq!(0, COUNT.load(Ordering::Relaxed));

        drop(boxed);

        assert_eq!(1, COUNT.load(Ordering::Relaxed));
    }

    #[test]
    fn zst_is_well_aligned() {
        #[repr(align(4096))]
        pub struct Zst4096;

        box_pool!(P: Zst4096);

        let block = unsafe {
            static mut B: BoxBlock<Zst4096> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(Zst4096).ok().unwrap();

        let raw = &*boxed as *const Zst4096;
        assert_eq!(0, raw as usize % 4096);
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn can_clone_if_pool_is_not_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });
        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let first = P.alloc(S).ok().unwrap();
        let _second = first.clone();

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let is_oom = P.alloc(S).is_err();
        assert!(is_oom);
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn clone_panics_if_pool_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let first = P.alloc(S).ok().unwrap();

        let thread = thread::spawn(move || {
            let _second = first.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);

        // we diverge from `alloc::Box<T>` in that we call `T::clone` first and then request
        // memory from the allocator whereas `alloc::Box<T>` does it the other way around
        // assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn panicking_clone_does_not_leak_memory() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                panic!()
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });
        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let boxed = P.alloc(S).ok().unwrap();

        let thread = thread::spawn(move || {
            let _boxed = boxed.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let once = P.alloc(S);
        let twice = P.alloc(S);

        assert!(once.is_ok());
        assert!(twice.is_ok());
    }
}