heapless/pool/boxed.rs

//! `std::boxed::Box`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use core::ptr::addr_of_mut;
//! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
//!
//! box_pool!(MyBoxPool: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(MyBoxPool.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut BoxBlock<u128> = unsafe {
//!     static mut BLOCK: BoxBlock<u128> = BoxBlock::new();
//!     addr_of_mut!(BLOCK).as_mut().unwrap()
//! };
//!
//! // give block of memory to the pool
//! MyBoxPool.manage(block);
//!
//! // it's now possible to allocate
//! let mut boxed = MyBoxPool.alloc(1).unwrap();
//!
//! // mutation is possible
//! *boxed += 1;
//! assert_eq!(2, *boxed);
//!
//! // number of boxes is limited to the number of blocks managed by the pool
//! let res = MyBoxPool.alloc(3);
//! assert!(res.is_err());
//!
//! // give another memory block to the pool
//! MyBoxPool.manage(unsafe {
//!     static mut BLOCK: BoxBlock<u128> = BoxBlock::new();
//!     addr_of_mut!(BLOCK).as_mut().unwrap()
//! });
//!
//! // cloning also consumes a memory block from the pool
//! let mut separate_box = boxed.clone();
//! *separate_box += 1;
//! assert_eq!(3, *separate_box);
//!
//! // after the clone it's not possible to allocate again
//! let res = MyBoxPool.alloc(4);
//! assert!(res.is_err());
//!
//! // `boxed`'s destructor returns the memory block to the pool
//! drop(boxed);
//!
//! // it's possible to allocate again
//! let res = MyBoxPool.alloc(5);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `BoxPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use core::ptr::addr_of_mut;
//! use heapless::{box_pool, pool::boxed::BoxBlock};
//!
//! box_pool!(MyBoxPool: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [BoxBlock<u128>] = {
//!     #[allow(clippy::declare_interior_mutable_const)]
//!     const BLOCK: BoxBlock<u128> = BoxBlock::new(); // <=
//!     static mut BLOCKS: [BoxBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//!     unsafe { addr_of_mut!(BLOCKS).as_mut().unwrap() }
//! };
//!
//! for block in blocks {
//!     MyBoxPool.manage(block);
//! }
//! ```

use core::{
    fmt,
    hash::{Hash, Hasher},
    mem::{ManuallyDrop, MaybeUninit},
    ops, ptr,
};

use stable_deref_trait::StableDeref;

use super::treiber::{NonNullPtr, Stack, UnionNode};

/// Creates a new `BoxPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::boxed)
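///
/// # Example
///
/// A minimal sketch of declaring a pool; the pool cannot allocate until blocks are
/// `manage`-d (see the module-level docs for the full flow):
///
/// ```
/// use heapless::box_pool;
///
/// box_pool!(MyBoxPool: u128);
///
/// // no memory blocks have been given to the pool yet
/// assert!(MyBoxPool.alloc(42).is_err());
/// ```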
#[macro_export]
macro_rules! box_pool {
    ($name:ident: $data_type:ty) => {
        pub struct $name;

        impl $crate::pool::boxed::BoxPool for $name {
            type Data = $data_type;

            fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
                // Even though the static variable is not exposed to user code, it is
                // still useful to have a descriptive symbol name for debugging.
                #[allow(non_upper_case_globals)]
                static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
                    $crate::pool::boxed::BoxPoolImpl::new();

                &$name
            }
        }

        impl $name {
            /// Inherent method version of `BoxPool::alloc`
            #[allow(dead_code)]
            pub fn alloc(
                &self,
                value: $data_type,
            ) -> Result<$crate::pool::boxed::Box<$name>, $data_type> {
                <$name as $crate::pool::boxed::BoxPool>::alloc(value)
            }

            /// Inherent method version of `BoxPool::manage`
            #[allow(dead_code)]
            pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) {
                <$name as $crate::pool::boxed::BoxPool>::manage(block)
            }
        }
    };
}

/// A singleton that manages `pool::boxed::Box`-es
///
/// # Usage
///
/// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that
/// implements this trait.
///
/// # Semver guarantees
///
/// *Implementing* this trait is exempt from semver guarantees;
/// i.e. a new patch release is allowed to break downstream `BoxPool` implementations.
///
/// *Using* the trait, e.g. in generic code, does fall under semver guarantees.
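///
/// # Example
///
/// A sketch of generic code written against this trait; the `debug_print` helper is
/// illustrative, not part of the crate:
///
/// ```
/// use heapless::pool::boxed::{Box, BoxPool};
///
/// fn debug_print<P>(boxed: &Box<P>)
/// where
///     P: BoxPool,
///     P::Data: core::fmt::Debug,
/// {
///     println!("{:?}", boxed);
/// }
/// ```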
pub trait BoxPool: Sized {
    /// The data type managed by the memory pool
    type Data: 'static;

    /// `box_pool!` implementation detail
    #[doc(hidden)]
    fn singleton() -> &'static BoxPoolImpl<Self::Data>;

    /// Allocate a new `Box` initialized to the given `value`
    ///
    /// `manage` should be called at least once before calling `alloc`
    ///
    /// # Errors
    ///
    /// The `Err` variant, handing back the given `value`, is returned when the memory pool
    /// has run out of memory blocks
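    ///
    /// # Example
    ///
    /// A minimal sketch of the out-of-memory case (mirrors the `cannot_alloc_if_empty` test):
    ///
    /// ```
    /// use heapless::box_pool;
    ///
    /// box_pool!(MyBoxPool: i32);
    ///
    /// // no blocks have been `manage`-d yet, so the value is handed back in `Err`
    /// assert_eq!(Err(42), MyBoxPool.alloc(42));
    /// ```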
    fn alloc(value: Self::Data) -> Result<Box<Self>, Self::Data> {
        Ok(Box {
            node_ptr: Self::singleton().alloc(value)?,
        })
    }

    /// Add a statically allocated memory block to the memory pool
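    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring the module-level example:
    ///
    /// ```
    /// use core::ptr::addr_of_mut;
    /// use heapless::{box_pool, pool::boxed::BoxBlock};
    ///
    /// box_pool!(MyBoxPool: i32);
    ///
    /// MyBoxPool.manage(unsafe {
    ///     static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
    ///     addr_of_mut!(BLOCK).as_mut().unwrap()
    /// });
    ///
    /// // the pool can now hand out one box
    /// assert!(MyBoxPool.alloc(0).is_ok());
    /// ```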
    fn manage(block: &'static mut BoxBlock<Self::Data>) {
        Self::singleton().manage(block);
    }
}

/// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]`
pub struct Box<P>
where
    P: BoxPool,
{
    node_ptr: NonNullPtr<UnionNode<MaybeUninit<P::Data>>>,
}

impl<A> Clone for Box<A>
where
    A: BoxPool,
    A::Data: Clone,
{
    fn clone(&self) -> Self {
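        // clone the data first, then allocate a fresh block for it; `.ok().expect`
        // is used because `A::Data` (the `Err` payload) may not implement `Debug`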
        A::alloc((**self).clone()).ok().expect("OOM")
    }
}

impl<A> fmt::Debug for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> ops::Deref for Box<P>
where
    P: BoxPool,
{
    type Target = P::Data;

    fn deref(&self) -> &Self::Target {
        unsafe { &*self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}

impl<P> ops::DerefMut for Box<P>
where
    P: BoxPool,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}

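// SAFETY: `Box` dereferences into a pool-owned memory block, so the pointee's address
// stays stable even when the `Box` value itself is moved.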
unsafe impl<P> StableDeref for Box<P> where P: BoxPool {}

impl<A> fmt::Display for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> Drop for Box<P>
where
    P: BoxPool,
{
    fn drop(&mut self) {
        let node = self.node_ptr;

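        // run the destructor of the boxed value in place; the block itself is not freed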
        unsafe { ptr::drop_in_place(node.as_ptr().cast::<P::Data>()) }

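        // return the memory block to the pool's free-list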
        unsafe { P::singleton().stack.push(node) }
    }
}

impl<A> Eq for Box<A>
where
    A: BoxPool,
    A::Data: Eq,
{
}

impl<A> Hash for Box<A>
where
    A: BoxPool,
    A::Data: Hash,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state);
    }
}

impl<A> Ord for Box<A>
where
    A: BoxPool,
    A::Data: Ord,
{
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        A::Data::cmp(self, other)
    }
}

impl<A, B> PartialEq<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialEq<B::Data>,
{
    fn eq(&self, other: &Box<B>) -> bool {
        A::Data::eq(self, other)
    }
}

impl<A, B> PartialOrd<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialOrd<B::Data>,
{
    fn partial_cmp(&self, other: &Box<B>) -> Option<core::cmp::Ordering> {
        A::Data::partial_cmp(self, other)
    }
}

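// SAFETY: a `Box` uniquely owns its `P::Data`, so it can move between threads whenever
// the data itself is `Send`.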
unsafe impl<P> Send for Box<P>
where
    P: BoxPool,
    P::Data: Send,
{
}

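// SAFETY: sharing a `Box` across threads only shares `&P::Data` references to the owned data.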
unsafe impl<P> Sync for Box<P>
where
    P: BoxPool,
    P::Data: Sync,
{
}

/// `box_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct BoxPoolImpl<T> {
    stack: Stack<UnionNode<MaybeUninit<T>>>,
}

impl<T> BoxPoolImpl<T> {
    #[allow(clippy::new_without_default)]
    pub const fn new() -> Self {
        Self {
            stack: Stack::new(),
        }
    }

    fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<T>>>, T> {
        if let Some(node_ptr) = self.stack.try_pop() {
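            // SAFETY: the node was just popped off the free-list, so this thread has
            // exclusive access to the (uninitialized) memory it points to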
            unsafe { node_ptr.as_ptr().cast::<T>().write(value) }

            Ok(node_ptr)
        } else {
            Err(value)
        }
    }

    fn manage(&self, block: &'static mut BoxBlock<T>) {
        let node: &'static mut _ = &mut block.node;

        unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
    }
}

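// SAFETY: the pool's only state is a lock-free Treiber stack, which is designed for
// concurrent `push`/`try_pop` from multiple threads.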
unsafe impl<T> Sync for BoxPoolImpl<T> {}

/// A chunk of memory that a `BoxPool` singleton can manage
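///
/// # Example
///
/// A minimal sketch of declaring a static block, mirroring the module-level example:
///
/// ```
/// use core::ptr::addr_of_mut;
/// use heapless::pool::boxed::BoxBlock;
///
/// let block: &'static mut BoxBlock<u128> = unsafe {
///     static mut BLOCK: BoxBlock<u128> = BoxBlock::new();
///     addr_of_mut!(BLOCK).as_mut().unwrap()
/// };
/// ```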
pub struct BoxBlock<T> {
    node: UnionNode<MaybeUninit<T>>,
}

impl<T> BoxBlock<T> {
    /// Creates a new memory block
    pub const fn new() -> Self {
        Self {
            node: UnionNode {
                data: ManuallyDrop::new(MaybeUninit::uninit()),
            },
        }
    }
}

impl<T> Default for BoxBlock<T> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::ptr::addr_of_mut;
    use std::thread;

    use super::*;

    #[test]
    fn cannot_alloc_if_empty() {
        box_pool!(MyBoxPool: i32);

        assert_eq!(Err(42), MyBoxPool.alloc(42));
    }

    #[test]
    fn can_alloc_if_pool_manages_one_block() {
        box_pool!(MyBoxPool: i32);

        let block = unsafe {
            static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyBoxPool.manage(block);

        assert_eq!(42, *MyBoxPool.alloc(42).unwrap());
    }

    #[test]
    fn alloc_drop_alloc() {
        box_pool!(MyBoxPool: i32);

        let block = unsafe {
            static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyBoxPool.manage(block);

        let boxed = MyBoxPool.alloc(1).unwrap();

        drop(boxed);

        assert_eq!(2, *MyBoxPool.alloc(2).unwrap());
    }

    #[test]
    fn runs_destructor_exactly_once_on_drop() {
        static COUNT: AtomicUsize = AtomicUsize::new(0);

        pub struct MyStruct;

        impl Drop for MyStruct {
            fn drop(&mut self) {
                COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        box_pool!(MyBoxPool: MyStruct);

        let block = unsafe {
            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyBoxPool.manage(block);

        let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap();

        assert_eq!(0, COUNT.load(Ordering::Relaxed));

        drop(boxed);

        assert_eq!(1, COUNT.load(Ordering::Relaxed));
    }

    #[test]
    fn zst_is_well_aligned() {
        #[repr(align(4096))]
        pub struct Zst4096;

        box_pool!(MyBoxPool: Zst4096);

        let block = unsafe {
            static mut BLOCK: BoxBlock<Zst4096> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyBoxPool.manage(block);

        let boxed = MyBoxPool.alloc(Zst4096).ok().unwrap();

        let raw = &*boxed as *const Zst4096;
        assert_eq!(0, raw as usize % 4096);
    }

    #[test]
    fn can_clone_if_pool_is_not_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct MyStruct;

        impl Clone for MyStruct {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(MyBoxPool: MyStruct);

        MyBoxPool.manage(unsafe {
            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        });
        MyBoxPool.manage(unsafe {
            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        });

        let first = MyBoxPool.alloc(MyStruct).ok().unwrap();
        let _second = first.clone();

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let is_oom = MyBoxPool.alloc(MyStruct).is_err();
        assert!(is_oom);
    }

    #[test]
    fn clone_panics_if_pool_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct MyStruct;

        impl Clone for MyStruct {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(MyBoxPool: MyStruct);

        MyBoxPool.manage(unsafe {
            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        });

        let first = MyBoxPool.alloc(MyStruct).ok().unwrap();

        let thread = thread::spawn(move || {
            let _second = first.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);

        // we diverge from `alloc::Box<T>` in that we call `T::clone` first and then request
        // memory from the allocator whereas `alloc::Box<T>` does it the other way around
        // assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
    }

    #[test]
    fn panicking_clone_does_not_leak_memory() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct MyStruct;

        impl Clone for MyStruct {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                panic!()
            }
        }

        box_pool!(MyBoxPool: MyStruct);

        MyBoxPool.manage(unsafe {
            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        });
        MyBoxPool.manage(unsafe {
            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        });

        let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap();

        let thread = thread::spawn(move || {
            let _boxed = boxed.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let once = MyBoxPool.alloc(MyStruct);
        let twice = MyBoxPool.alloc(MyStruct);

        assert!(once.is_ok());
        assert!(twice.is_ok());
    }
}