heapless/pool/arc.rs

//! `std::sync::Arc`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use core::ptr::addr_of_mut;
//! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}};
//!
//! arc_pool!(MyArcPool: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(MyArcPool.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ArcBlock<u128> = unsafe {
//!     static mut BLOCK: ArcBlock<u128> = ArcBlock::new();
//!     addr_of_mut!(BLOCK).as_mut().unwrap()
//! };
//!
//! MyArcPool.manage(block);
//!
//! let arc = MyArcPool.alloc(1).unwrap();
//!
//! // the number of smart pointers is limited to the number of blocks managed by the pool
//! let res = MyArcPool.alloc(2);
//! assert!(res.is_err());
//!
//! // but cloning does not consume an `ArcBlock`
//! let arc2 = arc.clone();
//!
//! assert_eq!(1, *arc2);
//!
//! // `arc`'s destructor returns the memory block to the pool
//! drop(arc2); // decrease the reference counter
//! drop(arc); // release the memory
//!
//! // it's now possible to allocate a new `Arc` smart pointer
//! let res = MyArcPool.alloc(3);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `ArcPool`. This requires an intermediate `const` value, as shown below:
//!
//! ```
//! use core::ptr::addr_of_mut;
//! use heapless::{arc_pool, pool::arc::ArcBlock};
//!
//! arc_pool!(MyArcPool: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ArcBlock<u128>] = {
//!     const BLOCK: ArcBlock<u128> = ArcBlock::new(); // <=
//!     static mut BLOCKS: [ArcBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//!     unsafe { addr_of_mut!(BLOCKS).as_mut().unwrap() }
//! };
//!
//! for block in blocks {
//!     MyArcPool.manage(block);
//! }
//! ```

// The reference counting logic is based on version 1.63.0 of the Rust standard library
// (`alloc` crate), which is licensed under 'MIT OR Apache-2.0'.
// https://github.com/rust-lang/rust/blob/1.63.0/library/alloc/src/sync.rs#L235 (last visited
// 2022-09-05)

use core::{
    fmt,
    hash::{Hash, Hasher},
    mem::{ManuallyDrop, MaybeUninit},
    ops, ptr,
};

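// On targets that lack native atomic instructions, the `portable-atomic` crate provides a
// compatible replacement for `core::sync::atomic`.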
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;

use atomic::{AtomicUsize, Ordering};

use super::treiber::{NonNullPtr, Stack, UnionNode};

/// Creates a new `ArcPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::arc)
#[macro_export]
macro_rules! arc_pool {
    ($name:ident: $data_type:ty) => {
        pub struct $name;

        impl $crate::pool::arc::ArcPool for $name {
            type Data = $data_type;

            fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
                // Even though the static variable is not exposed to user code, it is
                // still useful to have a descriptive symbol name for debugging.
                #[allow(non_upper_case_globals)]
                static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
                    $crate::pool::arc::ArcPoolImpl::new();

                &$name
            }
        }

        impl $name {
            /// Inherent method version of `ArcPool::alloc`
            #[allow(dead_code)]
            pub fn alloc(
                &self,
                value: $data_type,
            ) -> Result<$crate::pool::arc::Arc<$name>, $data_type> {
                <$name as $crate::pool::arc::ArcPool>::alloc(value)
            }

            /// Inherent method version of `ArcPool::manage`
            #[allow(dead_code)]
            pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) {
                <$name as $crate::pool::arc::ArcPool>::manage(block)
            }
        }
    };
}

/// A singleton that manages `pool::arc::Arc` smart pointers
pub trait ArcPool: Sized {
    /// The data type managed by the memory pool
    type Data: 'static;

    /// `arc_pool!` implementation detail
    #[doc(hidden)]
    fn singleton() -> &'static ArcPoolImpl<Self::Data>;

    /// Allocate a new `Arc` smart pointer initialized to the given `value`
    ///
    /// `manage` should be called at least once before calling `alloc`
    ///
    /// # Errors
    ///
    /// Returns the given `value` wrapped in `Err` when the memory pool has run out of memory blocks
    fn alloc(value: Self::Data) -> Result<Arc<Self>, Self::Data> {
        Ok(Arc {
            node_ptr: Self::singleton().alloc(value)?,
        })
    }

    /// Add a statically allocated memory block to the memory pool
    fn manage(block: &'static mut ArcBlock<Self::Data>) {
        Self::singleton().manage(block);
    }
}

/// `arc_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct ArcPoolImpl<T> {
    stack: Stack<UnionNode<MaybeUninit<ArcInner<T>>>>,
}

impl<T> ArcPoolImpl<T> {
    /// `arc_pool!` implementation detail
    #[doc(hidden)]
    #[allow(clippy::new_without_default)]
    pub const fn new() -> Self {
        Self {
            stack: Stack::new(),
        }
    }

    fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<ArcInner<T>>>>, T> {
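        // Pop a free memory block off the lock-free (Treiber) stack; if the pool is
        // exhausted, hand `value` back to the caller.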
        if let Some(node_ptr) = self.stack.try_pop() {
            let inner = ArcInner {
                data: value,
                strong: AtomicUsize::new(1),
            };
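            // Plain `write`, which does not drop the previous contents: the block only
            // ever holds a `MaybeUninit<ArcInner<T>>` at this point.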
            unsafe { node_ptr.as_ptr().cast::<ArcInner<T>>().write(inner) }

            Ok(node_ptr)
        } else {
            Err(value)
        }
    }

    fn manage(&self, block: &'static mut ArcBlock<T>) {
        let node: &'static mut _ = &mut block.node;

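        // SAFETY: `node` comes from a `&'static mut` reference, so it is uniquely owned
        // and lives for the rest of the program.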
        unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
    }
}

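// SAFETY: the pool is only mutated through its lock-free `stack`, whose operations are
// atomic, so sharing an `ArcPoolImpl` between threads is sound.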
unsafe impl<T> Sync for ArcPoolImpl<T> {}

/// Like `std::sync::Arc` but managed by memory pool `P`
pub struct Arc<P>
where
    P: ArcPool,
{
    node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>,
}

impl<P> Arc<P>
where
    P: ArcPool,
{
    fn inner(&self) -> &ArcInner<P::Data> {
        unsafe { &*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>() }
    }

    fn from_inner(node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>) -> Self {
        Self { node_ptr }
    }

    unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data {
        &mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data)
    }

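    // Cold path, kept out of line so that `drop` itself stays small and inlinable.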
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // run `P::Data`'s destructor
        ptr::drop_in_place(Self::get_mut_unchecked(self));

        // return memory to pool
        P::singleton().stack.push(self.node_ptr);
    }
}

impl<P> AsRef<P::Data> for Arc<P>
where
    P: ArcPool,
{
    fn as_ref(&self) -> &P::Data {
        self
    }
}

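// Soft limit on the reference count; going past it risks overflowing the counter. This is
// the same limit that `std::sync::Arc` uses.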
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

impl<P> Clone for Arc<P>
where
    P: ArcPool,
{
    fn clone(&self) -> Self {
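        // Relaxed suffices here: the caller holds a reference, so the count is at least
        // one and the data cannot be freed while we increment.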
        let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);

        if old_size > MAX_REFCOUNT {
            // The standard library aborts here via `intrinsics::abort`, which is unstable
            // API, so we resort to a panic instead.
            panic!();
        }

        Self::from_inner(self.node_ptr)
    }
}

impl<A> fmt::Debug for Arc<A>
where
    A: ArcPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> ops::Deref for Arc<P>
where
    P: ArcPool,
{
    type Target = P::Data;

    fn deref(&self) -> &Self::Target {
        unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data) }
    }
}

impl<A> fmt::Display for Arc<A>
where
    A: ArcPool,
    A::Data: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<A> Drop for Arc<A>
where
    A: ArcPool,
{
    fn drop(&mut self) {
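        // `Release` makes this thread's writes to the data visible to the thread that
        // ends up freeing it. Every drop except the last one returns early here.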
        if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

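        // This `Acquire` fence pairs with the `Release` decrements above, ensuring that
        // all writes made through other clones happen-before the destructor runs.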
        atomic::fence(Ordering::Acquire);

        unsafe { self.drop_slow() }
    }
}

impl<A> Eq for Arc<A>
where
    A: ArcPool,
    A::Data: Eq,
{
}

impl<A> Hash for Arc<A>
where
    A: ArcPool,
    A::Data: Hash,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state);
    }
}

impl<A> Ord for Arc<A>
where
    A: ArcPool,
    A::Data: Ord,
{
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        A::Data::cmp(self, other)
    }
}

impl<A, B> PartialEq<Arc<B>> for Arc<A>
where
    A: ArcPool,
    B: ArcPool,
    A::Data: PartialEq<B::Data>,
{
    fn eq(&self, other: &Arc<B>) -> bool {
        A::Data::eq(self, &**other)
    }
}

impl<A, B> PartialOrd<Arc<B>> for Arc<A>
where
    A: ArcPool,
    B: ArcPool,
    A::Data: PartialOrd<B::Data>,
{
    fn partial_cmp(&self, other: &Arc<B>) -> Option<core::cmp::Ordering> {
        A::Data::partial_cmp(self, &**other)
    }
}

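// SAFETY: same bounds as `std::sync::Arc`. Sending an `Arc` may move the last reference,
// and therefore the data, to another thread, and clones allow simultaneous shared access
// from several threads, so the data must be both `Send` and `Sync`.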
unsafe impl<A> Send for Arc<A>
where
    A: ArcPool,
    A::Data: Sync + Send,
{
}

unsafe impl<A> Sync for Arc<A>
where
    A: ArcPool,
    A::Data: Sync + Send,
{
}

impl<A> Unpin for Arc<A> where A: ArcPool {}

struct ArcInner<T> {
    data: T,
    strong: AtomicUsize,
}

/// A chunk of memory that an `ArcPool` can manage
pub struct ArcBlock<T> {
    node: UnionNode<MaybeUninit<ArcInner<T>>>,
}

impl<T> ArcBlock<T> {
    /// Creates a new memory block
    pub const fn new() -> Self {
        Self {
            node: UnionNode {
                data: ManuallyDrop::new(MaybeUninit::uninit()),
            },
        }
    }
}

impl<T> Default for ArcBlock<T> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::ptr::addr_of_mut;

    #[test]
    fn cannot_alloc_if_empty() {
        arc_pool!(MyArcPool: i32);

        assert_eq!(Err(42), MyArcPool.alloc(42));
    }

    #[test]
    fn can_alloc_if_manages_one_block() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        assert_eq!(42, *MyArcPool.alloc(42).unwrap());
    }

    #[test]
    fn alloc_drop_alloc() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).unwrap();

        drop(arc);

        assert_eq!(2, *MyArcPool.alloc(2).unwrap());
    }

    #[test]
    fn strong_count_starts_at_one() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).ok().unwrap();

        assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn clone_increases_strong_count() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).ok().unwrap();

        let before = arc.inner().strong.load(Ordering::Relaxed);

        let arc2 = arc.clone();

        let expected = before + 1;
        assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed));
        assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn drop_decreases_strong_count() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).ok().unwrap();
        let arc2 = arc.clone();

        let before = arc.inner().strong.load(Ordering::Relaxed);

        drop(arc);

        let expected = before - 1;
        assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
        static COUNT: AtomicUsize = AtomicUsize::new(0);

        pub struct MyStruct;

        impl Drop for MyStruct {
            fn drop(&mut self) {
                COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        arc_pool!(MyArcPool: MyStruct);

        let block = unsafe {
            static mut BLOCK: ArcBlock<MyStruct> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(MyStruct).ok().unwrap();

        assert_eq!(0, COUNT.load(Ordering::Relaxed));

        drop(arc);

        assert_eq!(1, COUNT.load(Ordering::Relaxed));
    }

    #[test]
    fn zst_is_well_aligned() {
        #[repr(align(4096))]
        pub struct Zst4096;

        arc_pool!(MyArcPool: Zst4096);

        let block = unsafe {
            static mut BLOCK: ArcBlock<Zst4096> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(Zst4096).ok().unwrap();

        let raw = &*arc as *const Zst4096;
        assert_eq!(0, raw as usize % 4096);
    }
}