// heapless/pool/boxed.rs
use core::{
    fmt,
    hash::{Hash, Hasher},
    mem::{ManuallyDrop, MaybeUninit},
    ops, ptr,
};

use stable_deref_trait::StableDeref;

use super::treiber::{NonNullPtr, Stack, UnionNode};

/// Creates a `BoxPool` singleton named `$name` that manages boxed values of type `$data_type`.
#[macro_export]
macro_rules! box_pool {
    ($name:ident: $data_type:ty) => {
        pub struct $name;

        impl $crate::pool::boxed::BoxPool for $name {
            type Data = $data_type;

            fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
                // A single statically allocated free-list backs every handle to this pool.
                static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
                    $crate::pool::boxed::BoxPoolImpl::new();

                &$name
            }
        }

        impl $name {
            /// Inherent method version of `BoxPool::alloc`.
            #[allow(dead_code)]
            pub fn alloc(
                &self,
                value: $data_type,
            ) -> Result<$crate::pool::boxed::Box<$name>, $data_type> {
                <$name as $crate::pool::boxed::BoxPool>::alloc(value)
            }

            /// Inherent method version of `BoxPool::manage`.
            #[allow(dead_code)]
            pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) {
                <$name as $crate::pool::boxed::BoxPool>::manage(block)
            }
        }
    };
}
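
// A minimal usage sketch of `box_pool!` (the pool name `MyPool` and the payload type are
// illustrative; runnable variants of this pattern live in the tests at the end of this file):
//
//     box_pool!(MyPool: [u8; 32]);
//
//     // Hand the pool one statically allocated block, then allocate from it.
//     let block: &'static mut BoxBlock<[u8; 32]> = unsafe {
//         static mut B: BoxBlock<[u8; 32]> = BoxBlock::new();
//         &mut B
//     };
//     MyPool.manage(block);
//
//     let boxed = MyPool.alloc([0u8; 32]).unwrap();
//     drop(boxed); // the block goes back to the pool and can be allocated again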

/// A singleton that manages pool-allocated `Box`-es.
pub trait BoxPool: Sized {
    /// The data type managed by this pool.
    type Data: 'static;

    /// Implementation detail.
    #[doc(hidden)]
    fn singleton() -> &'static BoxPoolImpl<Self::Data>;

    /// Allocates a new `Box`, returning `Err(value)` if the pool is exhausted.
    fn alloc(value: Self::Data) -> Result<Box<Self>, Self::Data> {
        Ok(Box {
            node_ptr: Self::singleton().alloc(value)?,
        })
    }

    /// Adds a statically allocated memory block to the pool.
    fn manage(block: &'static mut BoxBlock<Self::Data>) {
        Self::singleton().manage(block)
    }
}

/// A pool-allocated pointer, akin to `alloc::boxed::Box`, tied to the `BoxPool` singleton `P`.
pub struct Box<P>
where
    P: BoxPool,
{
    node_ptr: NonNullPtr<UnionNode<MaybeUninit<P::Data>>>,
}

impl<A> Clone for Box<A>
where
    A: BoxPool,
    A::Data: Clone,
{
    fn clone(&self) -> Self {
        // Cloning allocates a fresh block from the pool; it panics if the pool is exhausted.
        A::alloc((**self).clone()).ok().expect("OOM")
    }
}

impl<A> fmt::Debug for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> ops::Deref for Box<P>
where
    P: BoxPool,
{
    type Target = P::Data;

    fn deref(&self) -> &Self::Target {
        unsafe { &*self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}

impl<P> ops::DerefMut for Box<P>
where
    P: BoxPool,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}

unsafe impl<P> StableDeref for Box<P> where P: BoxPool {}

impl<A> fmt::Display for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> Drop for Box<P>
where
    P: BoxPool,
{
    fn drop(&mut self) {
        let node = self.node_ptr;

        // Run the destructor of the boxed value in place...
        unsafe { ptr::drop_in_place(node.as_ptr().cast::<P::Data>()) }

        // ...then return the memory block to the pool's free-list.
        unsafe { P::singleton().stack.push(node) }
    }
}

impl<A> Eq for Box<A>
where
    A: BoxPool,
    A::Data: Eq,
{
}

impl<A> Hash for Box<A>
where
    A: BoxPool,
    A::Data: Hash,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state)
    }
}

impl<A> Ord for Box<A>
where
    A: BoxPool,
    A::Data: Ord,
{
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        A::Data::cmp(self, other)
    }
}

impl<A, B> PartialEq<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialEq<B::Data>,
{
    fn eq(&self, other: &Box<B>) -> bool {
        A::Data::eq(self, other)
    }
}

impl<A, B> PartialOrd<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialOrd<B::Data>,
{
    fn partial_cmp(&self, other: &Box<B>) -> Option<core::cmp::Ordering> {
        A::Data::partial_cmp(self, other)
    }
}

unsafe impl<P> Send for Box<P>
where
    P: BoxPool,
    P::Data: Send,
{
}

unsafe impl<P> Sync for Box<P>
where
    P: BoxPool,
    P::Data: Sync,
{
}

/// `box_pool!` implementation detail: the shared free-list behind a `BoxPool` singleton.
#[doc(hidden)]
pub struct BoxPoolImpl<T> {
    stack: Stack<UnionNode<MaybeUninit<T>>>,
}

impl<T> BoxPoolImpl<T> {
    pub const fn new() -> Self {
        Self {
            stack: Stack::new(),
        }
    }

    fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<T>>>, T> {
        if let Some(node_ptr) = self.stack.try_pop() {
            // Move `value` into the free block that was just popped.
            unsafe { node_ptr.as_ptr().cast::<T>().write(value) }

            Ok(node_ptr)
        } else {
            // Pool exhausted: hand the value back to the caller.
            Err(value)
        }
    }

    fn manage(&self, block: &'static mut BoxBlock<T>) {
        let node: &'static mut _ = &mut block.node;

        unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
    }
}

unsafe impl<T> Sync for BoxPoolImpl<T> {}

/// A chunk of memory that a `BoxPool` singleton can use to allocate a single `Box`.
pub struct BoxBlock<T> {
    node: UnionNode<MaybeUninit<T>>,
}

impl<T> BoxBlock<T> {
    /// Creates a new, initially uninitialized, memory block.
    pub const fn new() -> Self {
        Self {
            node: UnionNode {
                data: ManuallyDrop::new(MaybeUninit::uninit()),
            },
        }
    }
}
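
// A sketch of seeding a pool with several blocks (the pool name, payload type and capacity
// below are illustrative assumptions; each block backs one live `Box` at a time):
//
//     box_pool!(MyPool: u128);
//
//     let blocks: &'static mut [BoxBlock<u128>] = {
//         const POOL_CAPACITY: usize = 8;
//         // A `const` item allows the array-repeat syntax for this non-`Copy` type.
//         const BLOCK: BoxBlock<u128> = BoxBlock::new();
//         static mut BLOCKS: [BoxBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//         unsafe { &mut BLOCKS }
//     };
//
//     for block in blocks {
//         MyPool.manage(block);
//     }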

#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::thread;

    use super::*;

    #[test]
    fn cannot_alloc_if_empty() {
        box_pool!(P: i32);

        assert_eq!(Err(42), P.alloc(42));
    }

    #[test]
    fn can_alloc_if_pool_manages_one_block() {
        box_pool!(P: i32);

        let block = unsafe {
            static mut B: BoxBlock<i32> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        assert_eq!(42, *P.alloc(42).unwrap());
    }

    #[test]
    fn alloc_drop_alloc() {
        box_pool!(P: i32);

        let block = unsafe {
            static mut B: BoxBlock<i32> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(1).unwrap();

        drop(boxed);

        assert_eq!(2, *P.alloc(2).unwrap());
    }

    #[test]
    fn runs_destructor_exactly_once_on_drop() {
        static COUNT: AtomicUsize = AtomicUsize::new(0);

        pub struct S;

        impl Drop for S {
            fn drop(&mut self) {
                COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        box_pool!(P: S);

        let block = unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(S).ok().unwrap();

        assert_eq!(0, COUNT.load(Ordering::Relaxed));

        drop(boxed);

        assert_eq!(1, COUNT.load(Ordering::Relaxed));
    }

    #[test]
    fn zst_is_well_aligned() {
        #[repr(align(4096))]
        pub struct Zst4096;

        box_pool!(P: Zst4096);

        let block = unsafe {
            static mut B: BoxBlock<Zst4096> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(Zst4096).ok().unwrap();

        let raw = &*boxed as *const Zst4096;
        assert_eq!(0, raw as usize % 4096);
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn can_clone_if_pool_is_not_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });
        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let first = P.alloc(S).ok().unwrap();
        let _second = first.clone();

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let is_oom = P.alloc(S).is_err();
        assert!(is_oom);
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn clone_panics_if_pool_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let first = P.alloc(S).ok().unwrap();

        let thread = thread::spawn(move || {
            let _second = first.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn panicking_clone_does_not_leak_memory() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                panic!()
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });
        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let boxed = P.alloc(S).ok().unwrap();

        let thread = thread::spawn(move || {
            let _boxed = boxed.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let once = P.alloc(S);
        let twice = P.alloc(S);

        assert!(once.is_ok());
        assert!(twice.is_ok());
    }
}