use core::{
    fmt,
    hash::{Hash, Hasher},
    mem::{ManuallyDrop, MaybeUninit},
    ops, ptr,
};

#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;

use atomic::{AtomicUsize, Ordering};

use super::treiber::{NonNullPtr, Stack, UnionNode};

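/// Declares an `ArcPool` singleton named `$name` that hands out reference-counted `Arc`
/// pointers to values of type `$data_type`.
///
/// A minimal usage sketch (crate paths elided; the pool name `MyPool` and the `static` block
/// are illustrative). The pool starts empty, so it must be given at least one `ArcBlock` via
/// `manage` before `alloc` can succeed:
///
/// ```ignore
/// arc_pool!(MyPool: u32);
///
/// let block: &'static mut ArcBlock<u32> = unsafe {
///     static mut BLOCK: ArcBlock<u32> = ArcBlock::new();
///     core::ptr::addr_of_mut!(BLOCK).as_mut().unwrap()
/// };
/// MyPool.manage(block);
///
/// let arc = MyPool.alloc(42).unwrap();
/// assert_eq!(42, *arc);
/// ```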
#[macro_export]
macro_rules! arc_pool {
    ($name:ident: $data_type:ty) => {
        pub struct $name;

        impl $crate::pool::arc::ArcPool for $name {
            type Data = $data_type;

            fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
                // One statically allocated pool per macro invocation, named after the pool
                // type itself so the symbol stays unique.
                #[allow(non_upper_case_globals)]
                static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
                    $crate::pool::arc::ArcPoolImpl::new();

                &$name
            }
        }

        impl $name {
            /// Inherent method version of `ArcPool::alloc`
            #[allow(dead_code)]
            pub fn alloc(
                &self,
                value: $data_type,
            ) -> Result<$crate::pool::arc::Arc<$name>, $data_type> {
                <$name as $crate::pool::arc::ArcPool>::alloc(value)
            }

            /// Inherent method version of `ArcPool::manage`
            #[allow(dead_code)]
            pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) {
                <$name as $crate::pool::arc::ArcPool>::manage(block)
            }
        }
    };
}

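/// A singleton that manages `pool::arc::Arc` pointers.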
pub trait ArcPool: Sized {
    /// The data type managed by the memory pool.
    type Data: 'static;

    /// `arc_pool!` implementation detail.
    #[doc(hidden)]
    fn singleton() -> &'static ArcPoolImpl<Self::Data>;

    /// Allocates a new `Arc` initialized to the given value.
    ///
    /// Returns `Err(value)`, handing the value back to the caller, when the pool has no free
    /// memory blocks left.
    fn alloc(value: Self::Data) -> Result<Arc<Self>, Self::Data> {
        Ok(Arc {
            node_ptr: Self::singleton().alloc(value)?,
        })
    }

    /// Adds a statically allocated memory block to the pool.
    fn manage(block: &'static mut ArcBlock<Self::Data>) {
        Self::singleton().manage(block);
    }
}

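/// `ArcPool` implementation detail.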
#[doc(hidden)]
pub struct ArcPoolImpl<T> {
    stack: Stack<UnionNode<MaybeUninit<ArcInner<T>>>>,
}

impl<T> ArcPoolImpl<T> {
    /// `arc_pool!` implementation detail.
    #[doc(hidden)]
    #[allow(clippy::new_without_default)]
    pub const fn new() -> Self {
        Self {
            stack: Stack::new(),
        }
    }

    fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<ArcInner<T>>>>, T> {
        if let Some(node_ptr) = self.stack.try_pop() {
            // Got a free node: move `value` into it with a strong count of 1.
            let inner = ArcInner {
                data: value,
                strong: AtomicUsize::new(1),
            };
            unsafe { node_ptr.as_ptr().cast::<ArcInner<T>>().write(inner) }

            Ok(node_ptr)
        } else {
            // Pool exhausted: give the value back to the caller.
            Err(value)
        }
    }

    fn manage(&self, block: &'static mut ArcBlock<T>) {
        let node: &'static mut _ = &mut block.node;

        // Add the caller's block to the free list.
        unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
    }
}

// All shared mutation goes through the lock-free Treiber `Stack`, so the pool can be shared
// between threads.
unsafe impl<T> Sync for ArcPoolImpl<T> {}

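/// Like `std::sync::Arc`, but managed by memory pool `P` rather than a global allocator.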
pub struct Arc<P>
where
    P: ArcPool,
{
    node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>,
}

impl<P> Arc<P>
where
    P: ArcPool,
{
    fn inner(&self) -> &ArcInner<P::Data> {
        unsafe { &*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>() }
    }

    fn from_inner(node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>) -> Self {
        Self { node_ptr }
    }

    unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data {
        &mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data)
    }

    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // Run `P::Data`'s destructor, ..
        ptr::drop_in_place(Self::get_mut_unchecked(self));

        // .. then return the memory block to the pool.
        P::singleton().stack.push(self.node_ptr);
    }
}

impl<P> AsRef<P::Data> for Arc<P>
where
    P: ArcPool,
{
    fn as_ref(&self) -> &P::Data {
        self
    }
}

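// Upper bound on the strong count, mirroring `std::sync::Arc`; `Clone` panics before the
// counter can overflow.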
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

impl<P> Clone for Arc<P>
where
    P: ArcPool,
{
    fn clone(&self) -> Self {
        let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);

        if old_size > MAX_REFCOUNT {
            // The strong count is about to overflow; bail out rather than risk a later
            // use-after-free.
            panic!();
        }

        Self::from_inner(self.node_ptr)
    }
}

impl<A> fmt::Debug for Arc<A>
where
    A: ArcPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> ops::Deref for Arc<P>
where
    P: ArcPool,
{
    type Target = P::Data;

    fn deref(&self) -> &Self::Target {
        unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data) }
    }
}

impl<A> fmt::Display for Arc<A>
where
    A: ArcPool,
    A::Data: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<A> Drop for Arc<A>
where
    A: ArcPool,
{
    fn drop(&mut self) {
        // Decrement the strong count; every owner other than the last one returns early.
        if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        // This `Acquire` fence pairs with the `Release` decrements above, so the last owner
        // observes all prior uses of the data before destroying it.
        atomic::fence(Ordering::Acquire);

        unsafe { self.drop_slow() }
    }
}

impl<A> Eq for Arc<A>
where
    A: ArcPool,
    A::Data: Eq,
{
}

impl<A> Hash for Arc<A>
where
    A: ArcPool,
    A::Data: Hash,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state);
    }
}

impl<A> Ord for Arc<A>
where
    A: ArcPool,
    A::Data: Ord,
{
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        A::Data::cmp(self, other)
    }
}

impl<A, B> PartialEq<Arc<B>> for Arc<A>
where
    A: ArcPool,
    B: ArcPool,
    A::Data: PartialEq<B::Data>,
{
    fn eq(&self, other: &Arc<B>) -> bool {
        A::Data::eq(self, &**other)
    }
}

impl<A, B> PartialOrd<Arc<B>> for Arc<A>
where
    A: ArcPool,
    B: ArcPool,
    A::Data: PartialOrd<B::Data>,
{
    fn partial_cmp(&self, other: &Arc<B>) -> Option<core::cmp::Ordering> {
        A::Data::partial_cmp(self, &**other)
    }
}

// Like `std::sync::Arc`, sending or sharing an `Arc` gives other threads shared access to
// `P::Data`, so the data must be both `Send` and `Sync`.
unsafe impl<A> Send for Arc<A>
where
    A: ArcPool,
    A::Data: Sync + Send,
{
}

unsafe impl<A> Sync for Arc<A>
where
    A: ArcPool,
    A::Data: Sync + Send,
{
}

impl<A> Unpin for Arc<A> where A: ArcPool {}

struct ArcInner<T> {
    data: T,
    strong: AtomicUsize,
}

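/// A statically allocatable memory block that an `ArcPool` can `manage`.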
pub struct ArcBlock<T> {
    node: UnionNode<MaybeUninit<ArcInner<T>>>,
}

impl<T> ArcBlock<T> {
    /// Creates a new, unmanaged memory block.
    pub const fn new() -> Self {
        Self {
            node: UnionNode {
                data: ManuallyDrop::new(MaybeUninit::uninit()),
            },
        }
    }
}

impl<T> Default for ArcBlock<T> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::ptr::addr_of_mut;

    #[test]
    fn cannot_alloc_if_empty() {
        arc_pool!(MyArcPool: i32);

        assert_eq!(Err(42), MyArcPool.alloc(42),);
    }

    #[test]
    fn can_alloc_if_manages_one_block() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        assert_eq!(42, *MyArcPool.alloc(42).unwrap());
    }

    #[test]
    fn alloc_drop_alloc() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).unwrap();

        drop(arc);

        assert_eq!(2, *MyArcPool.alloc(2).unwrap());
    }

    #[test]
    fn strong_count_starts_at_one() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).ok().unwrap();

        assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn clone_increases_strong_count() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).ok().unwrap();

        let before = arc.inner().strong.load(Ordering::Relaxed);

        let arc2 = arc.clone();

        let expected = before + 1;
        assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed));
        assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn drop_decreases_strong_count() {
        arc_pool!(MyArcPool: i32);

        let block = unsafe {
            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(1).ok().unwrap();
        let arc2 = arc.clone();

        let before = arc.inner().strong.load(Ordering::Relaxed);

        drop(arc);

        let expected = before - 1;
        assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
        static COUNT: AtomicUsize = AtomicUsize::new(0);

        pub struct MyStruct;

        impl Drop for MyStruct {
            fn drop(&mut self) {
                COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        arc_pool!(MyArcPool: MyStruct);

        let block = unsafe {
            static mut BLOCK: ArcBlock<MyStruct> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(MyStruct).ok().unwrap();

        assert_eq!(0, COUNT.load(Ordering::Relaxed));

        drop(arc);

        assert_eq!(1, COUNT.load(Ordering::Relaxed));
    }

    #[test]
    fn zst_is_well_aligned() {
        #[repr(align(4096))]
        pub struct Zst4096;

        arc_pool!(MyArcPool: Zst4096);

        let block = unsafe {
            static mut BLOCK: ArcBlock<Zst4096> = ArcBlock::new();
            addr_of_mut!(BLOCK).as_mut().unwrap()
        };
        MyArcPool.manage(block);

        let arc = MyArcPool.alloc(Zst4096).ok().unwrap();

        let raw = &*arc as *const Zst4096;
        assert_eq!(0, raw as usize % 4096);
    }
}