sel4_atomic_ptr/lib.rs

//
// Copyright 2023, Colias Group, LLC
//
// SPDX-License-Identifier: MIT OR Apache-2.0
//

#![no_std]
#![feature(core_intrinsics)]
#![allow(internal_features)]

use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use core::sync::atomic;

mod generic;
mod ops;
mod ordering;

use ordering::OrderingExhaustive;

#[repr(transparent)]
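// Note (comment added for exposition): this is a thin wrapper around a
// `NonNull<T>`. The `PhantomData<&'a T>` field ties the pointer to the
// lifetime `'a` of the referenced memory without affecting layout, and
// `#[repr(transparent)]` guarantees the wrapper has the same layout as
// `NonNull<T>` itself.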
pub struct AtomicPtr<'a, T> {
    pointer: NonNull<T>,
    reference: PhantomData<&'a T>,
}

impl<T> Copy for AtomicPtr<'_, T> {}

impl<T> Clone for AtomicPtr<'_, T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T> fmt::Debug for AtomicPtr<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.pointer).finish()
    }
}

impl<T> AtomicPtr<'_, T> {
    /// # Safety
    ///
    /// Necessary but not sufficient:
    /// * `pointer` must be aligned to the alignment of the corresponding
    ///   `core::sync::atomic` type (`Atomic::ALIGNMENT`, when `T: Atomic`)
    #[allow(clippy::missing_safety_doc)]
    pub const unsafe fn new(pointer: NonNull<T>) -> Self {
        AtomicPtr {
            pointer,
            reference: PhantomData,
        }
    }

    pub fn as_raw_ptr(self) -> NonNull<T> {
        self.pointer
    }
}
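
// Illustrative sketch (added for exposition; not part of the upstream file):
// wrapping a `NonNull<u32>` that refers to suitably aligned shared memory.
// The atomic operations themselves are defined in the `generic` and `ops`
// modules, which are not shown in this listing.
#[allow(dead_code)]
unsafe fn example_wrap(shared_word: NonNull<u32>) -> AtomicPtr<'static, u32> {
    // Safety (per `AtomicPtr::new`): `shared_word` must be aligned to
    // `mem::align_of::<atomic::AtomicU32>()` and must remain valid for
    // shared access for the chosen lifetime.
    let p = unsafe { AtomicPtr::new(shared_word) };
    debug_assert_eq!(p.as_raw_ptr(), shared_word);
    p
}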

#[allow(clippy::missing_safety_doc)]
pub unsafe trait Atomic: AtomicSealed + Copy {
    const ALIGNMENT: usize;
    const IS_SIGNED: bool;
}

use sealing::AtomicSealed;

mod sealing {
    pub trait AtomicSealed {}
}
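
// Illustrative sketch (added for exposition; not part of the upstream file):
// `ALIGNMENT` makes the alignment requirement on `AtomicPtr::new` checkable
// at runtime, e.g. in a debug assertion before constructing the wrapper.
#[allow(dead_code)]
fn example_is_suitably_aligned<T: Atomic>(pointer: NonNull<T>) -> bool {
    (pointer.as_ptr() as usize) % T::ALIGNMENT == 0
}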

macro_rules! impl_atomic {
    (
        $t:ty,
        $target_has_atomic_key:literal,
        $is_signed:literal,
        $analog_for_alignment:path
    ) => {
        // TODO these attributes are overly conservative
        #[cfg(target_has_atomic = $target_has_atomic_key)]
        unsafe impl Atomic for $t {
            const ALIGNMENT: usize = mem::align_of::<$analog_for_alignment>();
            const IS_SIGNED: bool = $is_signed;
        }

        impl AtomicSealed for $t {}
    };
}

macro_rules! impl_atomic_for_each_signedness {
    (
        $t_unsigned:ty,
        $t_signed:ty,
        $target_has_atomic_key:literal,
        $unsigned_analog_for_alignment:path,
        $signed_analog_for_alignment:path
    ) => {
        impl_atomic!(
            $t_unsigned,
            $target_has_atomic_key,
            false,
            $unsigned_analog_for_alignment
        );
        impl_atomic!(
            $t_signed,
            $target_has_atomic_key,
            true,
            $signed_analog_for_alignment
        );
    };
}
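
// For reference (comment added for exposition): the unsigned half of the
// first invocation below, `impl_atomic!(u8, "8", false, atomic::AtomicU8)`,
// expands to roughly:
//
//     #[cfg(target_has_atomic = "8")]
//     unsafe impl Atomic for u8 {
//         const ALIGNMENT: usize = mem::align_of::<atomic::AtomicU8>();
//         const IS_SIGNED: bool = false;
//     }
//
//     impl AtomicSealed for u8 {}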

impl_atomic_for_each_signedness!(u8, i8, "8", atomic::AtomicU8, atomic::AtomicI8);
impl_atomic_for_each_signedness!(u16, i16, "16", atomic::AtomicU16, atomic::AtomicI16);
impl_atomic_for_each_signedness!(u32, i32, "32", atomic::AtomicU32, atomic::AtomicI32);
impl_atomic_for_each_signedness!(u64, i64, "64", atomic::AtomicU64, atomic::AtomicI64);

#[cfg(target_pointer_width = "32")]
impl_atomic_for_each_signedness!(usize, isize, "32", atomic::AtomicUsize, atomic::AtomicIsize);

#[cfg(target_pointer_width = "64")]
impl_atomic_for_each_signedness!(usize, isize, "64", atomic::AtomicUsize, atomic::AtomicIsize);