// sel4_shared_memory/atomic_ops/mod.rs

use core::sync::atomic::{self, Ordering};

use aligned::Aligned;
use zerocopy::{FromBytes, IntoBytes};

use sel4_abstract_ptr::memory_type::AtomicOps;

use crate::SharedMemory;

mod generic;
mod ordering;

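/// Types that can be operated on atomically in [`SharedMemory`].
///
/// `Value` is the plain integer representation that the atomic operations
/// below take and return. The private `AtomicSealed` supertrait seals the
/// trait and records whether that representation is signed.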
#[allow(private_bounds)]
pub trait Atomic: AtomicSealed {
    type Value: Copy + FromBytes + IntoBytes;
}

trait AtomicSealed {
    const IS_SIGNED: bool;
}

impl<A: Atomic> Atomic for Aligned<A, A::Value> {
    type Value = A::Value;
}

impl<A: Atomic> AtomicSealed for Aligned<A, A::Value> {
    const IS_SIGNED: bool = A::IS_SIGNED;
}

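// Implements `Atomic`/`AtomicSealed` for a standard library atomic type, and
// additionally for its plain integer type on targets where the two have the
// same alignment.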
macro_rules! impl_atomic {
    (
        $atomic:path,
        $value:ty,
        $target_has_atomic_key:literal,
        $is_signed:literal
    ) => {
        #[cfg(target_has_atomic = $target_has_atomic_key)]
        impl Atomic for $atomic {
            type Value = $value;
        }

        #[cfg(target_has_atomic = $target_has_atomic_key)]
        impl AtomicSealed for $atomic {
            const IS_SIGNED: bool = $is_signed;
        }

        #[cfg(target_has_atomic = $target_has_atomic_key)]
        #[cfg(target_has_atomic_equal_alignment = $target_has_atomic_key)]
        impl Atomic for $value {
            type Value = $value;
        }

        #[cfg(target_has_atomic = $target_has_atomic_key)]
        #[cfg(target_has_atomic_equal_alignment = $target_has_atomic_key)]
        impl AtomicSealed for $value {
            const IS_SIGNED: bool = $is_signed;
        }
    };
}

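// For illustration (kept as a comment so as not to duplicate the generated
// impls), `impl_atomic!(atomic::AtomicU8, u8, "8", false)` expands to roughly:
//
//     #[cfg(target_has_atomic = "8")]
//     impl Atomic for atomic::AtomicU8 {
//         type Value = u8;
//     }
//
//     #[cfg(target_has_atomic = "8")]
//     impl AtomicSealed for atomic::AtomicU8 {
//         const IS_SIGNED: bool = false;
//     }
//
// plus the same pair of impls for the bare `u8`, gated additionally on
// `target_has_atomic_equal_alignment = "8"`.
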
macro_rules! impl_atomic_for_each_signedness {
    (
        $value_unsigned:ty,
        $value_signed:ty,
        $target_has_atomic_key:literal,
        $atomic_unsigned:path,
        $atomic_signed:path
    ) => {
        impl_atomic!(
            $atomic_unsigned,
            $value_unsigned,
            $target_has_atomic_key,
            false
        );
        impl_atomic!($atomic_signed, $value_signed, $target_has_atomic_key, true);
    };
}

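// Cover every fixed-width integer for which the target provides atomics.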
impl_atomic_for_each_signedness!(u8, i8, "8", atomic::AtomicU8, atomic::AtomicI8);
impl_atomic_for_each_signedness!(u16, i16, "16", atomic::AtomicU16, atomic::AtomicI16);
impl_atomic_for_each_signedness!(u32, i32, "32", atomic::AtomicU32, atomic::AtomicI32);
impl_atomic_for_each_signedness!(u64, i64, "64", atomic::AtomicU64, atomic::AtomicI64);

// `usize`/`isize` use whichever atomic width matches the target's pointer width.
#[cfg(target_pointer_width = "32")]
impl_atomic_for_each_signedness!(usize, isize, "32", atomic::AtomicUsize, atomic::AtomicIsize);

#[cfg(target_pointer_width = "64")]
impl_atomic_for_each_signedness!(usize, isize, "64", atomic::AtomicUsize, atomic::AtomicIsize);

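// Atomic access to `SharedMemory` is implemented by delegating each operation
// to the raw primitives in the `generic` module; `order.into()` converts
// `core::sync::atomic::Ordering` into the ordering representation used there.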
impl<T: Atomic> AtomicOps<T> for SharedMemory {
    type Value = T::Value;

    #[inline]
    unsafe fn atomic_store(dst: *mut T, val: Self::Value, order: Ordering) {
        unsafe { generic::atomic_store(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_load(dst: *const T, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_load(dst.cast(), order.into()) }
    }

    #[inline]
    unsafe fn atomic_swap(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_swap(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_add(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_add(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_sub(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_sub(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_compare_exchange(
        dst: *mut T,
        old: Self::Value,
        new: Self::Value,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Self::Value, Self::Value> {
        unsafe {
            generic::atomic_compare_exchange(dst.cast(), old, new, success.into(), failure.into())
        }
    }

    #[inline]
    unsafe fn atomic_compare_exchange_weak(
        dst: *mut T,
        old: Self::Value,
        new: Self::Value,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Self::Value, Self::Value> {
        unsafe {
            generic::atomic_compare_exchange_weak(
                dst.cast(),
                old,
                new,
                success.into(),
                failure.into(),
            )
        }
    }

    #[inline]
    unsafe fn atomic_and(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_and(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_nand(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_nand(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_or(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_or(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_xor(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_xor(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_max(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe {
            if T::IS_SIGNED {
                generic::atomic_max(dst.cast(), val, order.into())
            } else {
                generic::atomic_umax(dst.cast(), val, order.into())
            }
        }
    }

    #[inline]
    unsafe fn atomic_min(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe {
            if T::IS_SIGNED {
                generic::atomic_min(dst.cast(), val, order.into())
            } else {
                generic::atomic_umin(dst.cast(), val, order.into())
            }
        }
    }
}
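
// A minimal usage sketch (illustrative only; in practice these operations are
// reached via `sel4-abstract-ptr`'s pointer types rather than called by hand,
// and `counter` must point into suitably mapped shared memory):
//
//     use core::sync::atomic::{AtomicU32, Ordering};
//
//     unsafe fn bump(counter: *mut AtomicU32) -> u32 {
//         // Adds 1 to the shared counter; `atomic_add` returns the prior
//         // value, mirroring `AtomicU32::fetch_add`.
//         unsafe {
//             <SharedMemory as AtomicOps<AtomicU32>>::atomic_add(counter, 1, Ordering::SeqCst)
//         }
//     }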