sel4_shared_memory/atomic_ops/mod.rs

//
// Copyright 2023, Colias Group, LLC
//
// SPDX-License-Identifier: MIT OR Apache-2.0
//

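//! Atomic operations for `SharedMemory`.
//!
//! This module implements the `AtomicOps` memory-type interface from
//! `sel4_abstract_ptr` for `SharedMemory`, delegating each operation to
//! helpers in the crate-private `generic` submodule.
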
use core::sync::atomic::{self, Ordering};

use aligned::Aligned;
use zerocopy::{FromBytes, IntoBytes};

use sel4_abstract_ptr::memory_type::AtomicOps;

use crate::SharedMemory;

mod generic;
mod ordering;

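/// Atomic types from `core::sync::atomic` that shared-memory operations
/// accept, together with the plain value type each of them wraps.
///
/// The private `AtomicSealed` supertrait seals this trait, preventing
/// implementations outside this crate.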
#[allow(private_bounds)]
pub trait Atomic: AtomicSealed {
    type Value: Copy + FromBytes + IntoBytes;
}

trait AtomicSealed {
    const IS_SIGNED: bool;
}

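// An `Aligned` wrapper pairing an atomic type `A` with its value type is
// itself treated as atomic, with the same `Value` and signedness.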
impl<A: Atomic> Atomic for Aligned<A, A::Value> {
    type Value = A::Value;
}

impl<A: Atomic> AtomicSealed for Aligned<A, A::Value> {
    const IS_SIGNED: bool = A::IS_SIGNED;
}

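// Implements `Atomic` and `AtomicSealed` for a single `core::sync::atomic`
// type, gated on the corresponding `target_has_atomic` capability.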
macro_rules! impl_atomic {
    (
        $atomic:path,
        $value:ty,
        $target_has_atomic_key:literal,
        $is_signed:literal
    ) => {
        #[cfg(target_has_atomic = $target_has_atomic_key)]
        impl Atomic for $atomic {
            type Value = $value;
        }

        #[cfg(target_has_atomic = $target_has_atomic_key)]
        impl AtomicSealed for $atomic {
            const IS_SIGNED: bool = $is_signed;
        }
    };
}

macro_rules! impl_atomic_for_each_signedness {
    (
        $value_unsigned:ty,
        $value_signed:ty,
        $target_has_atomic_key:literal,
        $atomic_unsigned:path,
        $atomic_signed:path
    ) => {
        impl_atomic!(
            $atomic_unsigned,
            $value_unsigned,
            $target_has_atomic_key,
            false
        );
        impl_atomic!($atomic_signed, $value_signed, $target_has_atomic_key, true);
    };
}

impl_atomic_for_each_signedness!(u8, i8, "8", atomic::AtomicU8, atomic::AtomicI8);
impl_atomic_for_each_signedness!(u16, i16, "16", atomic::AtomicU16, atomic::AtomicI16);
impl_atomic_for_each_signedness!(u32, i32, "32", atomic::AtomicU32, atomic::AtomicI32);
impl_atomic_for_each_signedness!(u64, i64, "64", atomic::AtomicU64, atomic::AtomicI64);

#[cfg(target_pointer_width = "32")]
impl_atomic_for_each_signedness!(usize, isize, "32", atomic::AtomicUsize, atomic::AtomicIsize);

#[cfg(target_pointer_width = "64")]
impl_atomic_for_each_signedness!(usize, isize, "64", atomic::AtomicUsize, atomic::AtomicIsize);
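
// For illustration, each invocation above expands to a pair of `cfg`-gated
// impls per atomic type; the `u8`/`i8` case becomes:
//
//     #[cfg(target_has_atomic = "8")]
//     impl Atomic for atomic::AtomicU8 {
//         type Value = u8;
//     }
//
//     #[cfg(target_has_atomic = "8")]
//     impl AtomicSealed for atomic::AtomicU8 {
//         const IS_SIGNED: bool = false;
//     }
//
//     // ...plus the same two impls for `atomic::AtomicI8` with
//     // `IS_SIGNED = true`.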
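// Each method below forwards to the crate-private `generic` helpers, casting
// `dst` to the pointer type they expect and converting the `core` ordering
// with `.into()`; `atomic_max`/`atomic_min` dispatch on `IS_SIGNED` to choose
// the signed or unsigned variant.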
impl<T: Atomic> AtomicOps<T> for SharedMemory {
    type Value = T::Value;

    #[inline]
    unsafe fn atomic_store(dst: *mut T, val: Self::Value, order: Ordering) {
        unsafe { generic::atomic_store(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_load(dst: *const T, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_load(dst.cast(), order.into()) }
    }

    #[inline]
    unsafe fn atomic_swap(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_swap(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_add(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_add(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_sub(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_sub(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_compare_exchange(
        dst: *mut T,
        old: Self::Value,
        new: Self::Value,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Self::Value, Self::Value> {
        unsafe {
            generic::atomic_compare_exchange(dst.cast(), old, new, success.into(), failure.into())
        }
    }

    #[inline]
    unsafe fn atomic_compare_exchange_weak(
        dst: *mut T,
        old: Self::Value,
        new: Self::Value,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Self::Value, Self::Value> {
        unsafe {
            generic::atomic_compare_exchange_weak(
                dst.cast(),
                old,
                new,
                success.into(),
                failure.into(),
            )
        }
    }

    #[inline]
    unsafe fn atomic_and(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_and(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_nand(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_nand(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_or(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_or(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_xor(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe { generic::atomic_xor(dst.cast(), val, order.into()) }
    }

    #[inline]
    unsafe fn atomic_max(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe {
            if T::IS_SIGNED {
                generic::atomic_max(dst.cast(), val, order.into())
            } else {
                generic::atomic_umax(dst.cast(), val, order.into())
            }
        }
    }

    #[inline]
    unsafe fn atomic_min(dst: *mut T, val: Self::Value, order: Ordering) -> Self::Value {
        unsafe {
            if T::IS_SIGNED {
                generic::atomic_min(dst.cast(), val, order.into())
            } else {
                generic::atomic_umin(dst.cast(), val, order.into())
            }
        }
    }
}
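
// Illustrative sketch (hypothetical, not part of this crate's tests): the impl
// above can be exercised directly through the `AtomicOps` trait. In practice
// `dst` would point into a mapped shared-memory region rather than a local
// value.
//
//     use core::sync::atomic::{AtomicU32, Ordering};
//
//     let mut cell = AtomicU32::new(0);
//     let ptr: *mut AtomicU32 = &mut cell;
//     unsafe {
//         <SharedMemory as AtomicOps<AtomicU32>>::atomic_store(ptr, 7, Ordering::Release);
//         let v = <SharedMemory as AtomicOps<AtomicU32>>::atomic_load(ptr, Ordering::Acquire);
//         assert_eq!(v, 7);
//     }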