#![no_std]

use core::alloc::Layout;
use core::mem;
use core::ptr;
use core::slice;

#[cfg(feature = "alloc")]
extern crate alloc;

#[cfg(not(any(
    target_arch = "aarch64",
    target_arch = "arm",
    target_arch = "riscv32",
    target_arch = "riscv64",
    target_arch = "x86_64",
)))]
compile_error!("unsupported architecture");

mod set_thread_pointer;

pub use set_thread_pointer::{DEFAULT_SET_THREAD_POINTER_FN, SetThreadPointerFn};

mod static_allocation;
pub use static_allocation::*;

#[cfg(feature = "on-stack")]
mod on_stack;

#[cfg(feature = "on-heap")]
mod on_heap;

#[cfg(feature = "on-heap")]
pub use on_heap::*;

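/// Parameters describing a program's TLS initialization image, typically taken
/// directly from an ELF `PT_TLS` program header (`p_vaddr`, `p_filesz`,
/// `p_memsz`, `p_align`). The values are not validated until [`Self::check`]
/// is called.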
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct UncheckedTlsImage {
    pub vaddr: usize,
    pub filesz: usize,
    pub memsz: usize,
    pub align: usize,
}

impl UncheckedTlsImage {
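    /// Validates these parameters, requiring `memsz >= filesz` and a non-zero,
    /// power-of-two `align`, and returns a checked [`TlsImage`] on success.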
    pub fn check(&self) -> Result<TlsImage, InvalidTlsImageError> {
        if self.memsz >= self.filesz && self.align.is_power_of_two() && self.align > 0 {
            Ok(TlsImage { checked: *self })
        } else {
            Err(InvalidTlsImageError::new())
        }
    }
}

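/// Error returned by [`UncheckedTlsImage::check`] when the image parameters are
/// inconsistent.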
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct InvalidTlsImageError(());

impl InvalidTlsImageError {
    fn new() -> Self {
        Self(())
    }
}

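/// A validated TLS image whose parameters are known to describe a well-formed
/// TLS segment.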
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TlsImage {
    checked: UncheckedTlsImage,
}

impl TlsImage {
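    /// Returns the layout of the memory reservation needed to hold one
    /// thread's copy of this TLS segment, including any architecture-specific
    /// thread control block (TCB) space.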
    pub fn reservation_layout(&self) -> TlsReservationLayout {
        TlsReservationLayout::from_segment_layout(self.segment_layout())
    }

    fn segment_layout(&self) -> Layout {
        Layout::from_size_align(self.checked.memsz, self.checked.align).unwrap()
    }

    fn image_data(&self) -> *const [u8] {
        ptr::slice_from_raw_parts(self.checked.vaddr as *mut u8, self.checked.filesz)
    }

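    /// Copies the initialization image (`.tdata`) into the reservation
    /// beginning at `reservation_start`, zeroes the remainder of the segment
    /// (`.tbss`), and returns the value to install as the thread pointer. On
    /// x86_64, the word at the thread pointer location is also set to the
    /// thread pointer value itself, since the ABI expects a self-referential
    /// word there.
    ///
    /// Callers must ensure that `reservation_start` points to writable memory
    /// satisfying [`Self::reservation_layout`] and that the image data
    /// described by this `TlsImage` is readable.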
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn initialize_reservation(&self, reservation_start: *mut u8) -> usize {
        let reservation_layout = self.reservation_layout();
        let reservation = unsafe {
            slice::from_raw_parts_mut(reservation_start, reservation_layout.footprint().size())
        };
        let image_data = unsafe { self.image_data().as_ref().unwrap() };
        let (tdata, tbss) = reservation[reservation_layout.segment_offset()..]
            [..self.checked.memsz]
            .split_at_mut(self.checked.filesz);
        tdata.copy_from_slice(image_data);
        tbss.fill(0);
        let thread_pointer = (reservation_start as usize)
            .checked_add(reservation_layout.thread_pointer_offset())
            .unwrap();
        if cfg!(target_arch = "x86_64") {
            let thread_pointer_slice = &mut reservation
                [reservation_layout.thread_pointer_offset()..][..mem::size_of::<usize>()];
            thread_pointer_slice.copy_from_slice(&thread_pointer.to_ne_bytes());
        }
        thread_pointer
    }

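    /// Like [`Self::initialize_reservation`], but takes a [`Region`] and fails
    /// if the region's size and alignment do not exactly match
    /// [`Self::reservation_layout`].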
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn initialize_exact_reservation_region(
        &self,
        exact_reservation: &Region,
    ) -> Result<usize, RegionLayoutError> {
        if exact_reservation.fits_exactly(self.reservation_layout().footprint()) {
            Ok(unsafe { self.initialize_reservation(exact_reservation.start()) })
        } else {
            Err(RegionLayoutError::new())
        }
    }

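    /// Like [`Self::initialize_reservation`], but carves a suitably aligned
    /// sub-region out of `inexact_reservation`, failing if the region cannot
    /// accommodate [`Self::reservation_layout`] once its start is aligned up.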
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn initialize_inexact_reservation_region(
        &self,
        inexact_reservation: &Region,
    ) -> Result<usize, RegionLayoutError> {
        if let Ok(TrimmedRegion { trimmed, .. }) =
            inexact_reservation.trim(self.reservation_layout().footprint())
        {
            Ok(unsafe { self.initialize_exact_reservation_region(&trimmed).unwrap() })
        } else {
            Err(RegionLayoutError::new())
        }
    }
}

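/// Error returned when a [`Region`] cannot accommodate the reservation layout
/// required by a [`TlsImage`].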
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct RegionLayoutError(());

impl RegionLayoutError {
    fn new() -> Self {
        Self(())
    }
}

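/// Layout of a per-thread TLS reservation: its overall footprint, the offset of
/// the TLS segment within it, and the offset at which the thread pointer should
/// point.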
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TlsReservationLayout {
    footprint: Layout,
    segment_offset: usize,
    thread_pointer_offset: usize,
}

impl TlsReservationLayout {
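    // The reservation shape depends on the architecture's TLS variant: on arm
    // and aarch64 a two-word TCB precedes the segment and the thread pointer
    // points at the TCB; on riscv the thread pointer points directly at the
    // segment; on x86_64 the segment is followed by a TCB whose first word
    // (the thread pointer target) must hold a pointer to itself.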
    fn from_segment_layout(segment_layout: Layout) -> Self {
        if cfg!(any(target_arch = "arm", target_arch = "aarch64")) {
            let tcb_size = 2 * mem::size_of::<usize>();
            let tcb_layout = Layout::from_size_align(tcb_size, tcb_size).unwrap();
            let (footprint, segment_offset) = tcb_layout.extend(segment_layout).unwrap();
            Self {
                footprint,
                segment_offset,
                thread_pointer_offset: 0,
            }
        } else if cfg!(any(target_arch = "riscv32", target_arch = "riscv64")) {
            Self {
                footprint: segment_layout,
                segment_offset: 0,
                thread_pointer_offset: 0,
            }
        } else if cfg!(target_arch = "x86_64") {
            let tcb_layout =
                Layout::from_size_align(2 * mem::size_of::<usize>(), mem::size_of::<usize>())
                    .unwrap();
            let (footprint, thread_pointer_offset) = segment_layout.extend(tcb_layout).unwrap();
            Self {
                footprint,
                segment_offset: 0,
                thread_pointer_offset,
            }
        } else {
            unreachable!();
        }
    }

    pub fn footprint(&self) -> Layout {
        self.footprint
    }

    pub fn segment_offset(&self) -> usize {
        self.segment_offset
    }

    pub fn thread_pointer_offset(&self) -> usize {
        self.thread_pointer_offset
    }
}

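/// A contiguous region of memory, described by a start pointer and a size in
/// bytes.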
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Region {
    start: *mut u8,
    size: usize,
}

impl Region {
    pub const fn new(start: *mut u8, size: usize) -> Self {
        Self { start, size }
    }

    pub const fn start(&self) -> *mut u8 {
        self.start
    }

    pub const fn size(&self) -> usize {
        self.size
    }

    fn fits_exactly(&self, layout: Layout) -> bool {
        self.size() == layout.size() && self.start().align_offset(layout.align()) == 0
    }

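    // Aligns the region's start up to `layout.align()` and carves out
    // `layout.size()` bytes, returning the leading padding, the carved-out
    // (trimmed) region, and the trailing remainder, or an error if the region
    // is too small.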
    fn trim(&self, layout: Layout) -> Result<TrimmedRegion, TrimRegionError> {
        let start_addr = self.start() as usize;
        let trimmed_start_addr = start_addr
            .checked_next_multiple_of(layout.align())
            .ok_or(TrimRegionError::new())?;
        let remainder_start_addr = trimmed_start_addr
            .checked_add(layout.size())
            .ok_or(TrimRegionError::new())?;
        let remainder_end_addr = start_addr
            .checked_add(self.size())
            .ok_or(TrimRegionError::new())?;
        if remainder_start_addr > remainder_end_addr {
            return Err(TrimRegionError::new());
        }
        Ok(TrimmedRegion {
            padding: Region::new(start_addr as *mut u8, trimmed_start_addr - start_addr),
            trimmed: Region::new(
                trimmed_start_addr as *mut u8,
                remainder_start_addr - trimmed_start_addr,
            ),
            remainder: Region::new(
                remainder_start_addr as *mut u8,
                remainder_end_addr - remainder_start_addr,
            ),
        })
    }
}

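// Result of `Region::trim`: the leading padding, the aligned trimmed region,
// and the trailing remainder.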
struct TrimmedRegion {
    #[allow(dead_code)]
    padding: Region,
    trimmed: Region,
    #[allow(dead_code)]
    remainder: Region,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct TrimRegionError(());

impl TrimRegionError {
    fn new() -> Self {
        Self(())
    }
}