sel4_virtio_hal_impl/lib.rs

#![no_std]

use core::alloc::Layout;
use core::ptr::{self, NonNull};

use one_shot_mutex::sync::OneShotMutex;
use virtio_drivers::{BufferDirection, Hal, PAGE_SIZE, PhysAddr};

use sel4_abstract_allocator::basic::BasicAllocator;
use sel4_abstract_allocator::{ByRange, WithAlignmentBound};
use sel4_immediate_sync_once_cell::ImmediateSyncOnceCell;
use sel4_shared_memory::SharedMemoryRef;

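/// Global HAL state, set once by `HalImpl::init` and guarded by a one-shot mutex.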
static GLOBAL_STATE: ImmediateSyncOnceCell<OneShotMutex<State>> = ImmediateSyncOnceCell::new();

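/// State shared by all HAL operations: the mapped DMA region, its physical base
/// address, and the allocator that hands out bounce buffer ranges within it.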
struct State {
    dma_region: SharedMemoryRef<'static, [u8]>,
    dma_region_paddr: usize,
    bounce_buffer_allocator: ByRange<WithAlignmentBound<BasicAllocator>>,
}

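// Helpers for translating between offsets into the DMA region and the
// device-visible physical addresses used by virtio-drivers.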
impl State {
    fn offset_to_paddr(&self, offset: usize) -> PhysAddr {
        self.dma_region_paddr
            .checked_add(offset)
            .unwrap()
            .try_into()
            .unwrap()
    }

    fn paddr_to_offset(&self, paddr: PhysAddr) -> usize {
        usize::try_from(paddr)
            .unwrap()
            .checked_sub(self.dma_region_paddr)
            .unwrap()
    }
}

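/// A `virtio_drivers::Hal` implementation that bounce-buffers all DMA through a
/// single pre-mapped shared memory region.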
pub struct HalImpl;

impl HalImpl {
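    /// Registers the DMA region given by its size, virtual address, and physical
    /// address. The bounce buffer allocator's alignment bound is limited by the
    /// alignment of the region's virtual and physical base addresses. Panics if
    /// called more than once.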
    pub fn init(dma_region_size: usize, dma_region_vaddr: usize, dma_region_paddr: usize) {
        let dma_region_ptr = NonNull::new(ptr::slice_from_raw_parts_mut(
            dma_region_vaddr as *mut _,
            dma_region_size,
        ))
        .unwrap();

        let dma_region = unsafe { SharedMemoryRef::new(dma_region_ptr) };

        let max_alignment = 1
            << dma_region_vaddr
                .trailing_zeros()
                .min(dma_region_paddr.trailing_zeros());

        let bounce_buffer_allocator = ByRange::new(WithAlignmentBound::new(
            BasicAllocator::new(dma_region_size),
            max_alignment,
        ));

        GLOBAL_STATE
            .set(OneShotMutex::new(State {
                dma_region,
                dma_region_paddr,
                bounce_buffer_allocator,
            }))
            .ok()
            .unwrap();
    }
}

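// The `Hal` trait is unsafe to implement: virtio-drivers relies on the returned
// physical addresses being valid for device DMA. Every address handed out here lies
// within the DMA region registered by `HalImpl::init`.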
unsafe impl Hal for HalImpl {
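    // Allocate `pages` zeroed, page-aligned pages from the bounce buffer region and
    // return both the device-visible physical address and the driver-visible pointer.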
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        assert!(pages > 0);
        let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
        let bounce_buffer_range = state.bounce_buffer_allocator.allocate(layout).unwrap();
        let bounce_buffer_ptr = state
            .dma_region
            .as_mut_ptr()
            .index(bounce_buffer_range.clone());
        bounce_buffer_ptr.fill(0);
        let vaddr = bounce_buffer_ptr.as_raw_ptr().cast::<u8>();
        let paddr = state.offset_to_paddr(bounce_buffer_range.start);
        (paddr, vaddr)
    }

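    // Release a range previously returned by `dma_alloc`, identified by its physical
    // address and length in pages. Always reports success.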
    unsafe fn dma_dealloc(paddr: PhysAddr, _vaddr: NonNull<u8>, pages: usize) -> i32 {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        let bounce_buffer_range = {
            let start = state.paddr_to_offset(paddr);
            let size = pages * PAGE_SIZE;
            start..(start + size)
        };
        state
            .bounce_buffer_allocator
            .deallocate(bounce_buffer_range);
        0
    }

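    // Translating MMIO physical addresses is not supported by this HAL.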
    unsafe fn mmio_phys_to_virt(_paddr: PhysAddr, _size: usize) -> NonNull<u8> {
        panic!()
    }

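    // Copy the caller's buffer into a freshly allocated bounce buffer within the DMA
    // region and return the physical address to hand to the device.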
    unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        assert!(!buffer.is_empty());
        let layout = Layout::from_size_align(buffer.len(), 1).unwrap();
        let bounce_buffer_range = state.bounce_buffer_allocator.allocate(layout).unwrap();
        let buffer_slice = unsafe { buffer.as_ref() };
        state
            .dma_region
            .as_mut_ptr()
            .index(bounce_buffer_range.clone())
            .copy_from_slice(buffer_slice);
        state.offset_to_paddr(bounce_buffer_range.start)
    }

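    // Copy data back out of the bounce buffer (unless the transfer was purely
    // driver-to-device), then release the bounce buffer range.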
    unsafe fn unshare(paddr: PhysAddr, mut buffer: NonNull<[u8]>, direction: BufferDirection) {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        let bounce_buffer_range = {
            let start = state.paddr_to_offset(paddr);
            start..(start + buffer.len())
        };
        if direction != BufferDirection::DriverToDevice {
            let buffer_slice = unsafe { buffer.as_mut() };
            state
                .dma_region
                .as_mut_ptr()
                .index(bounce_buffer_range.clone())
                .copy_into_slice(buffer_slice);
        }
        state
            .bounce_buffer_allocator
            .deallocate(bounce_buffer_range);
    }
}
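
For reference, a hypothetical consumer would initialize the HAL once, before constructing any virtio-drivers device. The function name and addresses below are placeholders, not part of this crate:

fn setup_hal() {
    // Placeholder values: the region must already be mapped at `dma_region_vaddr`
    // and backed by physically contiguous memory starting at `dma_region_paddr`.
    let dma_region_size: usize = 2 * 1024 * 1024;
    let dma_region_vaddr: usize = 0x4000_0000;
    let dma_region_paddr: usize = 0x6000_0000;
    HalImpl::init(dma_region_size, dma_region_vaddr, dma_region_paddr);
    // `HalImpl` can now be used as the `H: Hal` type parameter of
    // virtio-drivers device types.
}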