sel4_virtio_hal_impl/lib.rs
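//! A [`Hal`] implementation for `virtio-drivers`, backed by a single caller-provided
//! shared-memory DMA region.
//!
//! Every buffer handed to the device is a bounce buffer allocated out of that region, so
//! device-visible physical addresses are computed as offsets from the region's physical
//! base address. [`HalImpl::init`] must be called once, before any device is constructed.
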
#![no_std]

use core::alloc::Layout;
use core::ptr::{self, NonNull};

use one_shot_mutex::sync::OneShotMutex;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};

use sel4_abstract_allocator::basic::BasicAllocator;
use sel4_abstract_allocator::{ByRange, WithAlignmentBound};
use sel4_immediate_sync_once_cell::ImmediateSyncOnceCell;
use sel4_shared_memory::SharedMemoryRef;

static GLOBAL_STATE: ImmediateSyncOnceCell<OneShotMutex<State>> = ImmediateSyncOnceCell::new();

struct State {
    /// The shared memory region from which all bounce buffers are allocated.
    dma_region: SharedMemoryRef<'static, [u8]>,
    /// Physical address of the start of `dma_region`.
    dma_region_paddr: usize,
    /// Hands out offset ranges within `dma_region`.
    bounce_buffer_allocator: ByRange<WithAlignmentBound<BasicAllocator>>,
}

27
28impl State {
29 fn offset_to_paddr(&self, offset: usize) -> PhysAddr {
30 self.dma_region_paddr.checked_add(offset).unwrap()
31 }
32
33 fn paddr_to_offset(&self, paddr: PhysAddr) -> usize {
34 paddr.checked_sub(self.dma_region_paddr).unwrap()
35 }
36}
37
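/// The [`Hal`] implementation exported by this crate.
///
/// A minimal usage sketch, assuming the region size and addresses are obtained from the
/// surrounding seL4 environment (the device construction in the comment is illustrative
/// only and not part of this crate):
///
/// ```ignore
/// HalImpl::init(dma_region_size, dma_region_vaddr, dma_region_paddr);
/// // `HalImpl` is then used as the `Hal` type parameter of a virtio-drivers device,
/// // e.g. `VirtIOBlk::<HalImpl, SomeTransport>::new(transport)`.
/// ```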
pub struct HalImpl;

impl HalImpl {
    /// Registers the DMA region that backs all allocations made through this HAL.
    ///
    /// The region must be mapped at `dma_region_vaddr` in the current address space and
    /// start at physical address `dma_region_paddr`. Panics if called more than once.
    pub fn init(dma_region_size: usize, dma_region_vaddr: usize, dma_region_paddr: usize) {
        let dma_region_ptr = NonNull::new(ptr::slice_from_raw_parts_mut(
            dma_region_vaddr as *mut _,
            dma_region_size,
        ))
        .unwrap();

        let dma_region = unsafe { SharedMemoryRef::new(dma_region_ptr) };

        // Offsets are handed out relative to the start of the region, so the largest
        // alignment that can be guaranteed is the common alignment of the region's
        // virtual and physical base addresses.
        let max_alignment = 1
            << dma_region_vaddr
                .trailing_zeros()
                .min(dma_region_paddr.trailing_zeros());

        let bounce_buffer_allocator = ByRange::new(WithAlignmentBound::new(
            BasicAllocator::new(dma_region_size),
            max_alignment,
        ));

        GLOBAL_STATE
            .set(OneShotMutex::new(State {
                dma_region,
                dma_region_paddr,
                bounce_buffer_allocator,
            }))
            .ok()
            .unwrap();
    }
}

unsafe impl Hal for HalImpl {
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        assert!(pages > 0);
        let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
        // Carve a page-aligned range out of the shared DMA region and zero it before
        // handing it to the device.
        let bounce_buffer_range = state.bounce_buffer_allocator.allocate(layout).unwrap();
        let bounce_buffer_ptr = state
            .dma_region
            .as_mut_ptr()
            .index(bounce_buffer_range.clone());
        bounce_buffer_ptr.fill(0);
        let vaddr = bounce_buffer_ptr.as_raw_ptr().cast::<u8>();
        let paddr = state.offset_to_paddr(bounce_buffer_range.start);
        (paddr, vaddr)
    }

    unsafe fn dma_dealloc(paddr: PhysAddr, _vaddr: NonNull<u8>, pages: usize) -> i32 {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        // Recover the allocated range from the physical address and return it to the
        // bounce buffer allocator.
        let bounce_buffer_range = {
            let start = state.paddr_to_offset(paddr);
            let size = pages * PAGE_SIZE;
            start..(start + size)
        };
        state
            .bounce_buffer_allocator
            .deallocate(bounce_buffer_range);
        0
    }

    unsafe fn mmio_phys_to_virt(_paddr: PhysAddr, _size: usize) -> NonNull<u8> {
        // This HAL does not provide MMIO mappings.
        panic!()
    }

    unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        assert!(!buffer.is_empty());
        let layout = Layout::from_size_align(buffer.len(), 1).unwrap();
        let bounce_buffer_range = state.bounce_buffer_allocator.allocate(layout).unwrap();
        // Copy the driver's buffer into a fresh bounce buffer and hand the bounce buffer's
        // physical address to the device.
        state
            .dma_region
            .as_mut_ptr()
            .index(bounce_buffer_range.clone())
            .copy_from_slice(buffer.as_ref());
        state.offset_to_paddr(bounce_buffer_range.start)
    }

    unsafe fn unshare(paddr: PhysAddr, mut buffer: NonNull<[u8]>, direction: BufferDirection) {
        let mut state = GLOBAL_STATE.get().unwrap().lock();
        let bounce_buffer_range = {
            let start = state.paddr_to_offset(paddr);
            start..(start + buffer.len())
        };
        // If the device may have written to the bounce buffer, copy its contents back into
        // the driver's buffer before releasing the range.
        if direction != BufferDirection::DriverToDevice {
            state
                .dma_region
                .as_mut_ptr()
                .index(bounce_buffer_range.clone())
                .copy_into_slice(buffer.as_mut());
        }
        state
            .bounce_buffer_allocator
            .deallocate(bounce_buffer_range);
    }
}