//! x86-64 specific transports.

mod cam;
mod hypercalls;

use super::{
    pci::{
        bus::{ConfigurationAccess, DeviceFunction, PciRoot, PCI_CAP_ID_VNDR},
        device_type, CommonCfg, VirtioCapabilityInfo, VirtioPciError, CAP_BAR_OFFSET,
        CAP_BAR_OFFSET_OFFSET, CAP_LENGTH_OFFSET, CAP_NOTIFY_OFF_MULTIPLIER_OFFSET,
        VIRTIO_PCI_CAP_COMMON_CFG, VIRTIO_PCI_CAP_DEVICE_CFG, VIRTIO_PCI_CAP_ISR_CFG,
        VIRTIO_PCI_CAP_NOTIFY_CFG, VIRTIO_VENDOR_ID,
    },
    DeviceStatus, DeviceType, Transport,
};
use crate::{hal::PhysAddr, transport::InterruptStatus, Error};
pub use cam::HypCam;
use hypercalls::HypIoRegion;
use zerocopy::{FromBytes, Immutable, IntoBytes};

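/// Reads the given field of the common configuration structure from a hypercall-backed IO
/// region.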
macro_rules! configread {
    ($common_cfg:expr, $field:ident) => {
        $common_cfg.read(core::mem::offset_of!(CommonCfg, $field))
    };
}

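/// Writes the given field of the common configuration structure through a hypercall-backed IO
/// region.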
macro_rules! configwrite {
    ($common_cfg:expr, $field:ident, $value:expr) => {
        $common_cfg.write(core::mem::offset_of!(CommonCfg, $field), $value)
    };
}

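/// PCI transport for VirtIO which accesses the device's BAR regions through hypercall-backed IO
/// regions (`HypIoRegion`) rather than direct memory-mapped IO.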
#[derive(Debug)]
pub struct HypPciTransport {
    device_type: DeviceType,
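    /// The bus, device and function identifier for the VirtIO device.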
    device_function: DeviceFunction,
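    /// The common configuration structure within some BAR.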
    common_cfg: HypIoRegion,
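    /// The start of the queue notification region within some BAR.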
    notify_region: HypIoRegion,
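    /// The multiplier from a queue's `queue_notify_off` to its byte offset within the notify
    /// region.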
    notify_off_multiplier: u32,
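    /// The ISR status register within some BAR.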
    isr_status: HypIoRegion,
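    /// The VirtIO device-specific configuration space within some BAR, if any.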
    config_space: Option<HypIoRegion>,
}

impl HypPciTransport {
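    /// Constructs a new VirtIO PCI transport for the given device function on the given PCI
    /// root controller, or returns an error if the device is not a recognised VirtIO device or
    /// is missing required capabilities.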
    pub fn new<C: ConfigurationAccess>(
        root: &mut PciRoot<C>,
        device_function: DeviceFunction,
    ) -> Result<Self, VirtioPciError> {
        let device_vendor = root.configuration_access.read_word(device_function, 0);
        let device_id = (device_vendor >> 16) as u16;
        let vendor_id = device_vendor as u16;
        if vendor_id != VIRTIO_VENDOR_ID {
            return Err(VirtioPciError::InvalidVendorId(vendor_id));
        }
        let device_type =
            device_type(device_id).ok_or(VirtioPciError::InvalidDeviceId(device_id))?;

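        // Find the VirtIO vendor-specific capabilities which describe where each configuration
        // structure lives.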
        let mut common_cfg = None;
        let mut notify_cfg = None;
        let mut notify_off_multiplier = 0;
        let mut isr_cfg = None;
        let mut device_cfg = None;
        for capability in root.capabilities(device_function) {
            if capability.id != PCI_CAP_ID_VNDR {
                continue;
            }
            let cap_len = capability.private_header as u8;
            let cfg_type = (capability.private_header >> 8) as u8;
            if cap_len < 16 {
                continue;
            }
            let struct_info = VirtioCapabilityInfo {
                bar: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_BAR_OFFSET)
                    as u8,
                offset: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_BAR_OFFSET_OFFSET),
                length: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_LENGTH_OFFSET),
            };

            match cfg_type {
                VIRTIO_PCI_CAP_COMMON_CFG if common_cfg.is_none() => {
                    common_cfg = Some(struct_info);
                }
                VIRTIO_PCI_CAP_NOTIFY_CFG if cap_len >= 20 && notify_cfg.is_none() => {
                    notify_cfg = Some(struct_info);
                    notify_off_multiplier = root.configuration_access.read_word(
                        device_function,
                        capability.offset + CAP_NOTIFY_OFF_MULTIPLIER_OFFSET,
                    );
                }
                VIRTIO_PCI_CAP_ISR_CFG if isr_cfg.is_none() => {
                    isr_cfg = Some(struct_info);
                }
                VIRTIO_PCI_CAP_DEVICE_CFG if device_cfg.is_none() => {
                    device_cfg = Some(struct_info);
                }
                _ => {}
            }
        }

        let common_cfg = get_bar_region::<CommonCfg, _>(
            root,
            device_function,
            &common_cfg.ok_or(VirtioPciError::MissingCommonConfig)?,
        )?;

        let notify_cfg = notify_cfg.ok_or(VirtioPciError::MissingNotifyConfig)?;
        if notify_off_multiplier % 2 != 0 {
            return Err(VirtioPciError::InvalidNotifyOffMultiplier(
                notify_off_multiplier,
            ));
        }
        let notify_region = get_bar_region::<u16, _>(root, device_function, &notify_cfg)?;

        let isr_status = get_bar_region::<u8, _>(
            root,
            device_function,
            &isr_cfg.ok_or(VirtioPciError::MissingIsrConfig)?,
        )?;

        let config_space = if let Some(device_cfg) = device_cfg {
            Some(get_bar_region::<u32, _>(
                root,
                device_function,
                &device_cfg,
            )?)
        } else {
            None
        };

        Ok(Self {
            device_type,
            device_function,
            common_cfg,
            notify_region,
            notify_off_multiplier,
            isr_status,
            config_space,
        })
    }
}

impl Transport for HypPciTransport {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        configwrite!(self.common_cfg, device_feature_select, 0u32);
        let device_features_low: u32 = configread!(self.common_cfg, device_feature);
        configwrite!(self.common_cfg, device_feature_select, 1u32);
        let device_features_high: u32 = configread!(self.common_cfg, device_feature);
        ((device_features_high as u64) << 32) | (device_features_low as u64)
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        configwrite!(self.common_cfg, driver_feature_select, 0u32);
        configwrite!(self.common_cfg, driver_feature, driver_features as u32);
        configwrite!(self.common_cfg, driver_feature_select, 1u32);
        configwrite!(
            self.common_cfg,
            driver_feature,
            (driver_features >> 32) as u32
        );
    }

    fn max_queue_size(&mut self, queue: u16) -> u32 {
        configwrite!(self.common_cfg, queue_select, queue);
        let queue_size: u16 = configread!(self.common_cfg, queue_size);
        queue_size.into()
    }

    fn notify(&mut self, queue: u16) {
        configwrite!(self.common_cfg, queue_select, queue);
        let queue_notify_off: u16 = configread!(self.common_cfg, queue_notify_off);

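        // Each queue's notification address is `queue_notify_off * notify_off_multiplier` bytes
        // into the notify region.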
        let offset_bytes = usize::from(queue_notify_off) * self.notify_off_multiplier as usize;
        self.notify_region.write(offset_bytes, queue);
    }

    fn get_status(&self) -> DeviceStatus {
        let status: u8 = configread!(self.common_cfg, device_status);
        DeviceStatus::from_bits_truncate(status.into())
    }

    fn set_status(&mut self, status: DeviceStatus) {
        configwrite!(self.common_cfg, device_status, status.bits() as u8);
    }

    fn set_guest_page_size(&mut self, _guest_page_size: u32) {
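        // No-op: the PCI transport doesn't use a guest page size.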
    }

    fn requires_legacy_layout(&self) -> bool {
        false
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        configwrite!(self.common_cfg, queue_select, queue);
        configwrite!(self.common_cfg, queue_size, size as u16);
        configwrite!(self.common_cfg, queue_desc, descriptors);
        configwrite!(self.common_cfg, queue_driver, driver_area);
        configwrite!(self.common_cfg, queue_device, device_area);
        configwrite!(self.common_cfg, queue_enable, 1u16);
    }

    fn queue_unset(&mut self, _queue: u16) {
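        // The VirtIO spec doesn't allow queues to be unset once they have been set up for the
        // PCI transport, so this is a no-op.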
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        configwrite!(self.common_cfg, queue_select, queue);
        let queue_enable: u16 = configread!(self.common_cfg, queue_enable);
        queue_enable == 1
    }

    fn ack_interrupt(&mut self) -> InterruptStatus {
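        // Reading the ISR status register also resets it to 0 and causes the device to
        // de-assert the interrupt.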
        let isr_status: u8 = self.isr_status.read(0);
        InterruptStatus::from_bits_truncate(isr_status.into())
    }

    fn read_config_generation(&self) -> u32 {
        configread!(self.common_cfg, config_generation)
    }

    fn read_config_space<T: FromBytes>(&self, offset: usize) -> Result<T, Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert_eq!(offset % align_of::<T>(), 0);

        let config_space = self.config_space.ok_or(Error::ConfigSpaceMissing)?;
        if config_space.size < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            Ok(config_space.read(offset))
        }
    }

    fn write_config_space<T: IntoBytes + Immutable>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert_eq!(offset % align_of::<T>(), 0);

        let config_space = self.config_space.ok_or(Error::ConfigSpaceMissing)?;
        if config_space.size < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            config_space.write(offset, value);
            Ok(())
        }
    }
}

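/// Finds the memory BAR region described by the given VirtIO capability info and returns it as a
/// hypercall-backed IO region, after validating its bounds and alignment for `T`.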
fn get_bar_region<T, C: ConfigurationAccess>(
    root: &mut PciRoot<C>,
    device_function: DeviceFunction,
    struct_info: &VirtioCapabilityInfo,
) -> Result<HypIoRegion, VirtioPciError> {
    let bar_info = root
        .bar_info(device_function, struct_info.bar)?
        .ok_or(VirtioPciError::BarNotAllocated(struct_info.bar))?;
    let (bar_address, bar_size) = bar_info
        .memory_address_size()
        .ok_or(VirtioPciError::UnexpectedIoBar)?;
    if bar_address == 0 {
        return Err(VirtioPciError::BarNotAllocated(struct_info.bar));
    }
    if u64::from(struct_info.offset + struct_info.length) > bar_size
        || size_of::<T>() > struct_info.length as usize
    {
        return Err(VirtioPciError::BarOffsetOutOfRange);
    }
    let paddr = bar_address as PhysAddr + struct_info.offset as PhysAddr;
    if paddr % (align_of::<T>() as u64) != 0 {
        return Err(VirtioPciError::Misaligned {
            address: paddr as usize,
            alignment: align_of::<T>(),
        });
    }
    Ok(HypIoRegion {
        paddr,
        size: struct_info.length as usize,
    })
}