/// Variant `NullFault` (tag 0 in bits 0..4) of the generated `seL4_Fault`
/// tagged union. Transparently wraps the same 20-word `u64` backing store as
/// every other variant, so variants can be reinterpreted without copying.
#[repr(transparent)]
#[derive(Clone, Eq, PartialEq)]
pub struct seL4_Fault_NullFault(pub SeL4Bitfield<u64, 20usize>);
4impl seL4_Fault_NullFault {
5 pub fn new() -> Self {
6 let mut this = Self(Bitfield::zeroed());
7 this.set_seL4_FaultType(seL4_Fault_tag::seL4_Fault_NullFault);
8 this
9 }
10 pub fn unpack(&self) -> seL4_Fault_NullFault_Unpacked {
11 seL4_Fault_NullFault_Unpacked {}
12 }
13 #[allow(dead_code)]
14 fn get_seL4_FaultType(&self) -> u64 {
15 self.0.get_bits(0usize..4usize)
16 }
17 fn set_seL4_FaultType(&mut self, seL4_FaultType: u64) {
18 self.0.set_bits(0usize..4usize, seL4_FaultType)
19 }
20 #[allow(dead_code)]
21 const fn width_of_seL4_FaultType() -> usize {
22 4usize - 0usize
23 }
24}
25impl fmt::Debug for seL4_Fault_NullFault {
26 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
27 self.unpack().fmt(f)?;
28 write!(f, ".pack()")?;
29 Ok(())
30 }
31}
/// Field-struct form of `seL4_Fault_NullFault`; the null fault carries no
/// payload, so this struct is empty.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct seL4_Fault_NullFault_Unpacked {}
34impl seL4_Fault_NullFault_Unpacked {
35 pub fn pack(self) -> seL4_Fault_NullFault {
36 match self {
37 Self {} => seL4_Fault_NullFault::new(),
38 }
39 }
40}
/// Variant `CapFault` (tag 1 in bits 0..4) of the generated `seL4_Fault`
/// tagged union. Payload fields each occupy a whole 64-bit word above the
/// tag word; same 20-word backing store as every other variant.
#[repr(transparent)]
#[derive(Clone, Eq, PartialEq)]
pub struct seL4_Fault_CapFault(pub SeL4Bitfield<u64, 20usize>);
impl seL4_Fault_CapFault {
    /// Builds a packed capability-fault record: zeroes the backing store,
    /// writes each payload field into its own 64-bit word, then stamps the
    /// `CapFault` tag into bits 0..4.
    pub fn new(
        IP: u64,
        Addr: u64,
        InRecvPhase: u64,
        LookupFailureType: u64,
        MR4: u64,
        MR5: u64,
        MR6: u64,
    ) -> Self {
        let mut this = Self(Bitfield::zeroed());
        this.set_IP(IP);
        this.set_Addr(Addr);
        this.set_InRecvPhase(InRecvPhase);
        this.set_LookupFailureType(LookupFailureType);
        this.set_MR4(MR4);
        this.set_MR5(MR5);
        this.set_MR6(MR6);
        this.set_seL4_FaultType(seL4_Fault_tag::seL4_Fault_CapFault);
        this
    }
    /// Copies every payload field out of the bitfield into a plain struct.
    pub fn unpack(&self) -> seL4_Fault_CapFault_Unpacked {
        seL4_Fault_CapFault_Unpacked {
            IP: self.get_IP(),
            Addr: self.get_Addr(),
            InRecvPhase: self.get_InRecvPhase(),
            LookupFailureType: self.get_LookupFailureType(),
            MR4: self.get_MR4(),
            MR5: self.get_MR5(),
            MR6: self.get_MR6(),
        }
    }
    // Generated per-field accessors. Bit ranges are word-aligned (64 bits
    // each) and non-overlapping, descending from the high words toward the
    // tag word at bits 0..4.
    #[allow(dead_code)]
    pub fn get_IP(&self) -> u64 {
        self.0.get_bits(448usize..512usize)
    }
    pub fn set_IP(&mut self, IP: u64) {
        self.0.set_bits(448usize..512usize, IP)
    }
    #[allow(dead_code)]
    pub const fn width_of_IP() -> usize {
        512usize - 448usize
    }
    #[allow(dead_code)]
    pub fn get_Addr(&self) -> u64 {
        self.0.get_bits(384usize..448usize)
    }
    pub fn set_Addr(&mut self, Addr: u64) {
        self.0.set_bits(384usize..448usize, Addr)
    }
    #[allow(dead_code)]
    pub const fn width_of_Addr() -> usize {
        448usize - 384usize
    }
    #[allow(dead_code)]
    pub fn get_InRecvPhase(&self) -> u64 {
        self.0.get_bits(320usize..384usize)
    }
    pub fn set_InRecvPhase(&mut self, InRecvPhase: u64) {
        self.0.set_bits(320usize..384usize, InRecvPhase)
    }
    #[allow(dead_code)]
    pub const fn width_of_InRecvPhase() -> usize {
        384usize - 320usize
    }
    #[allow(dead_code)]
    pub fn get_LookupFailureType(&self) -> u64 {
        self.0.get_bits(256usize..320usize)
    }
    pub fn set_LookupFailureType(&mut self, LookupFailureType: u64) {
        self.0.set_bits(256usize..320usize, LookupFailureType)
    }
    #[allow(dead_code)]
    pub const fn width_of_LookupFailureType() -> usize {
        320usize - 256usize
    }
    #[allow(dead_code)]
    pub fn get_MR4(&self) -> u64 {
        self.0.get_bits(192usize..256usize)
    }
    pub fn set_MR4(&mut self, MR4: u64) {
        self.0.set_bits(192usize..256usize, MR4)
    }
    #[allow(dead_code)]
    pub const fn width_of_MR4() -> usize {
        256usize - 192usize
    }
    #[allow(dead_code)]
    pub fn get_MR5(&self) -> u64 {
        self.0.get_bits(128usize..192usize)
    }
    pub fn set_MR5(&mut self, MR5: u64) {
        self.0.set_bits(128usize..192usize, MR5)
    }
    #[allow(dead_code)]
    pub const fn width_of_MR5() -> usize {
        192usize - 128usize
    }
    #[allow(dead_code)]
    pub fn get_MR6(&self) -> u64 {
        self.0.get_bits(64usize..128usize)
    }
    pub fn set_MR6(&mut self, MR6: u64) {
        self.0.set_bits(64usize..128usize, MR6)
    }
    #[allow(dead_code)]
    pub const fn width_of_MR6() -> usize {
        128usize - 64usize
    }
    /// Reads the variant tag from bits 0..4.
    #[allow(dead_code)]
    fn get_seL4_FaultType(&self) -> u64 {
        self.0.get_bits(0usize..4usize)
    }
    /// Writes the variant tag into bits 0..4.
    fn set_seL4_FaultType(&mut self, seL4_FaultType: u64) {
        self.0.set_bits(0usize..4usize, seL4_FaultType)
    }
    /// Width, in bits, of the tag field.
    #[allow(dead_code)]
    const fn width_of_seL4_FaultType() -> usize {
        4usize - 0usize
    }
}
165impl fmt::Debug for seL4_Fault_CapFault {
166 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
167 self.unpack().fmt(f)?;
168 write!(f, ".pack()")?;
169 Ok(())
170 }
171}
/// Field-struct form of `seL4_Fault_CapFault`: one plain `u64` per packed
/// 64-bit field, in the order the generator declared them.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct seL4_Fault_CapFault_Unpacked {
    pub IP: u64,
    pub Addr: u64,
    pub InRecvPhase: u64,
    pub LookupFailureType: u64,
    pub MR4: u64,
    pub MR5: u64,
    pub MR6: u64,
}
182impl seL4_Fault_CapFault_Unpacked {
183 pub fn pack(self) -> seL4_Fault_CapFault {
184 match self {
185 Self { IP, Addr, InRecvPhase, LookupFailureType, MR4, MR5, MR6 } => {
186 seL4_Fault_CapFault::new(
187 IP,
188 Addr,
189 InRecvPhase,
190 LookupFailureType,
191 MR4,
192 MR5,
193 MR6,
194 )
195 }
196 }
197 }
198}
/// Variant `UnknownSyscall` (tag 2 in bits 0..4) of the generated
/// `seL4_Fault` tagged union. Carries a full x86-64 register snapshot, one
/// 64-bit word per register, above the tag word.
#[repr(transparent)]
#[derive(Clone, Eq, PartialEq)]
pub struct seL4_Fault_UnknownSyscall(pub SeL4Bitfield<u64, 20usize>);
impl seL4_Fault_UnknownSyscall {
    /// Builds a packed unknown-syscall record: zeroes the backing store,
    /// writes each register value into its own 64-bit word, then stamps the
    /// `UnknownSyscall` tag into bits 0..4.
    pub fn new(
        RAX: u64,
        RBX: u64,
        RCX: u64,
        RDX: u64,
        RSI: u64,
        RDI: u64,
        RBP: u64,
        R8: u64,
        R9: u64,
        R10: u64,
        R11: u64,
        R12: u64,
        R13: u64,
        R14: u64,
        R15: u64,
        FaultIP: u64,
        RSP: u64,
        FLAGS: u64,
        Syscall: u64,
    ) -> Self {
        let mut this = Self(Bitfield::zeroed());
        this.set_RAX(RAX);
        this.set_RBX(RBX);
        this.set_RCX(RCX);
        this.set_RDX(RDX);
        this.set_RSI(RSI);
        this.set_RDI(RDI);
        this.set_RBP(RBP);
        this.set_R8(R8);
        this.set_R9(R9);
        this.set_R10(R10);
        this.set_R11(R11);
        this.set_R12(R12);
        this.set_R13(R13);
        this.set_R14(R14);
        this.set_R15(R15);
        this.set_FaultIP(FaultIP);
        this.set_RSP(RSP);
        this.set_FLAGS(FLAGS);
        this.set_Syscall(Syscall);
        this.set_seL4_FaultType(seL4_Fault_tag::seL4_Fault_UnknownSyscall);
        this
    }
    /// Copies every register field out of the bitfield into a plain struct.
    pub fn unpack(&self) -> seL4_Fault_UnknownSyscall_Unpacked {
        seL4_Fault_UnknownSyscall_Unpacked {
            RAX: self.get_RAX(),
            RBX: self.get_RBX(),
            RCX: self.get_RCX(),
            RDX: self.get_RDX(),
            RSI: self.get_RSI(),
            RDI: self.get_RDI(),
            RBP: self.get_RBP(),
            R8: self.get_R8(),
            R9: self.get_R9(),
            R10: self.get_R10(),
            R11: self.get_R11(),
            R12: self.get_R12(),
            R13: self.get_R13(),
            R14: self.get_R14(),
            R15: self.get_R15(),
            FaultIP: self.get_FaultIP(),
            RSP: self.get_RSP(),
            FLAGS: self.get_FLAGS(),
            Syscall: self.get_Syscall(),
        }
    }
    // Generated per-field accessors. Bit ranges are word-aligned (64 bits
    // each) and non-overlapping, descending from the high words (RAX at
    // 1216..1280) toward the tag word at bits 0..4.
    #[allow(dead_code)]
    pub fn get_RAX(&self) -> u64 {
        self.0.get_bits(1216usize..1280usize)
    }
    pub fn set_RAX(&mut self, RAX: u64) {
        self.0.set_bits(1216usize..1280usize, RAX)
    }
    #[allow(dead_code)]
    pub const fn width_of_RAX() -> usize {
        1280usize - 1216usize
    }
    #[allow(dead_code)]
    pub fn get_RBX(&self) -> u64 {
        self.0.get_bits(1152usize..1216usize)
    }
    pub fn set_RBX(&mut self, RBX: u64) {
        self.0.set_bits(1152usize..1216usize, RBX)
    }
    #[allow(dead_code)]
    pub const fn width_of_RBX() -> usize {
        1216usize - 1152usize
    }
    #[allow(dead_code)]
    pub fn get_RCX(&self) -> u64 {
        self.0.get_bits(1088usize..1152usize)
    }
    pub fn set_RCX(&mut self, RCX: u64) {
        self.0.set_bits(1088usize..1152usize, RCX)
    }
    #[allow(dead_code)]
    pub const fn width_of_RCX() -> usize {
        1152usize - 1088usize
    }
    #[allow(dead_code)]
    pub fn get_RDX(&self) -> u64 {
        self.0.get_bits(1024usize..1088usize)
    }
    pub fn set_RDX(&mut self, RDX: u64) {
        self.0.set_bits(1024usize..1088usize, RDX)
    }
    #[allow(dead_code)]
    pub const fn width_of_RDX() -> usize {
        1088usize - 1024usize
    }
    #[allow(dead_code)]
    pub fn get_RSI(&self) -> u64 {
        self.0.get_bits(960usize..1024usize)
    }
    pub fn set_RSI(&mut self, RSI: u64) {
        self.0.set_bits(960usize..1024usize, RSI)
    }
    #[allow(dead_code)]
    pub const fn width_of_RSI() -> usize {
        1024usize - 960usize
    }
    #[allow(dead_code)]
    pub fn get_RDI(&self) -> u64 {
        self.0.get_bits(896usize..960usize)
    }
    pub fn set_RDI(&mut self, RDI: u64) {
        self.0.set_bits(896usize..960usize, RDI)
    }
    #[allow(dead_code)]
    pub const fn width_of_RDI() -> usize {
        960usize - 896usize
    }
    #[allow(dead_code)]
    pub fn get_RBP(&self) -> u64 {
        self.0.get_bits(832usize..896usize)
    }
    pub fn set_RBP(&mut self, RBP: u64) {
        self.0.set_bits(832usize..896usize, RBP)
    }
    #[allow(dead_code)]
    pub const fn width_of_RBP() -> usize {
        896usize - 832usize
    }
    #[allow(dead_code)]
    pub fn get_R8(&self) -> u64 {
        self.0.get_bits(768usize..832usize)
    }
    pub fn set_R8(&mut self, R8: u64) {
        self.0.set_bits(768usize..832usize, R8)
    }
    #[allow(dead_code)]
    pub const fn width_of_R8() -> usize {
        832usize - 768usize
    }
    #[allow(dead_code)]
    pub fn get_R9(&self) -> u64 {
        self.0.get_bits(704usize..768usize)
    }
    pub fn set_R9(&mut self, R9: u64) {
        self.0.set_bits(704usize..768usize, R9)
    }
    #[allow(dead_code)]
    pub const fn width_of_R9() -> usize {
        768usize - 704usize
    }
    #[allow(dead_code)]
    pub fn get_R10(&self) -> u64 {
        self.0.get_bits(640usize..704usize)
    }
    pub fn set_R10(&mut self, R10: u64) {
        self.0.set_bits(640usize..704usize, R10)
    }
    #[allow(dead_code)]
    pub const fn width_of_R10() -> usize {
        704usize - 640usize
    }
    #[allow(dead_code)]
    pub fn get_R11(&self) -> u64 {
        self.0.get_bits(576usize..640usize)
    }
    pub fn set_R11(&mut self, R11: u64) {
        self.0.set_bits(576usize..640usize, R11)
    }
    #[allow(dead_code)]
    pub const fn width_of_R11() -> usize {
        640usize - 576usize
    }
    #[allow(dead_code)]
    pub fn get_R12(&self) -> u64 {
        self.0.get_bits(512usize..576usize)
    }
    pub fn set_R12(&mut self, R12: u64) {
        self.0.set_bits(512usize..576usize, R12)
    }
    #[allow(dead_code)]
    pub const fn width_of_R12() -> usize {
        576usize - 512usize
    }
    #[allow(dead_code)]
    pub fn get_R13(&self) -> u64 {
        self.0.get_bits(448usize..512usize)
    }
    pub fn set_R13(&mut self, R13: u64) {
        self.0.set_bits(448usize..512usize, R13)
    }
    #[allow(dead_code)]
    pub const fn width_of_R13() -> usize {
        512usize - 448usize
    }
    #[allow(dead_code)]
    pub fn get_R14(&self) -> u64 {
        self.0.get_bits(384usize..448usize)
    }
    pub fn set_R14(&mut self, R14: u64) {
        self.0.set_bits(384usize..448usize, R14)
    }
    #[allow(dead_code)]
    pub const fn width_of_R14() -> usize {
        448usize - 384usize
    }
    #[allow(dead_code)]
    pub fn get_R15(&self) -> u64 {
        self.0.get_bits(320usize..384usize)
    }
    pub fn set_R15(&mut self, R15: u64) {
        self.0.set_bits(320usize..384usize, R15)
    }
    #[allow(dead_code)]
    pub const fn width_of_R15() -> usize {
        384usize - 320usize
    }
    #[allow(dead_code)]
    pub fn get_FaultIP(&self) -> u64 {
        self.0.get_bits(256usize..320usize)
    }
    pub fn set_FaultIP(&mut self, FaultIP: u64) {
        self.0.set_bits(256usize..320usize, FaultIP)
    }
    #[allow(dead_code)]
    pub const fn width_of_FaultIP() -> usize {
        320usize - 256usize
    }
    #[allow(dead_code)]
    pub fn get_RSP(&self) -> u64 {
        self.0.get_bits(192usize..256usize)
    }
    pub fn set_RSP(&mut self, RSP: u64) {
        self.0.set_bits(192usize..256usize, RSP)
    }
    #[allow(dead_code)]
    pub const fn width_of_RSP() -> usize {
        256usize - 192usize
    }
    #[allow(dead_code)]
    pub fn get_FLAGS(&self) -> u64 {
        self.0.get_bits(128usize..192usize)
    }
    pub fn set_FLAGS(&mut self, FLAGS: u64) {
        self.0.set_bits(128usize..192usize, FLAGS)
    }
    #[allow(dead_code)]
    pub const fn width_of_FLAGS() -> usize {
        192usize - 128usize
    }
    #[allow(dead_code)]
    pub fn get_Syscall(&self) -> u64 {
        self.0.get_bits(64usize..128usize)
    }
    pub fn set_Syscall(&mut self, Syscall: u64) {
        self.0.set_bits(64usize..128usize, Syscall)
    }
    #[allow(dead_code)]
    pub const fn width_of_Syscall() -> usize {
        128usize - 64usize
    }
    /// Reads the variant tag from bits 0..4.
    #[allow(dead_code)]
    fn get_seL4_FaultType(&self) -> u64 {
        self.0.get_bits(0usize..4usize)
    }
    /// Writes the variant tag into bits 0..4.
    fn set_seL4_FaultType(&mut self, seL4_FaultType: u64) {
        self.0.set_bits(0usize..4usize, seL4_FaultType)
    }
    /// Width, in bits, of the tag field.
    #[allow(dead_code)]
    const fn width_of_seL4_FaultType() -> usize {
        4usize - 0usize
    }
}
491impl fmt::Debug for seL4_Fault_UnknownSyscall {
492 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
493 self.unpack().fmt(f)?;
494 write!(f, ".pack()")?;
495 Ok(())
496 }
497}
/// Field-struct form of `seL4_Fault_UnknownSyscall`: one plain `u64` per
/// packed 64-bit register field, in the order the generator declared them.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct seL4_Fault_UnknownSyscall_Unpacked {
    pub RAX: u64,
    pub RBX: u64,
    pub RCX: u64,
    pub RDX: u64,
    pub RSI: u64,
    pub RDI: u64,
    pub RBP: u64,
    pub R8: u64,
    pub R9: u64,
    pub R10: u64,
    pub R11: u64,
    pub R12: u64,
    pub R13: u64,
    pub R14: u64,
    pub R15: u64,
    pub FaultIP: u64,
    pub RSP: u64,
    pub FLAGS: u64,
    pub Syscall: u64,
}
520impl seL4_Fault_UnknownSyscall_Unpacked {
521 pub fn pack(self) -> seL4_Fault_UnknownSyscall {
522 match self {
523 Self {
524 RAX,
525 RBX,
526 RCX,
527 RDX,
528 RSI,
529 RDI,
530 RBP,
531 R8,
532 R9,
533 R10,
534 R11,
535 R12,
536 R13,
537 R14,
538 R15,
539 FaultIP,
540 RSP,
541 FLAGS,
542 Syscall,
543 } => {
544 seL4_Fault_UnknownSyscall::new(
545 RAX,
546 RBX,
547 RCX,
548 RDX,
549 RSI,
550 RDI,
551 RBP,
552 R8,
553 R9,
554 R10,
555 R11,
556 R12,
557 R13,
558 R14,
559 R15,
560 FaultIP,
561 RSP,
562 FLAGS,
563 Syscall,
564 )
565 }
566 }
567 }
568}
/// Variant `UserException` (tag 3 in bits 0..4) of the generated
/// `seL4_Fault` tagged union. Payload fields each occupy a whole 64-bit
/// word above the tag word.
#[repr(transparent)]
#[derive(Clone, Eq, PartialEq)]
pub struct seL4_Fault_UserException(pub SeL4Bitfield<u64, 20usize>);
impl seL4_Fault_UserException {
    /// Builds a packed user-exception record: zeroes the backing store,
    /// writes each payload field into its own 64-bit word, then stamps the
    /// `UserException` tag into bits 0..4.
    pub fn new(FaultIP: u64, Stack: u64, FLAGS: u64, Number: u64, Code: u64) -> Self {
        let mut this = Self(Bitfield::zeroed());
        this.set_FaultIP(FaultIP);
        this.set_Stack(Stack);
        this.set_FLAGS(FLAGS);
        this.set_Number(Number);
        this.set_Code(Code);
        this.set_seL4_FaultType(seL4_Fault_tag::seL4_Fault_UserException);
        this
    }
    /// Copies every payload field out of the bitfield into a plain struct.
    pub fn unpack(&self) -> seL4_Fault_UserException_Unpacked {
        seL4_Fault_UserException_Unpacked {
            FaultIP: self.get_FaultIP(),
            Stack: self.get_Stack(),
            FLAGS: self.get_FLAGS(),
            Number: self.get_Number(),
            Code: self.get_Code(),
        }
    }
    // Generated per-field accessors. Bit ranges are word-aligned (64 bits
    // each) and non-overlapping, descending from the high words toward the
    // tag word at bits 0..4.
    #[allow(dead_code)]
    pub fn get_FaultIP(&self) -> u64 {
        self.0.get_bits(320usize..384usize)
    }
    pub fn set_FaultIP(&mut self, FaultIP: u64) {
        self.0.set_bits(320usize..384usize, FaultIP)
    }
    #[allow(dead_code)]
    pub const fn width_of_FaultIP() -> usize {
        384usize - 320usize
    }
    #[allow(dead_code)]
    pub fn get_Stack(&self) -> u64 {
        self.0.get_bits(256usize..320usize)
    }
    pub fn set_Stack(&mut self, Stack: u64) {
        self.0.set_bits(256usize..320usize, Stack)
    }
    #[allow(dead_code)]
    pub const fn width_of_Stack() -> usize {
        320usize - 256usize
    }
    #[allow(dead_code)]
    pub fn get_FLAGS(&self) -> u64 {
        self.0.get_bits(192usize..256usize)
    }
    pub fn set_FLAGS(&mut self, FLAGS: u64) {
        self.0.set_bits(192usize..256usize, FLAGS)
    }
    #[allow(dead_code)]
    pub const fn width_of_FLAGS() -> usize {
        256usize - 192usize
    }
    #[allow(dead_code)]
    pub fn get_Number(&self) -> u64 {
        self.0.get_bits(128usize..192usize)
    }
    pub fn set_Number(&mut self, Number: u64) {
        self.0.set_bits(128usize..192usize, Number)
    }
    #[allow(dead_code)]
    pub const fn width_of_Number() -> usize {
        192usize - 128usize
    }
    #[allow(dead_code)]
    pub fn get_Code(&self) -> u64 {
        self.0.get_bits(64usize..128usize)
    }
    pub fn set_Code(&mut self, Code: u64) {
        self.0.set_bits(64usize..128usize, Code)
    }
    #[allow(dead_code)]
    pub const fn width_of_Code() -> usize {
        128usize - 64usize
    }
    /// Reads the variant tag from bits 0..4.
    #[allow(dead_code)]
    fn get_seL4_FaultType(&self) -> u64 {
        self.0.get_bits(0usize..4usize)
    }
    /// Writes the variant tag into bits 0..4.
    fn set_seL4_FaultType(&mut self, seL4_FaultType: u64) {
        self.0.set_bits(0usize..4usize, seL4_FaultType)
    }
    /// Width, in bits, of the tag field.
    #[allow(dead_code)]
    const fn width_of_seL4_FaultType() -> usize {
        4usize - 0usize
    }
}
659impl fmt::Debug for seL4_Fault_UserException {
660 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
661 self.unpack().fmt(f)?;
662 write!(f, ".pack()")?;
663 Ok(())
664 }
665}
/// Field-struct form of `seL4_Fault_UserException`: one plain `u64` per
/// packed 64-bit field, in the order the generator declared them.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct seL4_Fault_UserException_Unpacked {
    pub FaultIP: u64,
    pub Stack: u64,
    pub FLAGS: u64,
    pub Number: u64,
    pub Code: u64,
}
674impl seL4_Fault_UserException_Unpacked {
675 pub fn pack(self) -> seL4_Fault_UserException {
676 match self {
677 Self { FaultIP, Stack, FLAGS, Number, Code } => {
678 seL4_Fault_UserException::new(FaultIP, Stack, FLAGS, Number, Code)
679 }
680 }
681 }
682}
/// Variant `VMFault` (tag 5 in bits 0..4) of the generated `seL4_Fault`
/// tagged union. Payload fields each occupy a whole 64-bit word above the
/// tag word.
#[repr(transparent)]
#[derive(Clone, Eq, PartialEq)]
pub struct seL4_Fault_VMFault(pub SeL4Bitfield<u64, 20usize>);
impl seL4_Fault_VMFault {
    /// Builds a packed VM-fault record: zeroes the backing store, writes
    /// each payload field into its own 64-bit word, then stamps the
    /// `VMFault` tag into bits 0..4.
    pub fn new(IP: u64, Addr: u64, PrefetchFault: u64, FSR: u64) -> Self {
        let mut this = Self(Bitfield::zeroed());
        this.set_IP(IP);
        this.set_Addr(Addr);
        this.set_PrefetchFault(PrefetchFault);
        this.set_FSR(FSR);
        this.set_seL4_FaultType(seL4_Fault_tag::seL4_Fault_VMFault);
        this
    }
    /// Copies every payload field out of the bitfield into a plain struct.
    pub fn unpack(&self) -> seL4_Fault_VMFault_Unpacked {
        seL4_Fault_VMFault_Unpacked {
            IP: self.get_IP(),
            Addr: self.get_Addr(),
            PrefetchFault: self.get_PrefetchFault(),
            FSR: self.get_FSR(),
        }
    }
    // Generated per-field accessors. Bit ranges are word-aligned (64 bits
    // each) and non-overlapping, descending from the high words toward the
    // tag word at bits 0..4.
    #[allow(dead_code)]
    pub fn get_IP(&self) -> u64 {
        self.0.get_bits(256usize..320usize)
    }
    pub fn set_IP(&mut self, IP: u64) {
        self.0.set_bits(256usize..320usize, IP)
    }
    #[allow(dead_code)]
    pub const fn width_of_IP() -> usize {
        320usize - 256usize
    }
    #[allow(dead_code)]
    pub fn get_Addr(&self) -> u64 {
        self.0.get_bits(192usize..256usize)
    }
    pub fn set_Addr(&mut self, Addr: u64) {
        self.0.set_bits(192usize..256usize, Addr)
    }
    #[allow(dead_code)]
    pub const fn width_of_Addr() -> usize {
        256usize - 192usize
    }
    #[allow(dead_code)]
    pub fn get_PrefetchFault(&self) -> u64 {
        self.0.get_bits(128usize..192usize)
    }
    pub fn set_PrefetchFault(&mut self, PrefetchFault: u64) {
        self.0.set_bits(128usize..192usize, PrefetchFault)
    }
    #[allow(dead_code)]
    pub const fn width_of_PrefetchFault() -> usize {
        192usize - 128usize
    }
    #[allow(dead_code)]
    pub fn get_FSR(&self) -> u64 {
        self.0.get_bits(64usize..128usize)
    }
    pub fn set_FSR(&mut self, FSR: u64) {
        self.0.set_bits(64usize..128usize, FSR)
    }
    #[allow(dead_code)]
    pub const fn width_of_FSR() -> usize {
        128usize - 64usize
    }
    /// Reads the variant tag from bits 0..4.
    #[allow(dead_code)]
    fn get_seL4_FaultType(&self) -> u64 {
        self.0.get_bits(0usize..4usize)
    }
    /// Writes the variant tag into bits 0..4.
    fn set_seL4_FaultType(&mut self, seL4_FaultType: u64) {
        self.0.set_bits(0usize..4usize, seL4_FaultType)
    }
    /// Width, in bits, of the tag field.
    #[allow(dead_code)]
    const fn width_of_seL4_FaultType() -> usize {
        4usize - 0usize
    }
}
760impl fmt::Debug for seL4_Fault_VMFault {
761 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
762 self.unpack().fmt(f)?;
763 write!(f, ".pack()")?;
764 Ok(())
765 }
766}
/// Field-struct form of `seL4_Fault_VMFault`: one plain `u64` per packed
/// 64-bit field, in the order the generator declared them.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct seL4_Fault_VMFault_Unpacked {
    pub IP: u64,
    pub Addr: u64,
    pub PrefetchFault: u64,
    pub FSR: u64,
}
774impl seL4_Fault_VMFault_Unpacked {
775 pub fn pack(self) -> seL4_Fault_VMFault {
776 match self {
777 Self { IP, Addr, PrefetchFault, FSR } => {
778 seL4_Fault_VMFault::new(IP, Addr, PrefetchFault, FSR)
779 }
780 }
781 }
782}
/// Discriminant values stored in bits 0..4 of every `seL4_Fault` variant.
///
/// NOTE(review): the numbering skips 4 and jumps to 5 for `VMFault` —
/// presumably a variant compiled out for this configuration; confirm against
/// the generating bitfield spec. `seL4_Fault::splay` panics on tag 4.
pub mod seL4_Fault_tag {
    pub const seL4_Fault_NullFault: u64 = 0;
    pub const seL4_Fault_CapFault: u64 = 1;
    pub const seL4_Fault_UnknownSyscall: u64 = 2;
    pub const seL4_Fault_UserException: u64 = 3;
    pub const seL4_Fault_VMFault: u64 = 5;
}
/// Type-erased fault value: the common representation of every fault
/// variant. The concrete variant is identified by the tag in bits 0..4;
/// use `splay` to recover it. Same 20-word `u64` backing store as the
/// per-variant wrapper types.
#[repr(transparent)]
#[derive(Clone, PartialEq, Eq)]
pub struct seL4_Fault(pub SeL4Bitfield<u64, 20usize>);
793impl seL4_Fault {
794 pub fn splay(self) -> seL4_Fault_Splayed {
795 match self.get_tag() {
796 seL4_Fault_tag::seL4_Fault_NullFault => {
797 seL4_Fault_Splayed::NullFault(seL4_Fault_NullFault(self.0))
798 }
799 seL4_Fault_tag::seL4_Fault_CapFault => {
800 seL4_Fault_Splayed::CapFault(seL4_Fault_CapFault(self.0))
801 }
802 seL4_Fault_tag::seL4_Fault_UnknownSyscall => {
803 seL4_Fault_Splayed::UnknownSyscall(seL4_Fault_UnknownSyscall(self.0))
804 }
805 seL4_Fault_tag::seL4_Fault_UserException => {
806 seL4_Fault_Splayed::UserException(seL4_Fault_UserException(self.0))
807 }
808 seL4_Fault_tag::seL4_Fault_VMFault => {
809 seL4_Fault_Splayed::VMFault(seL4_Fault_VMFault(self.0))
810 }
811 _ => panic!(),
812 }
813 }
814 pub fn get_tag(&self) -> u64 {
815 self.0.get_bits(0usize..4usize)
816 }
817}
818impl fmt::Debug for seL4_Fault {
819 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
820 self.clone().splay().fmt(f)?;
821 write!(f, ".unsplay()")?;
822 Ok(())
823 }
824}
/// The fault tagged union with its discriminant lifted into a Rust enum,
/// one variant per known tag value; produced by `seL4_Fault::splay`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum seL4_Fault_Splayed {
    NullFault(seL4_Fault_NullFault),
    CapFault(seL4_Fault_CapFault),
    UnknownSyscall(seL4_Fault_UnknownSyscall),
    UserException(seL4_Fault_UserException),
    VMFault(seL4_Fault_VMFault),
}
833impl seL4_Fault_Splayed {
834 pub fn unsplay(self) -> seL4_Fault {
835 match self {
836 seL4_Fault_Splayed::NullFault(seL4_Fault_NullFault(bitfield)) => {
837 seL4_Fault(bitfield)
838 }
839 seL4_Fault_Splayed::CapFault(seL4_Fault_CapFault(bitfield)) => {
840 seL4_Fault(bitfield)
841 }
842 seL4_Fault_Splayed::UnknownSyscall(seL4_Fault_UnknownSyscall(bitfield)) => {
843 seL4_Fault(bitfield)
844 }
845 seL4_Fault_Splayed::UserException(seL4_Fault_UserException(bitfield)) => {
846 seL4_Fault(bitfield)
847 }
848 seL4_Fault_Splayed::VMFault(seL4_Fault_VMFault(bitfield)) => {
849 seL4_Fault(bitfield)
850 }
851 }
852 }
853}
854impl seL4_Fault_NullFault {
855 pub fn unsplay(self) -> seL4_Fault {
856 seL4_Fault(self.0)
857 }
858}
859impl seL4_Fault_NullFault_Unpacked {
860 pub fn unsplay(self) -> seL4_Fault {
861 self.pack().unsplay()
862 }
863}
864impl seL4_Fault_CapFault {
865 pub fn unsplay(self) -> seL4_Fault {
866 seL4_Fault(self.0)
867 }
868}
869impl seL4_Fault_CapFault_Unpacked {
870 pub fn unsplay(self) -> seL4_Fault {
871 self.pack().unsplay()
872 }
873}
874impl seL4_Fault_UnknownSyscall {
875 pub fn unsplay(self) -> seL4_Fault {
876 seL4_Fault(self.0)
877 }
878}
879impl seL4_Fault_UnknownSyscall_Unpacked {
880 pub fn unsplay(self) -> seL4_Fault {
881 self.pack().unsplay()
882 }
883}
884impl seL4_Fault_UserException {
885 pub fn unsplay(self) -> seL4_Fault {
886 seL4_Fault(self.0)
887 }
888}
889impl seL4_Fault_UserException_Unpacked {
890 pub fn unsplay(self) -> seL4_Fault {
891 self.pack().unsplay()
892 }
893}
894impl seL4_Fault_VMFault {
895 pub fn unsplay(self) -> seL4_Fault {
896 seL4_Fault(self.0)
897 }
898}
899impl seL4_Fault_VMFault_Unpacked {
900 pub fn unsplay(self) -> seL4_Fault {
901 self.pack().unsplay()
902 }
903}