// zerocopy/src/lib.rs
1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//!   certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//!   instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//!   arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//!   sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//!   qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//!   except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//!   one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//!   mutable reference of one type to a mutable reference of another type of
72//!   the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//!   mutable or immutable reference of one type to an immutable reference of
75//!   another type of the same size
76//!
77//! These macros perform *compile-time* size and alignment checks, meaning that
78//! unconditional casts have zero cost at runtime. Conditional casts do not need
//! to validate size or alignment at runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//!   the `alloc` crate is added as a dependency, and some allocation-related
95//!   functionality is added.
96//!
97//! - **`std`**
98//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
//! `std` crate is added as a dependency (i.e., `no_std` is disabled), and
100//!   support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//!   Provides derives for the core marker traits via the `zerocopy-derive`
104//!   crate. These derives are re-exported from `zerocopy`, so it is not
105//!   necessary to depend on `zerocopy-derive` directly.
106//!
107//!   However, you may experience better compile times if you instead directly
108//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//!   since doing so will allow Rust to compile these crates in parallel. To do
110//!   so, do *not* enable the `derive` feature, and list both dependencies in
111//!   your `Cargo.toml` with the same leading non-zero version number; e.g:
112//!
113//!   ```toml
114//!   [dependencies]
115//!   zerocopy = "0.X"
116//!   zerocopy-derive = "0.X"
117//!   ```
118//!
119//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//!   one of your dependencies enables zerocopy's `derive` feature, import
121//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//!   zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//!   target platform. Note that the layout of SIMD types is not yet stabilized,
128//!   so these impls may be removed in the future if layout changes make them
129//!   invalid. For more information, see the Unsafe Code Guidelines Reference
130//!   page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//!   Enables the `simd` feature and adds support for SIMD types which are only
134//!   available on nightly. Since these types are unstable, support for any type
135//!   may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//!   Adds support for the unstable `f16` and `f128` types. These types are
139//!   not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Security Ethos
145//!
146//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current
148//! memory model, and *any future memory model*. We ensure this by:
149//! - **...not 'guessing' about Rust's semantics.**
150//!   We annotate `unsafe` code with a precise rationale for its soundness that
151//!   cites a relevant section of Rust's official documentation. When Rust's
152//!   documented semantics are unclear, we work with the Rust Operational
153//!   Semantics Team to clarify Rust's documentation.
154//! - **...rigorously testing our implementation.**
155//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
156//!   array of supported target platforms of varying endianness and pointer
157//!   width, and across both current and experimental memory models of Rust.
158//! - **...formally proving the correctness of our implementation.**
159//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
160//!   correctness.
161//!
162//! For more information, see our full [soundness policy].
163//!
164//! [Miri]: https://github.com/rust-lang/miri
165//! [Kani]: https://github.com/model-checking/kani
166//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
167//!
168//! # Relationship to Project Safe Transmute
169//!
170//! [Project Safe Transmute] is an official initiative of the Rust Project to
171//! develop language-level support for safer transmutation. The Project consults
172//! with crates like zerocopy to identify aspects of safer transmutation that
173//! would benefit from compiler support, and has developed an [experimental,
174//! compiler-supported analysis][mcp-transmutability] which determines whether,
175//! for a given type, any value of that type may be soundly transmuted into
176//! another type. Once this functionality is sufficiently mature, zerocopy
177//! intends to replace its internal transmutability analysis (implemented by our
178//! custom derives) with the compiler-supported one. This change will likely be
179//! an implementation detail that is invisible to zerocopy's users.
180//!
181//! Project Safe Transmute will not replace the need for most of zerocopy's
182//! higher-level abstractions. The experimental compiler analysis is a tool for
183//! checking the soundness of `unsafe` code, not a tool to avoid writing
184//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
185//! will still be required in order to provide higher-level abstractions on top
186//! of the building block provided by Project Safe Transmute.
187//!
188//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
189//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
190//!
191//! # MSRV
192//!
193//! See our [MSRV policy].
194//!
195//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
196//!
197//! # Changelog
198//!
199//! Zerocopy uses [GitHub Releases].
200//!
201//! [GitHub Releases]: https://github.com/google/zerocopy/releases
202//!
203//! # Thanks
204//!
205//! Zerocopy is maintained by engineers at Google with help from [many wonderful
206//! contributors][contributors]. Thank you to everyone who has lent a hand in
207//! making Rust a little more secure!
208//!
209//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
210
211// Sometimes we want to use lints which were added after our MSRV.
212// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
213// this attribute, any unknown lint would cause a CI failure when testing with
214// our MSRV.
215#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
216#![deny(renamed_and_removed_lints)]
217#![deny(
218    anonymous_parameters,
219    deprecated_in_future,
220    late_bound_lifetime_arguments,
221    missing_copy_implementations,
222    missing_debug_implementations,
223    missing_docs,
224    path_statements,
225    patterns_in_fns_without_body,
226    rust_2018_idioms,
227    trivial_numeric_casts,
228    unreachable_pub,
229    unsafe_op_in_unsafe_fn,
230    unused_extern_crates,
231    // We intentionally choose not to deny `unused_qualifications`. When items
232    // are added to the prelude (e.g., `core::mem::size_of`), this has the
233    // consequence of making some uses trigger this lint on the latest toolchain
234    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
235    // does not work on older toolchains.
236    //
237    // We tested a more complicated fix in #1413, but ultimately decided that,
238    // since this lint is just a minor style lint, the complexity isn't worth it
239    // - it's fine to occasionally have unused qualifications slip through,
240    // especially since these do not affect our user-facing API in any way.
241    variant_size_differences
242)]
243#![cfg_attr(
244    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
245    deny(fuzzy_provenance_casts, lossy_provenance_casts)
246)]
247#![deny(
248    clippy::all,
249    clippy::alloc_instead_of_core,
250    clippy::arithmetic_side_effects,
251    clippy::as_underscore,
252    clippy::assertions_on_result_states,
253    clippy::as_conversions,
254    clippy::correctness,
255    clippy::dbg_macro,
256    clippy::decimal_literal_representation,
257    clippy::double_must_use,
258    clippy::get_unwrap,
259    clippy::indexing_slicing,
260    clippy::missing_inline_in_public_items,
261    clippy::missing_safety_doc,
262    clippy::multiple_unsafe_ops_per_block,
263    clippy::must_use_candidate,
264    clippy::must_use_unit,
265    clippy::obfuscated_if_else,
266    clippy::perf,
267    clippy::print_stdout,
268    clippy::return_self_not_must_use,
269    clippy::std_instead_of_core,
270    clippy::style,
271    clippy::suspicious,
272    clippy::todo,
273    clippy::undocumented_unsafe_blocks,
274    clippy::unimplemented,
275    clippy::unnested_or_patterns,
276    clippy::unwrap_used,
277    clippy::use_debug
278)]
279// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
280// has false positives, and we test on our MSRV in CI, so it doesn't help us
281// anyway.
282#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
283#![deny(
284    rustdoc::bare_urls,
285    rustdoc::broken_intra_doc_links,
286    rustdoc::invalid_codeblock_attributes,
287    rustdoc::invalid_html_tags,
288    rustdoc::invalid_rust_codeblocks,
289    rustdoc::missing_crate_level_docs,
290    rustdoc::private_intra_doc_links
291)]
292// In test code, it makes sense to weight more heavily towards concise, readable
293// code over correct or debuggable code.
294#![cfg_attr(any(test, kani), allow(
295    // In tests, you get line numbers and have access to source code, so panic
296    // messages are less important. You also often unwrap a lot, which would
297    // make expect'ing instead very verbose.
298    clippy::unwrap_used,
299    // In tests, there's no harm to "panic risks" - the worst that can happen is
300    // that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly "in
302    // the field".
303    clippy::arithmetic_side_effects,
304    clippy::indexing_slicing,
305))]
306#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
307#![cfg_attr(
308    all(feature = "simd-nightly", target_arch = "arm"),
309    feature(stdarch_arm_neon_intrinsics)
310)]
311#![cfg_attr(
312    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
313    feature(stdarch_powerpc)
314)]
315#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
316#![cfg_attr(doc_cfg, feature(doc_cfg))]
317#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))]
318#![cfg_attr(
319    any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri),
320    feature(layout_for_ptr)
321)]
322#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))]
323
324// This is a hack to allow zerocopy-derive derives to work in this crate. They
325// assume that zerocopy is linked as an extern crate, so they access items from
326// it as `zerocopy::Xxx`. This makes that still work.
327#[cfg(any(feature = "derive", test))]
328extern crate self as zerocopy;
329
330#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))]
331extern crate test;
332
333#[doc(hidden)]
334#[macro_use]
335pub mod util;
336
337pub mod byte_slice;
338pub mod byteorder;
339mod deprecated;
340
341#[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)]
342pub mod doctests;
343
344// This module is `pub` so that zerocopy's error types and error handling
345// documentation is grouped together in a cohesive module. In practice, we
346// expect most users to use the re-export of `error`'s items to avoid identifier
347// stuttering.
348pub mod error;
349mod impls;
350#[doc(hidden)]
351pub mod layout;
352mod macros;
353#[doc(hidden)]
354pub mod pointer;
355mod r#ref;
356mod split_at;
357// FIXME(#252): If we make this pub, come up with a better name.
358mod wrappers;
359
360use core::{
361    cell::{Cell, UnsafeCell},
362    cmp::Ordering,
363    fmt::{self, Debug, Display, Formatter},
364    hash::Hasher,
365    marker::PhantomData,
366    mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
367    num::{
368        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
369        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
370    },
371    ops::{Deref, DerefMut},
372    ptr::{self, NonNull},
373    slice,
374};
375#[cfg(feature = "std")]
376use std::io;
377
378#[doc(hidden)]
379pub use crate::pointer::invariant::{self, BecauseExclusive};
380#[doc(hidden)]
381pub use crate::pointer::PtrInner;
382pub use crate::{
383    byte_slice::*,
384    byteorder::*,
385    error::*,
386    r#ref::*,
387    split_at::{Split, SplitAt},
388    wrappers::*,
389};
390
391#[cfg(any(feature = "alloc", test, kani))]
392extern crate alloc;
393#[cfg(any(feature = "alloc", test))]
394use alloc::{boxed::Box, vec::Vec};
395#[cfg(any(feature = "alloc", test))]
396use core::alloc::Layout;
397
398use util::MetadataOf;
399
400// Used by `KnownLayout`.
401#[doc(hidden)]
402pub use crate::layout::*;
403// Used by `TryFromBytes::is_bit_valid`.
404#[doc(hidden)]
405pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
406// For each trait polyfill, as soon as the corresponding feature is stable, the
407// polyfill import will be unused because method/function resolution will prefer
408// the inherent method/function over a trait method/function. Thus, we suppress
409// the `unused_imports` warning.
410//
411// See the documentation on `util::polyfills` for more information.
412#[allow(unused_imports)]
413use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
414
// Emit a compile-time warning when zerocopy's own test suite is built with
// plain `cargo` rather than the repository's wrapper scripts (`cargo.sh` /
// `win-cargo.bat`), which set the `__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE`
// cfg. The trick: declare a `#[deprecated]` const and then reference it in a
// scope where the `deprecated` lint is forced to `warn`. The reference
// triggers the lint, surfacing the custom message during compilation without
// failing the build. The outer `const _: () = { ... }` is a throwaway item
// that exists only to host this reference.
#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_DEV_MODE)))]
const _: () = {
    #[deprecated = "Development of zerocopy using cargo is not supported. Please use `cargo.sh` or `win-cargo.bat` instead."]
    #[allow(unused)]
    const WARNING: () = ();
    // Force the `deprecated` lint to `warn` here in case it is allowed at an
    // outer scope; evaluating `WARNING` is what actually fires the lint.
    #[warn(deprecated)]
    WARNING
};
423
424/// Implements [`KnownLayout`].
425///
426/// This derive analyzes various aspects of a type's layout that are needed for
427/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
428/// e.g.:
429///
430/// ```
431/// # use zerocopy_derive::KnownLayout;
432/// #[derive(KnownLayout)]
433/// struct MyStruct {
434/// # /*
435///     ...
436/// # */
437/// }
438///
439/// #[derive(KnownLayout)]
440/// enum MyEnum {
441/// #   V00,
442/// # /*
443///     ...
444/// # */
445/// }
446///
447/// #[derive(KnownLayout)]
448/// union MyUnion {
449/// #   variant: u8,
450/// # /*
451///     ...
452/// # */
453/// }
454/// ```
455///
456/// # Limitations
457///
458/// This derive cannot currently be applied to unsized structs without an
459/// explicit `repr` attribute.
460///
461/// Some invocations of this derive run afoul of a [known bug] in Rust's type
462/// privacy checker. For example, this code:
463///
464/// ```compile_fail,E0446
465/// use zerocopy::*;
466/// # use zerocopy_derive::*;
467///
468/// #[derive(KnownLayout)]
469/// #[repr(C)]
470/// pub struct PublicType {
471///     leading: Foo,
472///     trailing: Bar,
473/// }
474///
475/// #[derive(KnownLayout)]
476/// struct Foo;
477///
478/// #[derive(KnownLayout)]
479/// struct Bar;
480/// ```
481///
482/// ...results in a compilation error:
483///
484/// ```text
485/// error[E0446]: private type `Bar` in public interface
486///  --> examples/bug.rs:3:10
487///    |
488/// 3  | #[derive(KnownLayout)]
489///    |          ^^^^^^^^^^^ can't leak private type
490/// ...
491/// 14 | struct Bar;
492///    | ---------- `Bar` declared as private
493///    |
494///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
495/// ```
496///
497/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
498/// structs whose trailing field type is less public than the enclosing struct.
499///
500/// To work around this, mark the trailing field type `pub` and annotate it with
501/// `#[doc(hidden)]`; e.g.:
502///
503/// ```no_run
504/// use zerocopy::*;
505/// # use zerocopy_derive::*;
506///
507/// #[derive(KnownLayout)]
508/// #[repr(C)]
509/// pub struct PublicType {
510///     leading: Foo,
511///     trailing: Bar,
512/// }
513///
514/// #[derive(KnownLayout)]
515/// struct Foo;
516///
517/// #[doc(hidden)]
518/// #[derive(KnownLayout)]
519/// pub struct Bar; // <- `Bar` is now also `pub`
520/// ```
521///
522/// [known bug]: https://github.com/rust-lang/rust/issues/45713
523#[cfg(any(feature = "derive", test))]
524#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
525pub use zerocopy_derive::KnownLayout;
526// These exist so that code which was written against the old names will get
527// less confusing error messages when they upgrade to a more recent version of
528// zerocopy. On our MSRV toolchain, the error messages read, for example:
529//
530//   error[E0603]: trait `FromZeroes` is private
531//       --> examples/deprecated.rs:1:15
532//        |
533//   1    | use zerocopy::FromZeroes;
534//        |               ^^^^^^^^^^ private trait
535//        |
536//   note: the trait `FromZeroes` is defined here
537//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
538//        |
539//   1845 | use FromZeros as FromZeroes;
540//        |     ^^^^^^^^^^^^^^^^^^^^^^^
541//
542// The "note" provides enough context to make it easy to figure out how to fix
543// the error.
544#[allow(unused)]
545use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
546
547/// Indicates that zerocopy can reason about certain aspects of a type's layout.
548///
549/// This trait is required by many of zerocopy's APIs. It supports sized types,
550/// slices, and [slice DSTs](#dynamically-sized-types).
551///
552/// # Implementation
553///
554/// **Do not implement this trait yourself!** Instead, use
555/// [`#[derive(KnownLayout)]`][derive]; e.g.:
556///
557/// ```
558/// # use zerocopy_derive::KnownLayout;
559/// #[derive(KnownLayout)]
560/// struct MyStruct {
561/// # /*
562///     ...
563/// # */
564/// }
565///
566/// #[derive(KnownLayout)]
567/// enum MyEnum {
568/// # /*
569///     ...
570/// # */
571/// }
572///
573/// #[derive(KnownLayout)]
574/// union MyUnion {
575/// #   variant: u8,
576/// # /*
577///     ...
578/// # */
579/// }
580/// ```
581///
582/// This derive performs a sophisticated analysis to deduce the layout
583/// characteristics of types. You **must** implement this trait via the derive.
584///
585/// # Dynamically-sized types
586///
587/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
588///
589/// A slice DST is a type whose trailing field is either a slice or another
590/// slice DST, rather than a type with fixed size. For example:
591///
592/// ```
593/// #[repr(C)]
594/// struct PacketHeader {
595/// # /*
596///     ...
597/// # */
598/// }
599///
600/// #[repr(C)]
601/// struct Packet {
602///     header: PacketHeader,
603///     body: [u8],
604/// }
605/// ```
606///
607/// It can be useful to think of slice DSTs as a generalization of slices - in
608/// other words, a normal slice is just the special case of a slice DST with
609/// zero leading fields. In particular:
610/// - Like slices, slice DSTs can have different lengths at runtime
611/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
612///   or via other indirection such as `Box`
613/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
614///   encodes the number of elements in the trailing slice field
615///
616/// ## Slice DST layout
617///
618/// Just like other composite Rust types, the layout of a slice DST is not
619/// well-defined unless it is specified using an explicit `#[repr(...)]`
620/// attribute such as `#[repr(C)]`. [Other representations are
621/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
622/// example.
623///
624/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
625/// types][repr-c-structs], but the presence of a variable-length field
626/// introduces the possibility of *dynamic padding*. In particular, it may be
627/// necessary to add trailing padding *after* the trailing slice field in order
628/// to satisfy the outer type's alignment, and the amount of padding required
629/// may be a function of the length of the trailing slice field. This is just a
630/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
631/// but it can result in surprising behavior. For example, consider the
632/// following type:
633///
634/// ```
635/// #[repr(C)]
636/// struct Foo {
637///     a: u32,
638///     b: u8,
639///     z: [u16],
640/// }
641/// ```
642///
643/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
644/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
645/// `Foo`:
646///
647/// ```text
648/// byte offset | 01234567
649///       field | aaaab---
650///                    ><
651/// ```
652///
653/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
654/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
655/// round up to offset 6. This means that there is one byte of padding between
656/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
657/// then two bytes of padding after `z` in order to satisfy the overall
658/// alignment of `Foo`. The size of this instance is 8 bytes.
659///
660/// What about if `z` has length 1?
661///
662/// ```text
663/// byte offset | 01234567
664///       field | aaaab-zz
665/// ```
666///
667/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
668/// that we no longer need padding after `z` in order to satisfy `Foo`'s
669/// alignment. We've now seen two different values of `Foo` with two different
670/// lengths of `z`, but they both have the same size - 8 bytes.
671///
672/// What about if `z` has length 2?
673///
674/// ```text
675/// byte offset | 012345678901
676///       field | aaaab-zzzz--
677/// ```
678///
679/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
680/// size to 10, and so we now need another 2 bytes of padding after `z` to
681/// satisfy `Foo`'s alignment.
682///
683/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
684/// applied to slice DSTs, but it can be surprising that the amount of trailing
685/// padding becomes a function of the trailing slice field's length, and thus
686/// can only be computed at runtime.
687///
688/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
689/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
690///
691/// ## What is a valid size?
692///
693/// There are two places in zerocopy's API that we refer to "a valid size" of a
694/// type. In normal casts or conversions, where the source is a byte slice, we
695/// need to know whether the source byte slice is a valid size of the
696/// destination type. In prefix or suffix casts, we need to know whether *there
697/// exists* a valid size of the destination type which fits in the source byte
698/// slice and, if so, what the largest such size is.
699///
700/// As outlined above, a slice DST's size is defined by the number of elements
701/// in its trailing slice field. However, there is not necessarily a 1-to-1
702/// mapping between trailing slice field length and overall size. As we saw in
703/// the previous section with the type `Foo`, instances with both 0 and 1
704/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
705///
706/// When we say "x is a valid size of `T`", we mean one of two things:
707/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
708/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
709///   `T` with `len` trailing slice elements has size `x`
710///
711/// When we say "largest possible size of `T` that fits in a byte slice", we
712/// mean one of two things:
713/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
714///   `size_of::<T>()` bytes long
715/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
716///   that the instance of `T` with `len` trailing slice elements fits in the
717///   byte slice, and to choose the largest such `len`, if any
718///
719///
720/// # Safety
721///
722/// This trait does not convey any safety guarantees to code outside this crate.
723///
724/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
725/// releases of zerocopy may make backwards-breaking changes to these items,
726/// including changes that only affect soundness, which may cause code which
727/// uses those items to silently become unsound.
728///
729#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
730#[cfg_attr(
731    not(feature = "derive"),
732    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
733)]
734#[cfg_attr(
735    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
736    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
737)]
738pub unsafe trait KnownLayout {
739    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
740    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
741    // it likely won't be in the future, but there's no reason not to be
742    // forwards-compatible with object safety.
743    #[doc(hidden)]
744    fn only_derive_is_allowed_to_implement_this_trait()
745    where
746        Self: Sized;
747
748    /// The type of metadata stored in a pointer to `Self`.
749    ///
750    /// This is `()` for sized types and [`usize`] for slice DSTs.
751    type PointerMetadata: PointerMetadata;
752
753    /// A maybe-uninitialized analog of `Self`
754    ///
755    /// # Safety
756    ///
757    /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
758    /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
759    #[doc(hidden)]
760    type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
761
762    /// The layout of `Self`.
763    ///
764    /// # Safety
765    ///
766    /// Callers may assume that `LAYOUT` accurately reflects the layout of
767    /// `Self`. In particular:
768    /// - `LAYOUT.align` is equal to `Self`'s alignment
769    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
770    ///   where `size == size_of::<Self>()`
771    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
772    ///   SizeInfo::SliceDst(slice_layout)` where:
773    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
774    ///     slice elements is equal to `slice_layout.offset +
775    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
776    ///     of `LAYOUT.align`
777    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
778    ///     slice_layout.elem_size * elems, size)` are padding and must not be
779    ///     assumed to be initialized
780    #[doc(hidden)]
781    const LAYOUT: DstLayout;
782
    /// Constructs a raw `NonNull<Self>` from a thin data pointer and pointer
    /// metadata.
    ///
    /// # Safety
    ///
    /// Implementations promise that the returned pointer has the same address
    /// and provenance as `bytes`, and that, if `Self` is a DST, the returned
    /// pointer's referent has the number of trailing slice elements encoded by
    /// `meta`.
    #[doc(hidden)]
    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
788
    /// Extracts the metadata from a pointer to `Self`.
    ///
    /// # Safety
    ///
    /// Implementations promise that `pointer_to_metadata` always returns the
    /// metadata stored in `ptr`; callers (including `unsafe` code) may rely on
    /// this.
    #[doc(hidden)]
    fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
797
    /// Computes the length of the byte range addressed by `ptr`.
    ///
    /// Returns `None` if the resulting length would not fit in a `usize`.
    ///
    /// # Safety
    ///
    /// Callers may assume that `size_of_val_raw` always returns the correct
    /// size.
    ///
    /// Callers may assume that, if `ptr` addresses a byte range whose length
    /// fits in a `usize`, this will return `Some`.
    #[doc(hidden)]
    #[must_use]
    #[inline(always)]
    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
        let meta = Self::pointer_to_metadata(ptr.as_ptr());
        // SAFETY: `size_for_metadata` promises to only return `None` if the
        // resulting size would not fit in a `usize`.
        Self::size_for_metadata(meta)
    }
818
    /// Produces a dangling `NonNull<Self>`.
    ///
    /// If `Self` is a slice DST, the returned pointer's referent has zero
    /// trailing slice elements.
    #[doc(hidden)]
    #[must_use]
    #[inline(always)]
    fn raw_dangling() -> NonNull<Self> {
        let meta = Self::PointerMetadata::from_elem_count(0);
        Self::raw_from_ptr_len(NonNull::dangling(), meta)
    }
826
    /// Computes the size of an object of type `Self` with the given pointer
    /// metadata.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to return `None` if and only if the
    /// resulting size would not fit in a [`usize`]. Note that the returned size
    /// could exceed the actual maximum valid size of an allocated object,
    /// [`isize::MAX`].
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::KnownLayout;
    ///
    /// assert_eq!(u8::size_for_metadata(()), Some(1));
    /// assert_eq!(u16::size_for_metadata(()), Some(2));
    /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42));
    /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84));
    ///
    /// // This size exceeds the maximum valid object size (`isize::MAX`):
    /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX));
    ///
    /// // This size, if computed, would exceed `usize::MAX`:
    /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None);
    /// ```
    #[inline(always)]
    fn size_for_metadata(meta: Self::PointerMetadata) -> Option<usize> {
        // Delegate to the metadata type, which knows whether `Self` is sized
        // (metadata `()`) or a slice DST (metadata `usize`).
        meta.size_for_metadata(Self::LAYOUT)
    }
857}
858
/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
#[inline(always)]
pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
where
    T: ?Sized + KnownLayout<PointerMetadata = usize>,
{
    // This helper trait hoists the extraction of the trailing slice layout
    // into an associated `const`, ensuring that it is computed during constant
    // evaluation rather than at runtime.
    trait LayoutFacts {
        const SIZE_INFO: TrailingSliceLayout;
    }

    impl<T: ?Sized> LayoutFacts for T
    where
        T: KnownLayout<PointerMetadata = usize>,
    {
        // The `Sized` arm is unreachable: `PointerMetadata = usize` implies
        // that `T` is a slice DST, whose `size_info` is `SliceDst`.
        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
            crate::SizeInfo::SliceDst(info) => info,
        };
    }

    T::SIZE_INFO
}
881
/// The metadata associated with a [`KnownLayout`] type.
#[doc(hidden)]
pub trait PointerMetadata: Copy + Eq + Debug {
    /// Constructs a `Self` from an element count.
    ///
    /// If `Self = ()`, this returns `()` (the count is discarded). If `Self =
    /// usize`, this returns `elems`. No other types are currently supported.
    fn from_elem_count(elems: usize) -> Self;

    /// Converts `self` to an element count.
    ///
    /// If `Self = ()`, this returns `0`. If `Self = usize`, this returns
    /// `self`. No other types are currently supported.
    fn to_elem_count(self) -> usize;

    /// Computes the size of the object with the given layout and pointer
    /// metadata.
    ///
    /// # Panics
    ///
    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
    /// panic.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to only return `None` if the resulting
    /// size would not fit in a `usize`.
    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
}
912
913impl PointerMetadata for () {
914    #[inline]
915    #[allow(clippy::unused_unit)]
916    fn from_elem_count(_elems: usize) -> () {}
917
918    #[inline]
919    fn to_elem_count(self) -> usize {
920        0
921    }
922
923    #[inline]
924    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
925        match layout.size_info {
926            SizeInfo::Sized { size } => Some(size),
927            // NOTE: This branch is unreachable, but we return `None` rather
928            // than `unreachable!()` to avoid generating panic paths.
929            SizeInfo::SliceDst(_) => None,
930        }
931    }
932}
933
934impl PointerMetadata for usize {
935    #[inline]
936    fn from_elem_count(elems: usize) -> usize {
937        elems
938    }
939
940    #[inline]
941    fn to_elem_count(self) -> usize {
942        self
943    }
944
945    #[inline]
946    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
947        match layout.size_info {
948            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
949                let slice_len = elem_size.checked_mul(self)?;
950                let without_padding = offset.checked_add(slice_len)?;
951                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
952            }
953            // NOTE: This branch is unreachable, but we return `None` rather
954            // than `unreachable!()` to avoid generating panic paths.
955            SizeInfo::Sized { .. } => None,
956        }
957    }
958}
959
// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items, dead_code)]
    #[cfg_attr(
        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
        coverage(off)
    )]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }

    type PointerMetadata = usize;

    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
    // identical, because they both lack a fixed-sized prefix and because they
    // inherit the alignments of their inner element type (which are identical)
    // [2][3].
    //
    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
    // back-to-back [2][3].
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
    //   `T`
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
    //
    //   Slices have the same layout as the section of the array they slice.
    //
    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
    //
    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
    //   element of the array is offset from the start of the array by `n *
    //   size_of::<T>()` bytes.
    type MaybeUninit = [CoreMaybeUninit<T>];

    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // FIXME(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }

    #[inline(always)]
    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
        #[allow(clippy::as_conversions)]
        let slc = ptr as *const [()];

        // SAFETY:
        // - `()` has alignment 1, so `slc` is trivially aligned.
        // - `slc` was derived from a non-null pointer.
        // - The size is 0 regardless of the length, so it is sound to
        //   materialize a reference regardless of location.
        // - `ptr` is assumed to have valid provenance. NOTE(review): this
        //   comment previously referenced a nonexistent `self.ptr`; confirm
        //   that callers guarantee provenance for `ptr`.
        let slc = unsafe { &*slc };

        // This is correct because the preceding `as` cast preserves the number
        // of slice elements. [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
        //
        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
        //   elements in this slice. Casts between these raw pointer types
        //   preserve the number of elements. ... The same holds for `str` and
        //   any compound type whose unsized tail is a slice type, such as
        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
        slc.len()
    }
}
1042
// `KnownLayout` for primitives and their `NonZero` counterparts; all of these
// are sized types.
#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
// `KnownLayout` for the nightly-only `f16` and `f128` float primitives.
#[rustfmt::skip]
#[cfg(feature = "float-nightly")]
impl_known_layout!(
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f16,
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f128
);
// `KnownLayout` for sized generic wrapper, pointer, and reference types.
#[rustfmt::skip]
impl_known_layout!(
    T         => Option<T>,
    T: ?Sized => PhantomData<T>,
    T         => Wrapping<T>,
    T         => CoreMaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
    T: ?Sized => &'_ T,
    T: ?Sized => &'_ mut T,
);
// `KnownLayout` for arrays of any length.
impl_known_layout!(const N: usize, T => [T; N]);
1071
// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
// In each invocation below, the `#[repr(...)]` argument names the type whose
// layout the implementing type inherits.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
//
// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
//
// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
//
//   `Cell<T>` has the same in-memory representation as `T`.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
    unsafe_impl_known_layout!(
        #[repr([u8])]
        str
    );
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
};
1098
// SAFETY:
// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
//   - Fixed prefix size
//   - Alignment
//   - (For DSTs) trailing slice element size
// - By consequence of the above, the referents `T::MaybeUninit` and `T`
//   require the same kind of pointer metadata, and thus it is valid to perform
//   an `as` cast from `*mut T` to `*mut T::MaybeUninit`, and this operation
//   preserves referent size (ie, `size_of_val_raw`).
const _: () = unsafe {
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
};
1112
// FIXME(#196, #2856): Eventually, we'll want to support enum variants and
// union fields being treated uniformly since they behave similarly to each
// other in terms of projecting validity – specifically, for a type `T` with
// validity `V`, if `T` is a struct type, then its fields straightforwardly also
// have validity `V`. By contrast, if `T` is an enum or union type, then
// validity is not straightforwardly recursive in this way.

/// The `VARIANT_ID` used by [`HasField`] impls for struct fields.
#[doc(hidden)]
pub const STRUCT_VARIANT_ID: i128 = -1;
/// The `VARIANT_ID` used by [`HasField`] impls for union fields.
#[doc(hidden)]
pub const UNION_VARIANT_ID: i128 = -2;
/// As [`UNION_VARIANT_ID`], but for `#[repr(C)]` unions.
#[doc(hidden)]
pub const REPR_C_UNION_VARIANT_ID: i128 = -3;
1125
/// Types whose enum tag (if any) can be projected to via
/// [`Self::ProjectToTag`].
///
/// # Safety
///
/// `Self::ProjectToTag` must satisfy its safety invariant.
#[doc(hidden)]
pub unsafe trait HasTag {
    // This trait-sealing method ensures that only the derive implements
    // `HasTag`.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type's enum tag, or `()` for non-enum types.
    type Tag: Immutable;

    /// A pointer projection from `Self` to its tag.
    ///
    /// # Safety
    ///
    /// It must be the case that, for all `slf: Ptr<'_, Self, I>`, it is sound
    /// to project from `slf` to `Ptr<'_, Self::Tag, I>` using this projection.
    type ProjectToTag: pointer::cast::Project<Self, Self::Tag>;
}
1146
/// Projects a given field from `Self`.
///
/// All implementations of `HasField` for a particular field `f` in `Self`
/// should use the same `Field` type; this ensures that `Field` is inferable
/// given an explicit `VARIANT_ID` and `FIELD_ID`.
///
/// # Safety
///
/// A field `f` is `HasField` for `Self` if and only if:
///
/// - If `Self` has the layout of a struct or union type, then `VARIANT_ID` is
///   `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if
///   `Self` has the layout of an enum type, `VARIANT_ID` is the numerical index
///   of the enum variant in which `f` appears. Note that `Self` does not need
///   to actually *be* such a type – it just needs to have the same layout as
///   such a type. For example, a `#[repr(transparent)]` wrapper around an enum
///   has the same layout as that enum.
/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise,
///   if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`.
/// - `Field` is a type with the same visibility as `f`.
/// - `Type` has the same type as `f`.
///
/// The caller must **not** assume that a pointer's referent being aligned
/// implies that calling `project` on that pointer will result in a pointer to
/// an aligned referent. For example, `HasField` may be implemented for
/// `#[repr(packed)]` structs.
///
/// The implementation of `project` must satisfy its safety post-condition.
#[doc(hidden)]
pub unsafe trait HasField<Field, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasTag
{
    // This trait-sealing method ensures that only the derive implements
    // `HasField`.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type of the field.
    type Type: ?Sized;

    /// Projects from `slf` to the field.
    ///
    /// Users should generally not call `project` directly, and instead should
    /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`].
    ///
    /// # Safety
    ///
    /// Implementations promise that the returned pointer refers to a
    /// non-strict subset of the bytes of `slf`'s referent, and has the same
    /// provenance as `slf`.
    #[must_use]
    fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type;
}
1198
/// Projects a given field from `Self`.
///
/// Implementations of this trait encode the conditions under which a field can
/// be projected from a `Ptr<'_, Self, I>`, and how the invariants of that
/// [`Ptr`] (`I`) determine the invariants of pointers projected from it. In
/// other words, it is a type-level function over invariants; `I` goes in,
/// `Self::Invariants` comes out.
///
/// # Safety
///
/// `T: ProjectField<Field, I, VARIANT_ID, FIELD_ID>` if, for a
/// `ptr: Ptr<'_, T, I>` such that `T::is_projectable(ptr).is_ok()`,
/// `<T as HasField<Field, VARIANT_ID, FIELD_ID>>::project(ptr.as_inner())`
/// conforms to `T::Invariants`.
#[doc(hidden)]
pub unsafe trait ProjectField<Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>:
    HasField<Field, VARIANT_ID, FIELD_ID>
where
    I: invariant::Invariants,
{
    // This trait-sealing method ensures that only the derive implements
    // `ProjectField`.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The invariants of the projected field pointer, with respect to the
    /// invariants, `I`, of the containing pointer. The aliasing dimension of
    /// the invariants is guaranteed to remain unchanged.
    type Invariants: invariant::Invariants<Aliasing = I::Aliasing>;

    /// The failure mode of projection. `()` if the projection is fallible,
    /// otherwise [`core::convert::Infallible`].
    type Error;

    /// Is the given field projectable from `ptr`?
    ///
    /// If a field with [`Self::Invariants`] is projectable from the referent,
    /// this function returns `Ok(())`, and the projection may be made from
    /// `ptr`; otherwise `Err`.
    ///
    /// This method must be overridden if the field's projectability depends on
    /// the value of the bytes in `ptr`.
    #[inline(always)]
    fn is_projectable<'a>(_ptr: Ptr<'a, Self::Tag, I>) -> Result<(), Self::Error> {
        // This default implementation is only sound when projectability does
        // not depend on the referent's value. The const machinery below
        // asserts, at compile time, that this is the case for the given
        // `VARIANT_ID` and validity invariant.
        trait IsInfallible {
            const IS_INFALLIBLE: bool;
        }

        struct Projection<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128>(
            PhantomData<(Field, I, T)>,
        )
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants;

        impl<T, Field, I, const VARIANT_ID: i128, const FIELD_ID: i128> IsInfallible
            for Projection<T, Field, I, VARIANT_ID, FIELD_ID>
        where
            T: ?Sized + HasField<Field, VARIANT_ID, FIELD_ID>,
            I: invariant::Invariants,
        {
            const IS_INFALLIBLE: bool = {
                let is_infallible = match VARIANT_ID {
                    // For nondestructive projections of struct and union
                    // fields, the projected field's satisfaction of
                    // `Invariants` does not depend on the value of the
                    // referent. This default implementation of `is_projectable`
                    // is non-destructive, as it does not overwrite any part of
                    // the referent.
                    crate::STRUCT_VARIANT_ID | crate::UNION_VARIANT_ID => true,
                    _enum_variant => {
                        use crate::invariant::{Validity, ValidityKind};
                        match I::Validity::KIND {
                            // The `Uninit` and `Initialized` validity
                            // invariants do not depend on the enum's tag. In
                            // particular, we don't actually care about what
                            // variant is present – we can treat *any* range of
                            // uninitialized or initialized memory as containing
                            // an uninitialized or initialized instance of *any*
                            // type – the type itself is irrelevant.
                            ValidityKind::Uninit | ValidityKind::Initialized => true,
                            // The projectability of an enum field from an
                            // `AsInitialized` or `Valid` state is a dynamic
                            // property of its tag.
                            ValidityKind::AsInitialized | ValidityKind::Valid => false,
                        }
                    }
                };
                const_assert!(is_infallible);
                is_infallible
            };
        }

        const_assert!(
            <Projection<Self, Field, I, VARIANT_ID, FIELD_ID> as IsInfallible>::IS_INFALLIBLE
        );

        Ok(())
    }
}
1298
1299/// Analyzes whether a type is [`FromZeros`].
1300///
1301/// This derive analyzes, at compile time, whether the annotated type satisfies
1302/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1303/// supertraits if it is sound to do so. This derive can be applied to structs,
1304/// enums, and unions; e.g.:
1305///
1306/// ```
1307/// # use zerocopy_derive::{FromZeros, Immutable};
1308/// #[derive(FromZeros)]
1309/// struct MyStruct {
1310/// # /*
1311///     ...
1312/// # */
1313/// }
1314///
1315/// #[derive(FromZeros)]
1316/// #[repr(u8)]
1317/// enum MyEnum {
1318/// #   Variant0,
1319/// # /*
1320///     ...
1321/// # */
1322/// }
1323///
1324/// #[derive(FromZeros, Immutable)]
1325/// union MyUnion {
1326/// #   variant: u8,
1327/// # /*
1328///     ...
1329/// # */
1330/// }
1331/// ```
1332///
1333/// [safety conditions]: trait@FromZeros#safety
1334///
1335/// # Analysis
1336///
1337/// *This section describes, roughly, the analysis performed by this derive to
1338/// determine whether it is sound to implement `FromZeros` for a given type.
1339/// Unless you are modifying the implementation of this derive, or attempting to
1340/// manually implement `FromZeros` for a type yourself, you don't need to read
1341/// this section.*
1342///
1343/// If a type has the following properties, then this derive can implement
1344/// `FromZeros` for that type:
1345///
1346/// - If the type is a struct, all of its fields must be `FromZeros`.
1347/// - If the type is an enum:
1348///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1349///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
///   - It must have a variant with a discriminant/tag of `0`. See [the
///     reference] for a description of how discriminant values are specified.
///   - The fields of that variant must be `FromZeros`.
1354///
1355/// This analysis is subject to change. Unsafe code may *only* rely on the
1356/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1357/// implementation details of this derive.
1358///
1359/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1360///
1361/// ## Why isn't an explicit representation required for structs?
1362///
1363/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1364/// that structs are marked with `#[repr(C)]`.
1365///
1366/// Per the [Rust reference](reference),
1367///
1368/// > The representation of a type can change the padding between fields, but
1369/// > does not change the layout of the fields themselves.
1370///
1371/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1372///
1373/// Since the layout of structs only consists of padding bytes and field bytes,
1374/// a struct is soundly `FromZeros` if:
1375/// 1. its padding is soundly `FromZeros`, and
1376/// 2. its fields are soundly `FromZeros`.
1377///
1378/// The answer to the first question is always yes: padding bytes do not have
1379/// any validity constraints. A [discussion] of this question in the Unsafe Code
1380/// Guidelines Working Group concluded that it would be virtually unimaginable
1381/// for future versions of rustc to add validity constraints to padding bytes.
1382///
1383/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1384///
1385/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1386/// its fields are `FromZeros`.
1387// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
1388// attribute.
1389#[cfg(any(feature = "derive", test))]
1390#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1391pub use zerocopy_derive::FromZeros;
1392/// Analyzes whether a type is [`Immutable`].
1393///
1394/// This derive analyzes, at compile time, whether the annotated type satisfies
1395/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1396/// sound to do so. This derive can be applied to structs, enums, and unions;
1397/// e.g.:
1398///
1399/// ```
1400/// # use zerocopy_derive::Immutable;
1401/// #[derive(Immutable)]
1402/// struct MyStruct {
1403/// # /*
1404///     ...
1405/// # */
1406/// }
1407///
1408/// #[derive(Immutable)]
1409/// enum MyEnum {
1410/// #   Variant0,
1411/// # /*
1412///     ...
1413/// # */
1414/// }
1415///
1416/// #[derive(Immutable)]
1417/// union MyUnion {
1418/// #   variant: u8,
1419/// # /*
1420///     ...
1421/// # */
1422/// }
1423/// ```
1424///
1425/// # Analysis
1426///
1427/// *This section describes, roughly, the analysis performed by this derive to
1428/// determine whether it is sound to implement `Immutable` for a given type.
1429/// Unless you are modifying the implementation of this derive, you don't need
1430/// to read this section.*
1431///
1432/// If a type has the following properties, then this derive can implement
1433/// `Immutable` for that type:
1434///
1435/// - All fields must be `Immutable`.
1436///
1437/// This analysis is subject to change. Unsafe code may *only* rely on the
1438/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1439/// implementation details of this derive.
1440///
1441/// [safety conditions]: trait@Immutable#safety
1442#[cfg(any(feature = "derive", test))]
1443#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1444pub use zerocopy_derive::Immutable;
1445
1446/// Types which are free from interior mutability.
1447///
1448/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1449/// by ownership or an exclusive (`&mut`) borrow.
1450///
1451/// # Implementation
1452///
1453/// **Do not implement this trait yourself!** Instead, use
1454/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1455/// e.g.:
1456///
1457/// ```
1458/// # use zerocopy_derive::Immutable;
1459/// #[derive(Immutable)]
1460/// struct MyStruct {
1461/// # /*
1462///     ...
1463/// # */
1464/// }
1465///
1466/// #[derive(Immutable)]
1467/// enum MyEnum {
1468/// # /*
1469///     ...
1470/// # */
1471/// }
1472///
1473/// #[derive(Immutable)]
1474/// union MyUnion {
1475/// #   variant: u8,
1476/// # /*
1477///     ...
1478/// # */
1479/// }
1480/// ```
1481///
1482/// This derive performs a sophisticated, compile-time safety analysis to
1483/// determine whether a type is `Immutable`.
1484///
1485/// # Safety
1486///
1487/// Unsafe code outside of this crate must not make any assumptions about `T`
1488/// based on `T: Immutable`. We reserve the right to relax the requirements for
1489/// `Immutable` in the future, and if unsafe code outside of this crate makes
1490/// assumptions based on `T: Immutable`, future relaxations may cause that code
1491/// to become unsound.
1492///
1493// # Safety (Internal)
1494//
1495// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1496// `t: &T`, `t` does not permit interior mutation of its referent. Because
1497// [`UnsafeCell`] is the only type which permits interior mutation, it is
1498// sufficient (though not necessary) to guarantee that `T` contains no
1499// `UnsafeCell`s.
1500//
1501// [`UnsafeCell`]: core::cell::UnsafeCell
1502#[cfg_attr(
1503    feature = "derive",
1504    doc = "[derive]: zerocopy_derive::Immutable",
1505    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1506)]
1507#[cfg_attr(
1508    not(feature = "derive"),
1509    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1510    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1511)]
1512#[cfg_attr(
1513    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1514    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1515)]
pub unsafe trait Immutable {
    // The `Self: Sized` bound makes it so that `Immutable` is still object
    // safe. As with zerocopy's other marker traits, this hidden method also
    // ensures that only the derive implements this trait.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
1524
1525/// Implements [`TryFromBytes`].
1526///
1527/// This derive synthesizes the runtime checks required to check whether a
1528/// sequence of initialized bytes corresponds to a valid instance of a type.
1529/// This derive can be applied to structs, enums, and unions; e.g.:
1530///
1531/// ```
1532/// # use zerocopy_derive::{TryFromBytes, Immutable};
1533/// #[derive(TryFromBytes)]
1534/// struct MyStruct {
1535/// # /*
1536///     ...
1537/// # */
1538/// }
1539///
1540/// #[derive(TryFromBytes)]
1541/// #[repr(u8)]
1542/// enum MyEnum {
1543/// #   V00,
1544/// # /*
1545///     ...
1546/// # */
1547/// }
1548///
1549/// #[derive(TryFromBytes, Immutable)]
1550/// union MyUnion {
1551/// #   variant: u8,
1552/// # /*
1553///     ...
1554/// # */
1555/// }
1556/// ```
1557///
1558/// # Portability
1559///
1560/// To ensure consistent endianness for enums with multi-byte representations,
1561/// explicitly specify and convert each discriminant using `.to_le()` or
1562/// `.to_be()`; e.g.:
1563///
1564/// ```
1565/// # use zerocopy_derive::TryFromBytes;
1566/// // `DataStoreVersion` is encoded in little-endian.
1567/// #[derive(TryFromBytes)]
1568/// #[repr(u32)]
1569/// pub enum DataStoreVersion {
1570///     /// Version 1 of the data store.
1571///     V1 = 9u32.to_le(),
1572///
1573///     /// Version 2 of the data store.
1574///     V2 = 10u32.to_le(),
1575/// }
1576/// ```
1577///
1578/// [safety conditions]: trait@TryFromBytes#safety
1579#[cfg(any(feature = "derive", test))]
1580#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1581pub use zerocopy_derive::TryFromBytes;
1582
1583/// Types for which some bit patterns are valid.
1584///
1585/// A memory region of the appropriate length which contains initialized bytes
1586/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1587/// bytes corresponds to a [*valid instance*] of that type. For example,
1588/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1589/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1590/// `1`.
1591///
1592/// # Implementation
1593///
1594/// **Do not implement this trait yourself!** Instead, use
1595/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1596///
1597/// ```
1598/// # use zerocopy_derive::{TryFromBytes, Immutable};
1599/// #[derive(TryFromBytes)]
1600/// struct MyStruct {
1601/// # /*
1602///     ...
1603/// # */
1604/// }
1605///
1606/// #[derive(TryFromBytes)]
1607/// #[repr(u8)]
1608/// enum MyEnum {
1609/// #   V00,
1610/// # /*
1611///     ...
1612/// # */
1613/// }
1614///
1615/// #[derive(TryFromBytes, Immutable)]
1616/// union MyUnion {
1617/// #   variant: u8,
1618/// # /*
1619///     ...
1620/// # */
1621/// }
1622/// ```
1623///
1624/// This derive ensures that the runtime check of whether bytes correspond to a
1625/// valid instance is sound. You **must** implement this trait via the derive.
1626///
1627/// # What is a "valid instance"?
1628///
1629/// In Rust, each type has *bit validity*, which refers to the set of bit
1630/// patterns which may appear in an instance of that type. It is impossible for
/// safe Rust code to produce values which violate bit validity (i.e., values
/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1633/// invalid value, this is considered [undefined behavior].
1634///
1635/// Rust's bit validity rules are currently being decided, which means that some
1636/// types have three classes of bit patterns: those which are definitely valid,
1637/// and whose validity is documented in the language; those which may or may not
1638/// be considered valid at some point in the future; and those which are
1639/// definitely invalid.
1640///
1641/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1642/// be valid if its validity is a documented guarantee provided by the
1643/// language.
1644///
1645/// For most use cases, Rust's current guarantees align with programmers'
1646/// intuitions about what ought to be valid. As a result, zerocopy's
1647/// conservatism should not affect most users.
1648///
1649/// If you are negatively affected by lack of support for a particular type,
1650/// we encourage you to let us know by [filing an issue][github-repo].
1651///
1652/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1653///
1654/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1655/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1656/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1657/// IntoBytes`, there exist values of `t: T` such that
1658/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not
1659/// generally assume that values produced by `IntoBytes` will necessarily be
1660/// accepted as valid by `TryFromBytes`.
1661///
1662/// # Safety
1663///
1664/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1665/// or representation of `T`. It merely provides the ability to perform a
1666/// validity check at runtime via methods like [`try_ref_from_bytes`].
1667///
1668/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1669/// Future releases of zerocopy may make backwards-breaking changes to these
1670/// items, including changes that only affect soundness, which may cause code
1671/// which uses those items to silently become unsound.
1672///
1673/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1674/// [github-repo]: https://github.com/google/zerocopy
1675/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1676/// [*valid instance*]: #what-is-a-valid-instance
1677#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1678#[cfg_attr(
1679    not(feature = "derive"),
1680    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1681)]
1682#[cfg_attr(
1683    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1684    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1685)]
1686pub unsafe trait TryFromBytes {
    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
    // safe: this method is excluded from any `dyn TryFromBytes` vtable, so its
    // presence does not prevent trait objects from being formed.
    //
    // NOTE(review): per the trait-level docs ("You **must** implement this
    // trait via the derive"), this hidden, body-less method appears intended
    // to discourage hand-written `impl TryFromBytes` blocks — only the derive
    // is expected to provide an implementation.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
1693
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns
    /// `true`, `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// Besides user-defined validation routines panicking, `is_bit_valid` will
    /// either panic or fail to compile if called on a pointer with [`Shared`]
    /// aliasing when `Self: !Immutable`.
    ///
    /// [`UnsafeCell`]: core::cell::UnsafeCell
    /// [`Shared`]: invariant::Shared
    // NOTE(review): the generic `A: invariant::Alignment` parameter means the
    // candidate pointer may carry any alignment invariant; presumably validity
    // checking itself does not require an aligned pointer — confirm against
    // the derive-generated implementations.
    #[doc(hidden)]
    fn is_bit_valid<A>(candidate: Maybe<'_, Self, A>) -> bool
    where
        A: invariant::Alignment;
1719
1720    /// Attempts to interpret the given `source` as a `&Self`.
1721    ///
1722    /// If the bytes of `source` are a valid instance of `Self`, this method
1723    /// returns a reference to those bytes interpreted as a `Self`. If the
1724    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1725    /// `source` is not appropriately aligned, or if `source` is not a valid
1726    /// instance of `Self`, this returns `Err`. If [`Self:
1727    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1728    /// error][ConvertError::from].
1729    ///
1730    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1731    ///
1732    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1733    /// [self-unaligned]: Unaligned
1734    /// [slice-dst]: KnownLayout#dynamically-sized-types
1735    ///
1736    /// # Compile-Time Assertions
1737    ///
1738    /// This method cannot yet be used on unsized types whose dynamically-sized
1739    /// component is zero-sized. Attempting to use this method on such types
1740    /// results in a compile-time assertion error; e.g.:
1741    ///
1742    /// ```compile_fail,E0080
1743    /// use zerocopy::*;
1744    /// # use zerocopy_derive::*;
1745    ///
1746    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1747    /// #[repr(C)]
1748    /// struct ZSTy {
1749    ///     leading_sized: u16,
1750    ///     trailing_dst: [()],
1751    /// }
1752    ///
1753    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // âš  Compile Error!
1754    /// ```
1755    ///
1756    /// # Examples
1757    ///
1758    /// ```
1759    /// use zerocopy::TryFromBytes;
1760    /// # use zerocopy_derive::*;
1761    ///
1762    /// // The only valid value of this type is the byte `0xC0`
1763    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1764    /// #[repr(u8)]
1765    /// enum C0 { xC0 = 0xC0 }
1766    ///
1767    /// // The only valid value of this type is the byte sequence `0xC0C0`.
1768    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1769    /// #[repr(C)]
1770    /// struct C0C0(C0, C0);
1771    ///
1772    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1773    /// #[repr(C)]
1774    /// struct Packet {
1775    ///     magic_number: C0C0,
1776    ///     mug_size: u8,
1777    ///     temperature: u8,
1778    ///     marshmallows: [[u8; 2]],
1779    /// }
1780    ///
1781    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1782    ///
1783    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1784    ///
1785    /// assert_eq!(packet.mug_size, 240);
1786    /// assert_eq!(packet.temperature, 77);
1787    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1788    ///
1789    /// // These bytes are not valid instance of `Packet`.
1790    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1791    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1792    /// ```
1793    ///
1794    #[doc = codegen_section!(
1795        header = "h5",
1796        bench = "try_ref_from_bytes",
1797        format = "coco",
1798        arity = 3,
1799        [
1800            open
1801            @index 1
1802            @title "Sized"
1803            @variant "static_size"
1804        ],
1805        [
1806            @index 2
1807            @title "Unsized"
1808            @variant "dynamic_size"
1809        ],
1810        [
1811            @index 3
1812            @title "Dynamically Padded"
1813            @variant "dynamic_padding"
1814        ]
1815    )]
1816    #[must_use = "has no side effects"]
1817    #[inline]
1818    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
1819    where
1820        Self: KnownLayout + Immutable,
1821    {
1822        static_assert_dst_is_not_zst!(Self);
1823        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
1824            Ok(source) => {
1825                // This call may panic. If that happens, it doesn't cause any soundness
1826                // issues, as we have not generated any invalid state which we need to
1827                // fix before returning.
1828                match source.try_into_valid() {
1829                    Ok(valid) => Ok(valid.as_ref()),
1830                    Err(e) => {
1831                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
1832                    }
1833                }
1834            }
1835            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
1836        }
1837    }
1838
    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_ref_from_prefix",
        format = "coco",
        arity = 3,
        [
            open
            @index 1
            @title "Sized"
            @variant "static_size"
        ],
        [
            @index 2
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 3
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegates to the shared prefix/suffix helper. `CastType::Prefix`
        // selects the leading bytes of `source`; `None` means no explicit
        // trailing-element count was requested, so the largest fitting size
        // is used (see the doc comment above).
        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
    }
1947
    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`. If that suffix is a
    /// valid instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the preceding bytes. If there
    /// are insufficient bytes, or if the suffix of `source` would not be
    /// appropriately aligned, or if the suffix is not a valid instance of
    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
    /// can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(prefix, &[0u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_ref_from_suffix",
        format = "coco",
        arity = 3,
        [
            open
            @index 1
            @title "Sized"
            @variant "static_size"
        ],
        [
            @index 2
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 3
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // The helper yields the pair as `(Self, preceding bytes)`; `swap`
        // reorders it to match this method's `(&[u8], &Self)` return type.
        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2056
2057    /// Attempts to interpret the given `source` as a `&mut Self` without
2058    /// copying.
2059    ///
2060    /// If the bytes of `source` are a valid instance of `Self`, this method
2061    /// returns a reference to those bytes interpreted as a `Self`. If the
2062    /// length of `source` is not a [valid size of `Self`][valid-size], or if
2063    /// `source` is not appropriately aligned, or if `source` is not a valid
2064    /// instance of `Self`, this returns `Err`. If [`Self:
2065    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2066    /// error][ConvertError::from].
2067    ///
2068    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2069    ///
2070    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2071    /// [self-unaligned]: Unaligned
2072    /// [slice-dst]: KnownLayout#dynamically-sized-types
2073    ///
2074    /// # Compile-Time Assertions
2075    ///
2076    /// This method cannot yet be used on unsized types whose dynamically-sized
2077    /// component is zero-sized. Attempting to use this method on such types
2078    /// results in a compile-time assertion error; e.g.:
2079    ///
2080    /// ```compile_fail,E0080
2081    /// use zerocopy::*;
2082    /// # use zerocopy_derive::*;
2083    ///
2084    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2085    /// #[repr(C, packed)]
2086    /// struct ZSTy {
2087    ///     leading_sized: [u8; 2],
2088    ///     trailing_dst: [()],
2089    /// }
2090    ///
2091    /// let mut source = [85, 85];
2092    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš  Compile Error!
2093    /// ```
2094    ///
2095    /// # Examples
2096    ///
2097    /// ```
2098    /// use zerocopy::TryFromBytes;
2099    /// # use zerocopy_derive::*;
2100    ///
2101    /// // The only valid value of this type is the byte `0xC0`
2102    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2103    /// #[repr(u8)]
2104    /// enum C0 { xC0 = 0xC0 }
2105    ///
2106    /// // The only valid value of this type is the bytes `0xC0C0`.
2107    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2108    /// #[repr(C)]
2109    /// struct C0C0(C0, C0);
2110    ///
2111    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2112    /// #[repr(C, packed)]
2113    /// struct Packet {
2114    ///     magic_number: C0C0,
2115    ///     mug_size: u8,
2116    ///     temperature: u8,
2117    ///     marshmallows: [[u8; 2]],
2118    /// }
2119    ///
2120    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
2121    ///
2122    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
2123    ///
2124    /// assert_eq!(packet.mug_size, 240);
2125    /// assert_eq!(packet.temperature, 77);
2126    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
2127    ///
2128    /// packet.temperature = 111;
2129    ///
2130    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
2131    ///
2132    /// // These bytes are not valid instance of `Packet`.
2133    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2134    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
2135    /// ```
2136    ///
2137    #[doc = codegen_header!("h5", "try_mut_from_bytes")]
2138    ///
2139    /// See [`TryFromBytes::try_ref_from_bytes`](#method.try_ref_from_bytes.codegen).
2140    #[must_use = "has no side effects"]
2141    #[inline]
2142    fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
2143    where
2144        Self: KnownLayout + IntoBytes,
2145    {
2146        static_assert_dst_is_not_zst!(Self);
2147        match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
2148            Ok(source) => {
2149                // This call may panic. If that happens, it doesn't cause any soundness
2150                // issues, as we have not generated any invalid state which we need to
2151                // fix before returning.
2152                match source.try_into_valid() {
2153                    Ok(source) => Ok(source.as_mut()),
2154                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
2155                }
2156            }
2157            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2158        }
2159    }
2160
    /// Attempts to interpret the prefix of the given `source` as a `&mut
    /// Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// packet.temperature = 111;
    /// suffix[0] = 222;
    ///
    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_header!("h5", "try_mut_from_prefix")]
    ///
    /// See [`TryFromBytes::try_ref_from_prefix`](#method.try_ref_from_prefix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegates to the shared mutable prefix/suffix helper.
        // `CastType::Prefix` selects the leading bytes of `source`; `None`
        // means no explicit trailing-element count was requested, so the
        // largest fitting size is used (see the doc comment above).
        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
    }
2259
    /// Attempts to interpret the suffix of the given `source` as a `&mut
    /// Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`. If that suffix is a
    /// valid instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the preceding bytes. If there
    /// are insufficient bytes, or if the suffix of `source` would not be
    /// appropriately aligned, or if the suffix is not a valid instance of
    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
    /// can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(prefix, &[0u8][..]);
    ///
    /// prefix[0] = 111;
    /// packet.temperature = 222;
    ///
    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
    /// ```
    ///
    #[doc = codegen_header!("h5", "try_mut_from_suffix")]
    ///
    /// See [`TryFromBytes::try_ref_from_suffix`](#method.try_ref_from_suffix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // The helper yields the pair as `(Self, preceding bytes)`; `swap`
        // reorders it to match this method's `(&mut [u8], &mut Self)` return
        // type.
        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
    }
2358
2359    /// Attempts to interpret the given `source` as a `&Self` with a DST length
2360    /// equal to `count`.
2361    ///
2362    /// This method attempts to return a reference to `source` interpreted as a
2363    /// `Self` with `count` trailing elements. If the length of `source` is not
2364    /// equal to the size of `Self` with `count` elements, if `source` is not
2365    /// appropriately aligned, or if `source` does not contain a valid instance
2366    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2367    /// you can [infallibly discard the alignment error][ConvertError::from].
2368    ///
2369    /// [self-unaligned]: Unaligned
2370    /// [slice-dst]: KnownLayout#dynamically-sized-types
2371    ///
2372    /// # Examples
2373    ///
2374    /// ```
2375    /// # #![allow(non_camel_case_types)] // For C0::xC0
2376    /// use zerocopy::TryFromBytes;
2377    /// # use zerocopy_derive::*;
2378    ///
2379    /// // The only valid value of this type is the byte `0xC0`
2380    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2381    /// #[repr(u8)]
2382    /// enum C0 { xC0 = 0xC0 }
2383    ///
2384    /// // The only valid value of this type is the bytes `0xC0C0`.
2385    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2386    /// #[repr(C)]
2387    /// struct C0C0(C0, C0);
2388    ///
2389    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2390    /// #[repr(C)]
2391    /// struct Packet {
2392    ///     magic_number: C0C0,
2393    ///     mug_size: u8,
2394    ///     temperature: u8,
2395    ///     marshmallows: [[u8; 2]],
2396    /// }
2397    ///
2398    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2399    ///
2400    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2401    ///
2402    /// assert_eq!(packet.mug_size, 240);
2403    /// assert_eq!(packet.temperature, 77);
2404    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2405    ///
2406    /// // These bytes are not valid instance of `Packet`.
2407    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2408    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2409    /// ```
2410    ///
2411    /// Since an explicit `count` is provided, this method supports types with
2412    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2413    /// which do not take an explicit count do not support such types.
2414    ///
2415    /// ```
2416    /// use core::num::NonZeroU16;
2417    /// use zerocopy::*;
2418    /// # use zerocopy_derive::*;
2419    ///
2420    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2421    /// #[repr(C)]
2422    /// struct ZSTy {
2423    ///     leading_sized: NonZeroU16,
2424    ///     trailing_dst: [()],
2425    /// }
2426    ///
2427    /// let src = 0xCAFEu16.as_bytes();
2428    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2429    /// assert_eq!(zsty.trailing_dst.len(), 42);
2430    /// ```
2431    ///
2432    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2433    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_ref_from_bytes_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Size- and alignment-checked cast of the *entire* source slice (no
        // leftover bytes permitted) into a `Ptr<Self>` with `count` trailing
        // elements; bit validity is checked separately below.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_ref()),
                    // Validity check failed: map the `Ptr` source back to the
                    // original `&[u8]` for the returned error.
                    Err(e) => {
                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
                    }
                }
            }
            // Size or alignment error: likewise recover the original `&[u8]`.
            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
        }
    }
2476
2477    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
2478    /// a DST length equal to `count`.
2479    ///
2480    /// This method attempts to return a reference to the prefix of `source`
2481    /// interpreted as a `Self` with `count` trailing elements, and a reference
2482    /// to the remaining bytes. If the length of `source` is less than the size
2483    /// of `Self` with `count` elements, if `source` is not appropriately
2484    /// aligned, or if the prefix of `source` does not contain a valid instance
2485    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2486    /// you can [infallibly discard the alignment error][ConvertError::from].
2487    ///
2488    /// [self-unaligned]: Unaligned
2489    /// [slice-dst]: KnownLayout#dynamically-sized-types
2490    ///
2491    /// # Examples
2492    ///
2493    /// ```
2494    /// # #![allow(non_camel_case_types)] // For C0::xC0
2495    /// use zerocopy::TryFromBytes;
2496    /// # use zerocopy_derive::*;
2497    ///
2498    /// // The only valid value of this type is the byte `0xC0`
2499    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2500    /// #[repr(u8)]
2501    /// enum C0 { xC0 = 0xC0 }
2502    ///
2503    /// // The only valid value of this type is the bytes `0xC0C0`.
2504    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2505    /// #[repr(C)]
2506    /// struct C0C0(C0, C0);
2507    ///
2508    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2509    /// #[repr(C)]
2510    /// struct Packet {
2511    ///     magic_number: C0C0,
2512    ///     mug_size: u8,
2513    ///     temperature: u8,
2514    ///     marshmallows: [[u8; 2]],
2515    /// }
2516    ///
2517    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2518    ///
2519    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
2520    ///
2521    /// assert_eq!(packet.mug_size, 240);
2522    /// assert_eq!(packet.temperature, 77);
2523    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2524    /// assert_eq!(suffix, &[8u8][..]);
2525    ///
2526    /// // These bytes are not valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2528    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
2529    /// ```
2530    ///
2531    /// Since an explicit `count` is provided, this method supports types with
2532    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2533    /// which do not take an explicit count do not support such types.
2534    ///
2535    /// ```
2536    /// use core::num::NonZeroU16;
2537    /// use zerocopy::*;
2538    /// # use zerocopy_derive::*;
2539    ///
2540    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2541    /// #[repr(C)]
2542    /// struct ZSTy {
2543    ///     leading_sized: NonZeroU16,
2544    ///     trailing_dst: [()],
2545    /// }
2546    ///
2547    /// let src = 0xCAFEu16.as_bytes();
2548    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
2549    /// assert_eq!(zsty.trailing_dst.len(), 42);
2550    /// ```
2551    ///
2552    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
2553    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_ref_from_prefix_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Delegate to the shared prefix/suffix helper, passing the explicit
        // element count as pointer metadata; the helper already returns
        // `(&Self, remaining_bytes)` in prefix order.
        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
    }
2582
2583    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2584    /// a DST length equal to `count`.
2585    ///
2586    /// This method attempts to return a reference to the suffix of `source`
2587    /// interpreted as a `Self` with `count` trailing elements, and a reference
2588    /// to the preceding bytes. If the length of `source` is less than the size
2589    /// of `Self` with `count` elements, if the suffix of `source` is not
2590    /// appropriately aligned, or if the suffix of `source` does not contain a
2591    /// valid instance of `Self`, this returns `Err`. If [`Self:
2592    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2593    /// error][ConvertError::from].
2594    ///
2595    /// [self-unaligned]: Unaligned
2596    /// [slice-dst]: KnownLayout#dynamically-sized-types
2597    ///
2598    /// # Examples
2599    ///
2600    /// ```
2601    /// # #![allow(non_camel_case_types)] // For C0::xC0
2602    /// use zerocopy::TryFromBytes;
2603    /// # use zerocopy_derive::*;
2604    ///
2605    /// // The only valid value of this type is the byte `0xC0`
2606    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2607    /// #[repr(u8)]
2608    /// enum C0 { xC0 = 0xC0 }
2609    ///
2610    /// // The only valid value of this type is the bytes `0xC0C0`.
2611    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2612    /// #[repr(C)]
2613    /// struct C0C0(C0, C0);
2614    ///
2615    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2616    /// #[repr(C)]
2617    /// struct Packet {
2618    ///     magic_number: C0C0,
2619    ///     mug_size: u8,
2620    ///     temperature: u8,
2621    ///     marshmallows: [[u8; 2]],
2622    /// }
2623    ///
2624    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2625    ///
2626    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2627    ///
2628    /// assert_eq!(packet.mug_size, 240);
2629    /// assert_eq!(packet.temperature, 77);
2630    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2631    /// assert_eq!(prefix, &[123u8][..]);
2632    ///
2633    /// // These bytes are not valid instance of `Packet`.
2634    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2635    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2636    /// ```
2637    ///
2638    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_suffix`]
2640    /// which do not take an explicit count do not support such types.
2641    ///
2642    /// ```
2643    /// use core::num::NonZeroU16;
2644    /// use zerocopy::*;
2645    /// # use zerocopy_derive::*;
2646    ///
2647    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2648    /// #[repr(C)]
2649    /// struct ZSTy {
2650    ///     leading_sized: NonZeroU16,
2651    ///     trailing_dst: [()],
2652    /// }
2653    ///
2654    /// let src = 0xCAFEu16.as_bytes();
2655    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2656    /// assert_eq!(zsty.trailing_dst.len(), 42);
2657    /// ```
2658    ///
    /// [`try_ref_from_suffix`]: TryFromBytes::try_ref_from_suffix
2660    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_ref_from_suffix_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_suffix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // The shared helper yields `(&Self, preceding_bytes)`; `swap` reorders
        // the pair to this method's documented `(prefix, &Self)` order.
        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
    }
2689
2690    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2691    /// length equal to `count`.
2692    ///
2693    /// This method attempts to return a reference to `source` interpreted as a
2694    /// `Self` with `count` trailing elements. If the length of `source` is not
2695    /// equal to the size of `Self` with `count` elements, if `source` is not
2696    /// appropriately aligned, or if `source` does not contain a valid instance
2697    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2698    /// you can [infallibly discard the alignment error][ConvertError::from].
2699    ///
2700    /// [self-unaligned]: Unaligned
2701    /// [slice-dst]: KnownLayout#dynamically-sized-types
2702    ///
2703    /// # Examples
2704    ///
2705    /// ```
2706    /// # #![allow(non_camel_case_types)] // For C0::xC0
2707    /// use zerocopy::TryFromBytes;
2708    /// # use zerocopy_derive::*;
2709    ///
2710    /// // The only valid value of this type is the byte `0xC0`
2711    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2712    /// #[repr(u8)]
2713    /// enum C0 { xC0 = 0xC0 }
2714    ///
2715    /// // The only valid value of this type is the bytes `0xC0C0`.
2716    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2717    /// #[repr(C)]
2718    /// struct C0C0(C0, C0);
2719    ///
2720    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2721    /// #[repr(C, packed)]
2722    /// struct Packet {
2723    ///     magic_number: C0C0,
2724    ///     mug_size: u8,
2725    ///     temperature: u8,
2726    ///     marshmallows: [[u8; 2]],
2727    /// }
2728    ///
2729    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2730    ///
2731    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2732    ///
2733    /// assert_eq!(packet.mug_size, 240);
2734    /// assert_eq!(packet.temperature, 77);
2735    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2736    ///
2737    /// packet.temperature = 111;
2738    ///
2739    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2740    ///
2741    /// // These bytes are not valid instance of `Packet`.
2742    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2743    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2744    /// ```
2745    ///
2746    /// Since an explicit `count` is provided, this method supports types with
2747    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2748    /// which do not take an explicit count do not support such types.
2749    ///
2750    /// ```
2751    /// use core::num::NonZeroU16;
2752    /// use zerocopy::*;
2753    /// # use zerocopy_derive::*;
2754    ///
2755    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2756    /// #[repr(C, packed)]
2757    /// struct ZSTy {
2758    ///     leading_sized: NonZeroU16,
2759    ///     trailing_dst: [()],
2760    /// }
2761    ///
2762    /// let mut src = 0xCAFEu16;
2763    /// let src = src.as_mut_bytes();
2764    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2765    /// assert_eq!(zsty.trailing_dst.len(), 42);
2766    /// ```
2767    ///
2768    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
2769    ///  
    #[doc = codegen_header!("h5", "try_mut_from_bytes_with_elems")]
    ///
    /// See [`TryFromBytes::try_ref_from_bytes_with_elems`](#method.try_ref_from_bytes_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // Size- and alignment-checked cast of the *entire* source slice (no
        // leftover bytes permitted) into an exclusive `Ptr<Self>` with `count`
        // trailing elements; bit validity is checked separately below.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_mut()),
                    // Validity check failed: map the `Ptr` source back to the
                    // original `&mut [u8]` for the returned error.
                    Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
                }
            }
            // Size or alignment error: likewise recover the original `&mut [u8]`.
            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
        }
    }
2796
2797    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2798    /// with a DST length equal to `count`.
2799    ///
2800    /// This method attempts to return a reference to the prefix of `source`
2801    /// interpreted as a `Self` with `count` trailing elements, and a reference
2802    /// to the remaining bytes. If the length of `source` is less than the size
2803    /// of `Self` with `count` elements, if `source` is not appropriately
2804    /// aligned, or if the prefix of `source` does not contain a valid instance
2805    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2806    /// you can [infallibly discard the alignment error][ConvertError::from].
2807    ///
2808    /// [self-unaligned]: Unaligned
2809    /// [slice-dst]: KnownLayout#dynamically-sized-types
2810    ///
2811    /// # Examples
2812    ///
2813    /// ```
2814    /// # #![allow(non_camel_case_types)] // For C0::xC0
2815    /// use zerocopy::TryFromBytes;
2816    /// # use zerocopy_derive::*;
2817    ///
2818    /// // The only valid value of this type is the byte `0xC0`
2819    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2820    /// #[repr(u8)]
2821    /// enum C0 { xC0 = 0xC0 }
2822    ///
2823    /// // The only valid value of this type is the bytes `0xC0C0`.
2824    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2825    /// #[repr(C)]
2826    /// struct C0C0(C0, C0);
2827    ///
2828    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2829    /// #[repr(C, packed)]
2830    /// struct Packet {
2831    ///     magic_number: C0C0,
2832    ///     mug_size: u8,
2833    ///     temperature: u8,
2834    ///     marshmallows: [[u8; 2]],
2835    /// }
2836    ///
2837    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2838    ///
2839    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2840    ///
2841    /// assert_eq!(packet.mug_size, 240);
2842    /// assert_eq!(packet.temperature, 77);
2843    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2844    /// assert_eq!(suffix, &[8u8][..]);
2845    ///
2846    /// packet.temperature = 111;
2847    /// suffix[0] = 222;
2848    ///
2849    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2850    ///
2851    /// // These bytes are not valid instance of `Packet`.
2852    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2853    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2854    /// ```
2855    ///
2856    /// Since an explicit `count` is provided, this method supports types with
2857    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2858    /// which do not take an explicit count do not support such types.
2859    ///
2860    /// ```
2861    /// use core::num::NonZeroU16;
2862    /// use zerocopy::*;
2863    /// # use zerocopy_derive::*;
2864    ///
2865    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2866    /// #[repr(C, packed)]
2867    /// struct ZSTy {
2868    ///     leading_sized: NonZeroU16,
2869    ///     trailing_dst: [()],
2870    /// }
2871    ///
2872    /// let mut src = 0xCAFEu16;
2873    /// let src = src.as_mut_bytes();
2874    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2875    /// assert_eq!(zsty.trailing_dst.len(), 42);
2876    /// ```
2877    ///
2878    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
2879    ///
    #[doc = codegen_header!("h5", "try_mut_from_prefix_with_elems")]
    ///
    /// See [`TryFromBytes::try_ref_from_prefix_with_elems`](#method.try_ref_from_prefix_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // Delegate to the shared mutable prefix/suffix helper, passing the
        // explicit element count as pointer metadata; the helper already
        // returns `(&mut Self, remaining_bytes)` in prefix order.
        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
    }
2894
2895    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2896    /// with a DST length equal to `count`.
2897    ///
2898    /// This method attempts to return a reference to the suffix of `source`
2899    /// interpreted as a `Self` with `count` trailing elements, and a reference
2900    /// to the preceding bytes. If the length of `source` is less than the size
2901    /// of `Self` with `count` elements, if the suffix of `source` is not
2902    /// appropriately aligned, or if the suffix of `source` does not contain a
2903    /// valid instance of `Self`, this returns `Err`. If [`Self:
2904    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2905    /// error][ConvertError::from].
2906    ///
2907    /// [self-unaligned]: Unaligned
2908    /// [slice-dst]: KnownLayout#dynamically-sized-types
2909    ///
2910    /// # Examples
2911    ///
2912    /// ```
2913    /// # #![allow(non_camel_case_types)] // For C0::xC0
2914    /// use zerocopy::TryFromBytes;
2915    /// # use zerocopy_derive::*;
2916    ///
2917    /// // The only valid value of this type is the byte `0xC0`
2918    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2919    /// #[repr(u8)]
2920    /// enum C0 { xC0 = 0xC0 }
2921    ///
2922    /// // The only valid value of this type is the bytes `0xC0C0`.
2923    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2924    /// #[repr(C)]
2925    /// struct C0C0(C0, C0);
2926    ///
2927    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2928    /// #[repr(C, packed)]
2929    /// struct Packet {
2930    ///     magic_number: C0C0,
2931    ///     mug_size: u8,
2932    ///     temperature: u8,
2933    ///     marshmallows: [[u8; 2]],
2934    /// }
2935    ///
2936    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2937    ///
2938    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2939    ///
2940    /// assert_eq!(packet.mug_size, 240);
2941    /// assert_eq!(packet.temperature, 77);
2942    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2943    /// assert_eq!(prefix, &[123u8][..]);
2944    ///
2945    /// prefix[0] = 111;
2946    /// packet.temperature = 222;
2947    ///
2948    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2949    ///
2950    /// // These bytes are not valid instance of `Packet`.
2951    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2952    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2953    /// ```
2954    ///
2955    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_suffix`]
2957    /// which do not take an explicit count do not support such types.
2958    ///
2959    /// ```
2960    /// use core::num::NonZeroU16;
2961    /// use zerocopy::*;
2962    /// # use zerocopy_derive::*;
2963    ///
2964    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2965    /// #[repr(C, packed)]
2966    /// struct ZSTy {
2967    ///     leading_sized: NonZeroU16,
2968    ///     trailing_dst: [()],
2969    /// }
2970    ///
2971    /// let mut src = 0xCAFEu16;
2972    /// let src = src.as_mut_bytes();
2973    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2974    /// assert_eq!(zsty.trailing_dst.len(), 42);
2975    /// ```
2976    ///
    /// [`try_mut_from_suffix`]: TryFromBytes::try_mut_from_suffix
2978    ///
    #[doc = codegen_header!("h5", "try_mut_from_suffix_with_elems")]
    ///
    /// See [`TryFromBytes::try_ref_from_suffix_with_elems`](#method.try_ref_from_suffix_with_elems.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_suffix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // The shared helper yields `(&mut Self, preceding_bytes)`; `swap`
        // reorders the pair to this method's documented `(prefix, &mut Self)`
        // order.
        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
    }
2993
2994    /// Attempts to read the given `source` as a `Self`.
2995    ///
2996    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2997    /// instance of `Self`, this returns `Err`.
2998    ///
2999    /// # Examples
3000    ///
3001    /// ```
3002    /// use zerocopy::TryFromBytes;
3003    /// # use zerocopy_derive::*;
3004    ///
3005    /// // The only valid value of this type is the byte `0xC0`
3006    /// #[derive(TryFromBytes)]
3007    /// #[repr(u8)]
3008    /// enum C0 { xC0 = 0xC0 }
3009    ///
3010    /// // The only valid value of this type is the bytes `0xC0C0`.
3011    /// #[derive(TryFromBytes)]
3012    /// #[repr(C)]
3013    /// struct C0C0(C0, C0);
3014    ///
3015    /// #[derive(TryFromBytes)]
3016    /// #[repr(C)]
3017    /// struct Packet {
3018    ///     magic_number: C0C0,
3019    ///     mug_size: u8,
3020    ///     temperature: u8,
3021    /// }
3022    ///
3023    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
3024    ///
3025    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
3026    ///
3027    /// assert_eq!(packet.mug_size, 240);
3028    /// assert_eq!(packet.temperature, 77);
3029    ///
3030    /// // These bytes are not valid instance of `Packet`.
3031    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
3032    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
3033    /// ```
3034    ///
3035    /// # Performance Considerations
3036    ///
3037    /// In this version of zerocopy, this method reads the `source` into a
3038    /// well-aligned stack allocation and *then* validates that the allocation
3039    /// is a valid `Self`. This ensures that validation can be performed using
3040    /// aligned reads (which carry a performance advantage over unaligned reads
3041    /// on many platforms) at the cost of an unconditional copy.
3042    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_read_from_bytes",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy `source` into a well-aligned stack `MaybeUninit<Self>`; this
        // fails (with a size error) unless `source.len() == size_of::<Self>()`.
        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
        // its bytes are initialized.
        unsafe { try_read_from(source, candidate) }
    }
3066
3067    /// Attempts to read a `Self` from the prefix of the given `source`.
3068    ///
3069    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
3070    /// of `source`, returning that `Self` and any remaining bytes. If
3071    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
3072    /// of `Self`, it returns `Err`.
3073    ///
3074    /// # Examples
3075    ///
3076    /// ```
3077    /// use zerocopy::TryFromBytes;
3078    /// # use zerocopy_derive::*;
3079    ///
3080    /// // The only valid value of this type is the byte `0xC0`
3081    /// #[derive(TryFromBytes)]
3082    /// #[repr(u8)]
3083    /// enum C0 { xC0 = 0xC0 }
3084    ///
3085    /// // The only valid value of this type is the bytes `0xC0C0`.
3086    /// #[derive(TryFromBytes)]
3087    /// #[repr(C)]
3088    /// struct C0C0(C0, C0);
3089    ///
3090    /// #[derive(TryFromBytes)]
3091    /// #[repr(C)]
3092    /// struct Packet {
3093    ///     magic_number: C0C0,
3094    ///     mug_size: u8,
3095    ///     temperature: u8,
3096    /// }
3097    ///
3098    /// // These are more bytes than are needed to encode a `Packet`.
3099    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
3100    ///
3101    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
3102    ///
3103    /// assert_eq!(packet.mug_size, 240);
3104    /// assert_eq!(packet.temperature, 77);
3105    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
3106    ///
3107    /// // These bytes are not valid instance of `Packet`.
3108    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
3109    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
3110    /// ```
3111    ///
3112    /// # Performance Considerations
3113    ///
3114    /// In this version of zerocopy, this method reads the `source` into a
3115    /// well-aligned stack allocation and *then* validates that the allocation
3116    /// is a valid `Self`. This ensures that validation can be performed using
3117    /// aligned reads (which carry a performance advantage over unaligned reads
3118    /// on many platforms) at the cost of an unconditional copy.
3119    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_read_from_prefix",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy the first `size_of::<Self>()` bytes of `source` into a
        // well-aligned stack `MaybeUninit<Self>`, keeping the remaining bytes
        // as `suffix`; fails (with a size error) if `source` is too short.
        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
        // its bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
    }
3143
    /// Attempts to read a `Self` from the suffix of the given `source`.
    ///
    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any preceding bytes. If
    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
    /// of `Self`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(non_camel_case_types)] // For C0::xC0
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
    ///
    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
    /// ```
    ///
    /// # Performance Considerations
    ///
    /// In this version of zerocopy, this method reads the `source` into a
    /// well-aligned stack allocation and *then* validates that the allocation
    /// is a valid `Self`. This ensures that validation can be performed using
    /// aligned reads (which carry a performance advantage over unaligned reads
    /// on many platforms) at the cost of an unconditional copy.
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "try_read_from_suffix",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // FIXME(#2981): If `align_of::<Self>() == 1`, validate `source` in-place.

        // Copy the trailing bytes of `source` into a well-aligned
        // `MaybeUninit<Self>`; a too-short `source` surfaces as a size error.
        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of
        // its bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
    }
3221}
3222
/// Shared implementation of the `try_ref_from_prefix`/`try_ref_from_suffix`
/// conversions: casts one end of `source` (selected by `cast_type`) to a `&T`
/// and validates that the referent is a bit-valid `T`.
///
/// On success, returns the `&T` together with the remaining bytes (the suffix
/// for a prefix cast, and vice versa). `meta`, if present, supplies the
/// pointer metadata for the target `T` — presumably the element count when `T`
/// is a slice DST (confirm against `KnownLayout::PointerMetadata`).
#[inline(always)]
fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    cast_type: CastType,
    meta: Option<T::PointerMetadata>,
) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
    // First, perform the layout cast; bit validity is checked separately below
    // via `try_into_valid`.
    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
        Ok((source, prefix_suffix)) => {
            // This call may panic. If that happens, it doesn't cause any soundness
            // issues, as we have not generated any invalid state which we need to
            // fix before returning.
            match source.try_into_valid() {
                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
                // Validity failure: report the error with the original byte
                // slice as its source.
                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
            }
        }
        // Layout (cast) failure: convert the `Ptr` back to `&[u8]` for the error.
        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
    }
}
3242
/// Shared implementation of the `try_mut_from_prefix`/`try_mut_from_suffix`
/// conversions: casts one end of `candidate` (selected by `cast_type`) to a
/// `&mut T` and validates that the referent is a bit-valid `T`.
///
/// On success, returns the `&mut T` together with the remaining bytes (the
/// suffix for a prefix cast, and vice versa). `meta`, if present, supplies the
/// pointer metadata for the target `T` — presumably the element count when `T`
/// is a slice DST (confirm against `KnownLayout::PointerMetadata`).
#[inline(always)]
fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
    candidate: &mut [u8],
    cast_type: CastType,
    meta: Option<T::PointerMetadata>,
) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
    // First, perform the layout cast; bit validity is checked separately below
    // via `try_into_valid`.
    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
        Ok((candidate, prefix_suffix)) => {
            // This call may panic. If that happens, it doesn't cause any soundness
            // issues, as we have not generated any invalid state which we need to
            // fix before returning.
            match candidate.try_into_valid() {
                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
                // Validity failure: report the error with the original byte
                // slice as its source.
                Err(e) => Err(e.map_src(|src| src.as_bytes().as_mut()).into()),
            }
        }
        // Layout (cast) failure: convert the `Ptr` back to `&mut [u8]` for the
        // error.
        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
    }
}
3262
/// Reverses the order of a 2-tuple: `(t, u)` becomes `(u, t)`.
#[inline(always)]
fn swap<T, U>(pair: (T, U)) -> (U, T) {
    let (first, second) = pair;
    (second, first)
}
3267
/// Checks the bit validity of `candidate` as a `T` and, if it is valid,
/// returns the initialized `T`; otherwise, returns a validity error carrying
/// `source`.
///
/// # Safety
///
/// All bytes of `candidate` must be initialized.
#[inline(always)]
unsafe fn try_read_from<S, T: TryFromBytes>(
    source: S,
    mut candidate: CoreMaybeUninit<T>,
) -> Result<T, TryReadError<S, T>> {
    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
    // to add a `T: Immutable` bound.
    let c_ptr = Ptr::from_mut(&mut candidate);
    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
    // `candidate`, which the caller promises is entirely initialized. Since
    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
    // no values written to an `Initialized` `c_ptr` can violate its validity.
    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
    // via `c_ptr` so long as it is live, so we don't need to worry about the
    // fact that `c_ptr` may have more restricted validity than `candidate`.
    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
    let mut c_ptr = c_ptr.cast::<_, crate::pointer::cast::CastSized, _>();

    // Since we don't have `T: KnownLayout`, we hack around that by using
    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
    //
    // This call may panic. If that happens, it doesn't cause any soundness
    // issues, as we have not generated any invalid state which we need to fix
    // before returning.
    if !Wrapping::<T>::is_bit_valid(c_ptr.reborrow_shared().forget_aligned()) {
        return Err(ValidityError::new(source).into());
    }

    // Compile-time proof that `Wrapping<T>` and `T` are mutually
    // transmutable, which justifies the `assume_init` below.
    fn _assert_same_size_and_validity<T>()
    where
        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
    {
    }

    _assert_same_size_and_validity::<T>();

    // SAFETY: We just validated that `candidate` contains a valid
    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
    // guaranteed by the preceding type assertion.
    Ok(unsafe { candidate.assume_init() })
}
3313
/// Types for which a sequence of `0` bytes is a valid instance.
///
/// Any memory region of the appropriate length which is guaranteed to contain
/// only zero bytes can be viewed as any `FromZeros` type with no runtime
/// overhead. This is useful whenever memory is known to be in a zeroed state,
/// such as memory returned from some allocation routines.
///
/// # Warning: Padding bytes
///
/// Note that, when a value is moved or copied, only the non-padding bytes of
/// that value are guaranteed to be preserved. It is unsound to assume that
/// values written to padding bytes are preserved after a move or copy. For more
/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
///
/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(FromZeros)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::{FromZeros, Immutable};
/// #[derive(FromZeros)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeros, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `FromZeros`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: FromZeros`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `FromZeros` manually, and you don't plan on writing unsafe code that
/// operates on `FromZeros` types, then you don't need to read this section.*
///
/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
/// `T` whose bytes are all initialized to zero. If a type is marked as
/// `FromZeros` which violates this contract, it may cause undefined behavior.
///
/// `#[derive(FromZeros)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::FromZeros",
    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
)]
pub unsafe trait FromZeros: TryFromBytes {
    // The `Self: Sized` bound makes it so that `FromZeros` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// Overwrites `self` with zeros.
    ///
    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
    /// drop the current value and replace it with a new one — it simply
    /// modifies the bytes of the existing value.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let mut header = PacketHeader {
    ///     src_port: 100u16.to_be_bytes(),
    ///     dst_port: 200u16.to_be_bytes(),
    ///     length: 300u16.to_be_bytes(),
    ///     checksum: 400u16.to_be_bytes(),
    /// };
    ///
    /// header.zero();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    #[inline(always)]
    fn zero(&mut self) {
        let slf: *mut Self = self;
        // `size_of_val` handles both sized and unsized `Self`.
        let len = mem::size_of_val(self);
        // SAFETY:
        // - `self` is guaranteed by the type system to be valid for writes of
        //   size `size_of_val(self)`.
        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
        //   as required by `u8`.
        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
        //   of `Self`.
        //
        // FIXME(#429): Add references to docs and quotes.
        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
    }

    /// Creates an instance of `Self` from zeroed bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// # use zerocopy_derive::*;
    /// #
    /// #[derive(FromZeros)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header: PacketHeader = FromZeros::new_zeroed();
    ///
    /// assert_eq!(header.src_port, [0, 0]);
    /// assert_eq!(header.dst_port, [0, 0]);
    /// assert_eq!(header.length, [0, 0]);
    /// assert_eq!(header.checksum, [0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline(always)]
    fn new_zeroed() -> Self
    where
        Self: Sized,
    {
        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
        unsafe { mem::zeroed() }
    }

    /// Creates a `Box<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values on the heap and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
    /// storing `[u8; 1048576]` in a temporary variable on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
    /// have performance benefits.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is guaranteed
    /// never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(any(feature = "alloc", test))]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
    where
        Self: Sized,
    {
        // If `T` is a ZST, then return a proper boxed instance of it. There is
        // no allocation, but `Box` does require a correct dangling pointer.
        let layout = Layout::new::<Self>();
        if layout.size() == 0 {
            // Construct the `Box` from a dangling pointer to avoid calling
            // `Self::new_zeroed`. This ensures that stack space is never
            // allocated for `Self` even on lower opt-levels where this branch
            // might not get optimized out.

            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
            // requirements are that the pointer is non-null and sufficiently
            // aligned. Per [2], `NonNull::dangling` produces a pointer which
            // is sufficiently aligned. Since the produced pointer is a
            // `NonNull`, it is non-null.
            //
            // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout:
            //
            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
            //
            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
            //
            //   Creates a new `NonNull` that is dangling, but well-aligned.
            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
        }

        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
        // A null return from `alloc_zeroed` signals allocation failure.
        if ptr.is_null() {
            return Err(AllocError);
        }
        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
        #[allow(clippy::undocumented_unsafe_blocks)]
        Ok(unsafe { Box::from_raw(ptr) })
    }

    /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `[Self]` on the
    /// heap and zero-initializing them, without ever creating a temporary
    /// instance of `[Self; _]` on the stack. For example,
    /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
    /// the heap; it does not require storing the slice on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
    /// benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
        // (and, consequently, the `Box` derived from it) is a valid instance of
        // `Self`, because `Self` is `FromZeros`.
        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
    }

    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[must_use = "has no side effects (other than allocation)"]
    #[inline(always)]
    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
    where
        Self: Sized,
    {
        // Deprecated alias: forward to the renamed method.
        <[Self]>::new_box_zeroed_with_elems(len)
    }

    /// Creates a `Vec<Self>` from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `Vec`s and
    /// zero-initializing them, without ever creating a temporary instance of
    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
    /// heap; it does not require storing intermediate values on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline(always)]
    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
    where
        Self: Sized,
    {
        // A zeroed boxed slice converts to a `Vec` without reallocation.
        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
    }

    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
    /// the vector. The new items are initialized with zeros.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline(always)]
    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
        // panic condition is not satisfied.
        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
    }

    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline]
    fn insert_vec_zeroed(
        v: &mut Vec<Self>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        assert!(position <= v.len());
        // We only conditionally compile on versions on which `try_reserve` is
        // stable; the Clippy lint is a false positive.
        v.try_reserve(additional).map_err(|_| AllocError)?;
        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
        // * `ptr.add(position)`
        // * `position + additional`
        // * `v.len() + additional`
        //
        // `v.len() - position` cannot overflow because we asserted that
        // `position <= v.len()`.
        #[allow(clippy::multiple_unsafe_ops_per_block)]
        unsafe {
            // This is a potentially overlapping copy.
            let ptr = v.as_mut_ptr();
            // Shift the tail (`v.len() - position` elements) up to make room.
            #[allow(clippy::arithmetic_side_effects)]
            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
            // Zero the `additional` elements in the gap just created.
            ptr.add(position).write_bytes(0, additional);
            #[allow(clippy::arithmetic_side_effects)]
            v.set_len(v.len() + additional);
        }

        Ok(())
    }
}
3685
/// Analyzes whether a type is [`FromBytes`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
/// supertraits if it is sound to do so. This derive can be applied to structs,
/// enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
/// #[derive(FromBytes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromBytes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
/// #   VFF,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromBytes, Immutable)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@FromBytes#safety
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `FromBytes` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `FromBytes` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `FromBytes` for that type:
///
/// - If the type is a struct, all of its fields must be `FromBytes`.
/// - If the type is an enum:
///   - It must have a defined representation which is one of `u8`, `u16`, `i8`,
///     or `i16`.
///   - The maximum number of discriminants must be used (so that every possible
///     bit pattern is a valid one).
///   - Its fields must be `FromBytes`.
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
/// implementation details of this derive.
///
/// ## Why isn't an explicit representation required for structs?
///
/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
/// that structs are marked with `#[repr(C)]`.
///
/// Per the [Rust reference][reference],
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
/// a struct is soundly `FromBytes` if:
/// 1. its padding is soundly `FromBytes`, and
/// 2. its fields are soundly `FromBytes`.
///
/// The answer to the first question is always yes: padding bytes do not have
/// any validity constraints. A [discussion] of this question in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
/// its fields are `FromBytes`.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromBytes;
3792
3793/// Types for which any bit pattern is valid.
3794///
3795/// Any memory region of the appropriate length which contains initialized bytes
3796/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3797/// useful for efficiently parsing bytes as structured data.
3798///
3799/// # Warning: Padding bytes
3800///
3801/// Note that, when a value is moved or copied, only the non-padding bytes of
3802/// that value are guaranteed to be preserved. It is unsound to assume that
3803/// values written to padding bytes are preserved after a move or copy. For
3804/// example, the following is unsound:
3805///
3806/// ```rust,no_run
3807/// use core::mem::{size_of, transmute};
3808/// use zerocopy::FromZeros;
3809/// # use zerocopy_derive::*;
3810///
3811/// // Assume `Foo` is a type with padding bytes.
3812/// #[derive(FromZeros, Default)]
3813/// struct Foo {
3814/// # /*
3815///     ...
3816/// # */
3817/// }
3818///
3819/// let mut foo: Foo = Foo::default();
3820/// FromZeros::zero(&mut foo);
3821/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3822/// // those writes are not guaranteed to be preserved in padding bytes when
3823/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3824/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3825/// ```
3826///
3827/// # Implementation
3828///
3829/// **Do not implement this trait yourself!** Instead, use
3830/// [`#[derive(FromBytes)]`][derive]; e.g.:
3831///
3832/// ```
3833/// # use zerocopy_derive::{FromBytes, Immutable};
3834/// #[derive(FromBytes)]
3835/// struct MyStruct {
3836/// # /*
3837///     ...
3838/// # */
3839/// }
3840///
3841/// #[derive(FromBytes)]
3842/// #[repr(u8)]
3843/// enum MyEnum {
3844/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3845/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3846/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3847/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3848/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3849/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3850/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3851/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3852/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3853/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3854/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3855/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3856/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3857/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3858/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3859/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3860/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3861/// #   VFF,
3862/// # /*
3863///     ...
3864/// # */
3865/// }
3866///
3867/// #[derive(FromBytes, Immutable)]
3868/// union MyUnion {
3869/// #   variant: u8,
3870/// # /*
3871///     ...
3872/// # */
3873/// }
3874/// ```
3875///
3876/// This derive performs a sophisticated, compile-time safety analysis to
3877/// determine whether a type is `FromBytes`.
3878///
3879/// # Safety
3880///
3881/// *This section describes what is required in order for `T: FromBytes`, and
3882/// what unsafe code may assume of such types. If you don't plan on implementing
3883/// `FromBytes` manually, and you don't plan on writing unsafe code that
3884/// operates on `FromBytes` types, then you don't need to read this section.*
3885///
3886/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3887/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3888/// words, any byte value which is not uninitialized). If a type is marked as
3889/// `FromBytes` which violates this contract, it may cause undefined behavior.
3890///
3891/// `#[derive(FromBytes)]` only permits [types which satisfy these
3892/// requirements][derive-analysis].
3893///
3894#[cfg_attr(
3895    feature = "derive",
3896    doc = "[derive]: zerocopy_derive::FromBytes",
3897    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3898)]
3899#[cfg_attr(
3900    not(feature = "derive"),
3901    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3902    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3903)]
3904#[cfg_attr(
3905    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3906    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3907)]
3908pub unsafe trait FromBytes: FromZeros {
    // A hidden, deliberately-awkward required method. Because every impl must
    // provide it, its presence discourages hand-written `unsafe impl`s of
    // `FromBytes` in downstream code (the derive generates it automatically).
    //
    // The `Self: Sized` bound makes it so that `FromBytes` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
3915
3916    /// Interprets the given `source` as a `&Self`.
3917    ///
3918    /// This method attempts to return a reference to `source` interpreted as a
3919    /// `Self`. If the length of `source` is not a [valid size of
3920    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
3921    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
3922    /// [infallibly discard the alignment error][size-error-from].
3923    ///
3924    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
3925    ///
3926    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
3927    /// [self-unaligned]: Unaligned
3928    /// [size-error-from]: error/struct.SizeError.html#method.from-1
3929    /// [slice-dst]: KnownLayout#dynamically-sized-types
3930    ///
3931    /// # Compile-Time Assertions
3932    ///
3933    /// This method cannot yet be used on unsized types whose dynamically-sized
3934    /// component is zero-sized. Attempting to use this method on such types
3935    /// results in a compile-time assertion error; e.g.:
3936    ///
3937    /// ```compile_fail,E0080
3938    /// use zerocopy::*;
3939    /// # use zerocopy_derive::*;
3940    ///
3941    /// #[derive(FromBytes, Immutable, KnownLayout)]
3942    /// #[repr(C)]
3943    /// struct ZSTy {
3944    ///     leading_sized: u16,
3945    ///     trailing_dst: [()],
3946    /// }
3947    ///
    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
3949    /// ```
3950    ///
3951    /// # Examples
3952    ///
3953    /// ```
3954    /// use zerocopy::FromBytes;
3955    /// # use zerocopy_derive::*;
3956    ///
3957    /// #[derive(FromBytes, KnownLayout, Immutable)]
3958    /// #[repr(C)]
3959    /// struct PacketHeader {
3960    ///     src_port: [u8; 2],
3961    ///     dst_port: [u8; 2],
3962    ///     length: [u8; 2],
3963    ///     checksum: [u8; 2],
3964    /// }
3965    ///
3966    /// #[derive(FromBytes, KnownLayout, Immutable)]
3967    /// #[repr(C)]
3968    /// struct Packet {
3969    ///     header: PacketHeader,
3970    ///     body: [u8],
3971    /// }
3972    ///
3973    /// // These bytes encode a `Packet`.
3974    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
3975    ///
3976    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
3977    ///
3978    /// assert_eq!(packet.header.src_port, [0, 1]);
3979    /// assert_eq!(packet.header.dst_port, [2, 3]);
3980    /// assert_eq!(packet.header.length, [4, 5]);
3981    /// assert_eq!(packet.header.checksum, [6, 7]);
3982    /// assert_eq!(packet.body, [8, 9, 10, 11]);
3983    /// ```
3984    ///
3985    #[doc = codegen_section!(
3986        header = "h5",
3987        bench = "ref_from_bytes",
3988        format = "coco",
3989        arity = 3,
3990        [
3991            open
3992            @index 1
3993            @title "Sized"
3994            @variant "static_size"
3995        ],
3996        [
3997            @index 2
3998            @title "Unsized"
3999            @variant "dynamic_size"
4000        ],
4001        [
4002            @index 3
4003            @title "Dynamically Padded"
4004            @variant "dynamic_padding"
4005        ]
4006    )]
4007    #[must_use = "has no side effects"]
4008    #[inline(always)]
4009    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
4010    where
4011        Self: KnownLayout + Immutable,
4012    {
4013        static_assert_dst_is_not_zst!(Self);
4014        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
4015            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
4016            Err(err) => Err(err.map_src(|src| src.as_ref())),
4017        }
4018    }
4019
4020    /// Interprets the prefix of the given `source` as a `&Self` without
4021    /// copying.
4022    ///
4023    /// This method computes the [largest possible size of `Self`][valid-size]
4024    /// that can fit in the leading bytes of `source`, then attempts to return
4025    /// both a reference to those bytes interpreted as a `Self`, and a reference
4026    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4027    /// is not appropriately aligned, this returns `Err`. If [`Self:
4028    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4029    /// error][size-error-from].
4030    ///
4031    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4032    ///
4033    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4034    /// [self-unaligned]: Unaligned
4035    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4036    /// [slice-dst]: KnownLayout#dynamically-sized-types
4037    ///
4038    /// # Compile-Time Assertions
4039    ///
4040    /// This method cannot yet be used on unsized types whose dynamically-sized
4041    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
4042    /// support such types. Attempting to use this method on such types results
4043    /// in a compile-time assertion error; e.g.:
4044    ///
4045    /// ```compile_fail,E0080
4046    /// use zerocopy::*;
4047    /// # use zerocopy_derive::*;
4048    ///
4049    /// #[derive(FromBytes, Immutable, KnownLayout)]
4050    /// #[repr(C)]
4051    /// struct ZSTy {
4052    ///     leading_sized: u16,
4053    ///     trailing_dst: [()],
4054    /// }
4055    ///
    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
4057    /// ```
4058    ///
4059    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
4060    ///
4061    /// # Examples
4062    ///
4063    /// ```
4064    /// use zerocopy::FromBytes;
4065    /// # use zerocopy_derive::*;
4066    ///
4067    /// #[derive(FromBytes, KnownLayout, Immutable)]
4068    /// #[repr(C)]
4069    /// struct PacketHeader {
4070    ///     src_port: [u8; 2],
4071    ///     dst_port: [u8; 2],
4072    ///     length: [u8; 2],
4073    ///     checksum: [u8; 2],
4074    /// }
4075    ///
4076    /// #[derive(FromBytes, KnownLayout, Immutable)]
4077    /// #[repr(C)]
4078    /// struct Packet {
4079    ///     header: PacketHeader,
4080    ///     body: [[u8; 2]],
4081    /// }
4082    ///
4083    /// // These are more bytes than are needed to encode a `Packet`.
4084    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
4085    ///
4086    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
4087    ///
4088    /// assert_eq!(packet.header.src_port, [0, 1]);
4089    /// assert_eq!(packet.header.dst_port, [2, 3]);
4090    /// assert_eq!(packet.header.length, [4, 5]);
4091    /// assert_eq!(packet.header.checksum, [6, 7]);
4092    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
4093    /// assert_eq!(suffix, &[14u8][..]);
4094    /// ```
4095    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_prefix",
        format = "coco",
        arity = 3,
        [
            open
            @index 1
            @title "Sized"
            @variant "static_size"
        ],
        [
            @index 2
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 3
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        // Reject DSTs with zero-sized trailing elements at compile time (see
        // the "Compile-Time Assertions" section above).
        static_assert_dst_is_not_zst!(Self);
        // `None`: infer the largest trailing-element count that fits in
        // `source`; `Prefix`: take `Self` from the front and return the
        // remaining bytes as the second tuple element.
        ref_from_prefix_suffix(source, None, CastType::Prefix)
    }
4127
4128    /// Interprets the suffix of the given bytes as a `&Self`.
4129    ///
4130    /// This method computes the [largest possible size of `Self`][valid-size]
4131    /// that can fit in the trailing bytes of `source`, then attempts to return
4132    /// both a reference to those bytes interpreted as a `Self`, and a reference
4133    /// to the preceding bytes. If there are insufficient bytes, or if that
4134    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4135    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4136    /// alignment error][size-error-from].
4137    ///
4138    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4139    ///
4140    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4141    /// [self-unaligned]: Unaligned
4142    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4143    /// [slice-dst]: KnownLayout#dynamically-sized-types
4144    ///
4145    /// # Compile-Time Assertions
4146    ///
4147    /// This method cannot yet be used on unsized types whose dynamically-sized
4148    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
4149    /// support such types. Attempting to use this method on such types results
4150    /// in a compile-time assertion error; e.g.:
4151    ///
4152    /// ```compile_fail,E0080
4153    /// use zerocopy::*;
4154    /// # use zerocopy_derive::*;
4155    ///
4156    /// #[derive(FromBytes, Immutable, KnownLayout)]
4157    /// #[repr(C)]
4158    /// struct ZSTy {
4159    ///     leading_sized: u16,
4160    ///     trailing_dst: [()],
4161    /// }
4162    ///
    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
4164    /// ```
4165    ///
4166    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
4167    ///
4168    /// # Examples
4169    ///
4170    /// ```
4171    /// use zerocopy::FromBytes;
4172    /// # use zerocopy_derive::*;
4173    ///
4174    /// #[derive(FromBytes, Immutable, KnownLayout)]
4175    /// #[repr(C)]
4176    /// struct PacketTrailer {
4177    ///     frame_check_sequence: [u8; 4],
4178    /// }
4179    ///
4180    /// // These are more bytes than are needed to encode a `PacketTrailer`.
4181    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4182    ///
4183    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
4184    ///
4185    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
4186    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4187    /// ```
4188    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_suffix",
        format = "coco",
        arity = 3,
        [
            open
            @index 1
            @title "Sized"
            @variant "static_size"
        ],
        [
            @index 2
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 3
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: Immutable + KnownLayout,
    {
        // Reject DSTs with zero-sized trailing elements at compile time (see
        // the "Compile-Time Assertions" section above).
        static_assert_dst_is_not_zst!(Self);
        // The shared helper yields the cast target first; `swap` flips the
        // pair so the preceding bytes come first, matching the documented
        // `(&[u8], &Self)` return order.
        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4220
4221    /// Interprets the given `source` as a `&mut Self`.
4222    ///
4223    /// This method attempts to return a reference to `source` interpreted as a
4224    /// `Self`. If the length of `source` is not a [valid size of
4225    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
4226    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
4227    /// [infallibly discard the alignment error][size-error-from].
4228    ///
4229    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4230    ///
4231    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4232    /// [self-unaligned]: Unaligned
4233    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4234    /// [slice-dst]: KnownLayout#dynamically-sized-types
4235    ///
4236    /// # Compile-Time Assertions
4237    ///
4238    /// This method cannot yet be used on unsized types whose dynamically-sized
4239    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
4240    /// support such types. Attempting to use this method on such types results
4241    /// in a compile-time assertion error; e.g.:
4242    ///
4243    /// ```compile_fail,E0080
4244    /// use zerocopy::*;
4245    /// # use zerocopy_derive::*;
4246    ///
4247    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
4248    /// #[repr(C, packed)]
4249    /// struct ZSTy {
4250    ///     leading_sized: [u8; 2],
4251    ///     trailing_dst: [()],
4252    /// }
4253    ///
4254    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
4256    /// ```
4257    ///
4258    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
4259    ///
4260    /// # Examples
4261    ///
4262    /// ```
4263    /// use zerocopy::FromBytes;
4264    /// # use zerocopy_derive::*;
4265    ///
4266    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4267    /// #[repr(C)]
4268    /// struct PacketHeader {
4269    ///     src_port: [u8; 2],
4270    ///     dst_port: [u8; 2],
4271    ///     length: [u8; 2],
4272    ///     checksum: [u8; 2],
4273    /// }
4274    ///
4275    /// // These bytes encode a `PacketHeader`.
4276    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4277    ///
4278    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
4279    ///
4280    /// assert_eq!(header.src_port, [0, 1]);
4281    /// assert_eq!(header.dst_port, [2, 3]);
4282    /// assert_eq!(header.length, [4, 5]);
4283    /// assert_eq!(header.checksum, [6, 7]);
4284    ///
4285    /// header.checksum = [0, 0];
4286    ///
4287    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
4288    ///
4289    /// ```
4290    ///
    #[doc = codegen_header!("h5", "mut_from_bytes")]
    ///
    /// See [`FromBytes::ref_from_bytes`](#method.ref_from_bytes.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Reject DSTs with zero-sized trailing elements at compile time (see
        // the "Compile-Time Assertions" section above).
        static_assert_dst_is_not_zst!(Self);
        // `BecauseExclusive`: the `&mut` borrow provides exclusive access,
        // which is the aliasing justification for the mutable cast.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
            // NOTE: the explicit `::<_, (_, (_, _))>` turbofish guides type
            // inference for `recall_validity`'s transition path — do not
            // simplify it without confirming inference still succeeds.
            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
            Err(err) => Err(err.map_src(|src| src.as_mut())),
        }
    }
4306
4307    /// Interprets the prefix of the given `source` as a `&mut Self` without
4308    /// copying.
4309    ///
4310    /// This method computes the [largest possible size of `Self`][valid-size]
4311    /// that can fit in the leading bytes of `source`, then attempts to return
4312    /// both a reference to those bytes interpreted as a `Self`, and a reference
4313    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4314    /// is not appropriately aligned, this returns `Err`. If [`Self:
4315    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4316    /// error][size-error-from].
4317    ///
4318    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4319    ///
4320    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4321    /// [self-unaligned]: Unaligned
4322    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4323    /// [slice-dst]: KnownLayout#dynamically-sized-types
4324    ///
4325    /// # Compile-Time Assertions
4326    ///
4327    /// This method cannot yet be used on unsized types whose dynamically-sized
4328    /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
4329    /// support such types. Attempting to use this method on such types results
4330    /// in a compile-time assertion error; e.g.:
4331    ///
4332    /// ```compile_fail,E0080
4333    /// use zerocopy::*;
4334    /// # use zerocopy_derive::*;
4335    ///
4336    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
4337    /// #[repr(C, packed)]
4338    /// struct ZSTy {
4339    ///     leading_sized: [u8; 2],
4340    ///     trailing_dst: [()],
4341    /// }
4342    ///
4343    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
4345    /// ```
4346    ///
4347    /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
4348    ///
4349    /// # Examples
4350    ///
4351    /// ```
4352    /// use zerocopy::FromBytes;
4353    /// # use zerocopy_derive::*;
4354    ///
4355    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4356    /// #[repr(C)]
4357    /// struct PacketHeader {
4358    ///     src_port: [u8; 2],
4359    ///     dst_port: [u8; 2],
4360    ///     length: [u8; 2],
4361    ///     checksum: [u8; 2],
4362    /// }
4363    ///
4364    /// // These are more bytes than are needed to encode a `PacketHeader`.
4365    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4366    ///
4367    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
4368    ///
4369    /// assert_eq!(header.src_port, [0, 1]);
4370    /// assert_eq!(header.dst_port, [2, 3]);
4371    /// assert_eq!(header.length, [4, 5]);
4372    /// assert_eq!(header.checksum, [6, 7]);
4373    /// assert_eq!(body, &[8, 9][..]);
4374    ///
4375    /// header.checksum = [0, 0];
4376    /// body.fill(1);
4377    ///
4378    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
4379    /// ```
4380    ///
    #[doc = codegen_header!("h5", "mut_from_prefix")]
    ///
    /// See [`FromBytes::ref_from_prefix`](#method.ref_from_prefix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Reject DSTs with zero-sized trailing elements at compile time (see
        // the "Compile-Time Assertions" section above).
        static_assert_dst_is_not_zst!(Self);
        // `None`: infer the largest trailing-element count that fits in
        // `source`; `Prefix`: take `Self` from the front, returning the
        // remaining bytes mutably as the second tuple element.
        mut_from_prefix_suffix(source, None, CastType::Prefix)
    }
4395
4396    /// Interprets the suffix of the given `source` as a `&mut Self` without
4397    /// copying.
4398    ///
4399    /// This method computes the [largest possible size of `Self`][valid-size]
4400    /// that can fit in the trailing bytes of `source`, then attempts to return
4401    /// both a reference to those bytes interpreted as a `Self`, and a reference
4402    /// to the preceding bytes. If there are insufficient bytes, or if that
4403    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4404    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4405    /// alignment error][size-error-from].
4406    ///
4407    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
4408    ///
4409    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
4410    /// [self-unaligned]: Unaligned
4411    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4412    /// [slice-dst]: KnownLayout#dynamically-sized-types
4413    ///
4414    /// # Compile-Time Assertions
4415    ///
4416    /// This method cannot yet be used on unsized types whose dynamically-sized
4417    /// component is zero-sized. Attempting to use this method on such types
4418    /// results in a compile-time assertion error; e.g.:
4419    ///
4420    /// ```compile_fail,E0080
4421    /// use zerocopy::*;
4422    /// # use zerocopy_derive::*;
4423    ///
4424    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
4425    /// #[repr(C, packed)]
4426    /// struct ZSTy {
4427    ///     leading_sized: [u8; 2],
4428    ///     trailing_dst: [()],
4429    /// }
4430    ///
4431    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
4433    /// ```
4434    ///
4435    /// # Examples
4436    ///
4437    /// ```
4438    /// use zerocopy::FromBytes;
4439    /// # use zerocopy_derive::*;
4440    ///
4441    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
4442    /// #[repr(C)]
4443    /// struct PacketTrailer {
4444    ///     frame_check_sequence: [u8; 4],
4445    /// }
4446    ///
4447    /// // These are more bytes than are needed to encode a `PacketTrailer`.
4448    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4449    ///
4450    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
4451    ///
4452    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
4453    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
4454    ///
4455    /// prefix.fill(0);
4456    /// trailer.frame_check_sequence.fill(1);
4457    ///
4458    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
4459    /// ```
4460    ///
    #[doc = codegen_header!("h5", "mut_from_suffix")]
    ///
    /// See [`FromBytes::ref_from_suffix`](#method.ref_from_suffix.codegen).
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        // Reject DSTs with zero-sized trailing elements at compile time (see
        // the "Compile-Time Assertions" section above).
        static_assert_dst_is_not_zst!(Self);
        // The shared helper yields the cast target first; `swap` flips the
        // pair so the preceding bytes come first, matching the documented
        // `(&mut [u8], &mut Self)` return order.
        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4475
4476    /// Interprets the given `source` as a `&Self` with a DST length equal to
4477    /// `count`.
4478    ///
4479    /// This method attempts to return a reference to `source` interpreted as a
4480    /// `Self` with `count` trailing elements. If the length of `source` is not
4481    /// equal to the size of `Self` with `count` elements, or if `source` is not
4482    /// appropriately aligned, this returns `Err`. If [`Self:
4483    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4484    /// error][size-error-from].
4485    ///
4486    /// [self-unaligned]: Unaligned
4487    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4488    ///
4489    /// # Examples
4490    ///
4491    /// ```
4492    /// use zerocopy::FromBytes;
4493    /// # use zerocopy_derive::*;
4494    ///
4495    /// # #[derive(Debug, PartialEq, Eq)]
4496    /// #[derive(FromBytes, Immutable)]
4497    /// #[repr(C)]
4498    /// struct Pixel {
4499    ///     r: u8,
4500    ///     g: u8,
4501    ///     b: u8,
4502    ///     a: u8,
4503    /// }
4504    ///
4505    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
4506    ///
4507    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
4508    ///
4509    /// assert_eq!(pixels, &[
4510    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4511    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4512    /// ]);
4513    ///
4514    /// ```
4515    ///
4516    /// Since an explicit `count` is provided, this method supports types with
4517    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
4518    /// which do not take an explicit count do not support such types.
4519    ///
4520    /// ```
4521    /// use zerocopy::*;
4522    /// # use zerocopy_derive::*;
4523    ///
4524    /// #[derive(FromBytes, Immutable, KnownLayout)]
4525    /// #[repr(C)]
4526    /// struct ZSTy {
4527    ///     leading_sized: [u8; 2],
4528    ///     trailing_dst: [()],
4529    /// }
4530    ///
4531    /// let src = &[85, 85][..];
4532    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
4533    /// assert_eq!(zsty.trailing_dst.len(), 42);
4534    /// ```
4535    ///
4536    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
4537    ///
4538    #[doc = codegen_section!(
4539        header = "h5",
4540        bench = "ref_from_bytes_with_elems",
4541        format = "coco",
4542        arity = 2,
4543        [
4544            open
4545            @index 1
4546            @title "Unsized"
4547            @variant "dynamic_size"
4548        ],
4549        [
4550            @index 2
4551            @title "Dynamically Padded"
4552            @variant "dynamic_padding"
4553        ]
4554    )]
4555    #[must_use = "has no side effects"]
4556    #[inline]
4557    fn ref_from_bytes_with_elems(
4558        source: &[u8],
4559        count: usize,
4560    ) -> Result<&Self, CastError<&[u8], Self>>
4561    where
4562        Self: KnownLayout<PointerMetadata = usize> + Immutable,
4563    {
4564        let source = Ptr::from_ref(source);
4565        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4566        match maybe_slf {
4567            Ok(slf) => Ok(slf.recall_validity().as_ref()),
4568            Err(err) => Err(err.map_src(|s| s.as_ref())),
4569        }
4570    }
4571
4572    /// Interprets the prefix of the given `source` as a DST `&Self` with length
4573    /// equal to `count`.
4574    ///
4575    /// This method attempts to return a reference to the prefix of `source`
4576    /// interpreted as a `Self` with `count` trailing elements, and a reference
4577    /// to the remaining bytes. If there are insufficient bytes, or if `source`
4578    /// is not appropriately aligned, this returns `Err`. If [`Self:
4579    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4580    /// error][size-error-from].
4581    ///
4582    /// [self-unaligned]: Unaligned
4583    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4584    ///
4585    /// # Examples
4586    ///
4587    /// ```
4588    /// use zerocopy::FromBytes;
4589    /// # use zerocopy_derive::*;
4590    ///
4591    /// # #[derive(Debug, PartialEq, Eq)]
4592    /// #[derive(FromBytes, Immutable)]
4593    /// #[repr(C)]
4594    /// struct Pixel {
4595    ///     r: u8,
4596    ///     g: u8,
4597    ///     b: u8,
4598    ///     a: u8,
4599    /// }
4600    ///
4601    /// // These are more bytes than are needed to encode two `Pixel`s.
4602    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4603    ///
4604    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
4605    ///
4606    /// assert_eq!(pixels, &[
4607    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4608    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4609    /// ]);
4610    ///
4611    /// assert_eq!(suffix, &[8, 9]);
4612    /// ```
4613    ///
4614    /// Since an explicit `count` is provided, this method supports types with
4615    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
4616    /// which do not take an explicit count do not support such types.
4617    ///
4618    /// ```
4619    /// use zerocopy::*;
4620    /// # use zerocopy_derive::*;
4621    ///
4622    /// #[derive(FromBytes, Immutable, KnownLayout)]
4623    /// #[repr(C)]
4624    /// struct ZSTy {
4625    ///     leading_sized: [u8; 2],
4626    ///     trailing_dst: [()],
4627    /// }
4628    ///
4629    /// let src = &[85, 85][..];
4630    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
4631    /// assert_eq!(zsty.trailing_dst.len(), 42);
4632    /// ```
4633    ///
4634    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
4635    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_prefix_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // `Some(count)`: request exactly `count` trailing elements (which is
        // why trailing-ZST types are supported here — no compile-time ZST
        // assertion is required); `Prefix`: take `Self` from the front and
        // return the remaining bytes as the second tuple element.
        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4664
4665    /// Interprets the suffix of the given `source` as a DST `&Self` with length
4666    /// equal to `count`.
4667    ///
4668    /// This method attempts to return a reference to the suffix of `source`
4669    /// interpreted as a `Self` with `count` trailing elements, and a reference
4670    /// to the preceding bytes. If there are insufficient bytes, or if that
4671    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4672    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4673    /// alignment error][size-error-from].
4674    ///
4675    /// [self-unaligned]: Unaligned
4676    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4677    ///
4678    /// # Examples
4679    ///
4680    /// ```
4681    /// use zerocopy::FromBytes;
4682    /// # use zerocopy_derive::*;
4683    ///
4684    /// # #[derive(Debug, PartialEq, Eq)]
4685    /// #[derive(FromBytes, Immutable)]
4686    /// #[repr(C)]
4687    /// struct Pixel {
4688    ///     r: u8,
4689    ///     g: u8,
4690    ///     b: u8,
4691    ///     a: u8,
4692    /// }
4693    ///
4694    /// // These are more bytes than are needed to encode two `Pixel`s.
4695    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4696    ///
4697    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
4698    ///
4699    /// assert_eq!(prefix, &[0, 1]);
4700    ///
4701    /// assert_eq!(pixels, &[
4702    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4703    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4704    /// ]);
4705    /// ```
4706    ///
4707    /// Since an explicit `count` is provided, this method supports types with
4708    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
4709    /// which do not take an explicit count do not support such types.
4710    ///
4711    /// ```
4712    /// use zerocopy::*;
4713    /// # use zerocopy_derive::*;
4714    ///
4715    /// #[derive(FromBytes, Immutable, KnownLayout)]
4716    /// #[repr(C)]
4717    /// struct ZSTy {
4718    ///     leading_sized: [u8; 2],
4719    ///     trailing_dst: [()],
4720    /// }
4721    ///
4722    /// let src = &[85, 85][..];
4723    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
4724    /// assert_eq!(zsty.trailing_dst.len(), 42);
4725    /// ```
4726    ///
4727    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
4728    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "ref_from_suffix_with_elems",
        format = "coco",
        arity = 2,
        [
            open
            @index 1
            @title "Unsized"
            @variant "dynamic_size"
        ],
        [
            @index 2
            @title "Dynamically Padded"
            @variant "dynamic_padding"
        ]
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_suffix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // `Some(count)`: request exactly `count` trailing elements (so
        // trailing-ZST types are supported); `Suffix`: take `Self` from the
        // back. `swap` flips the helper's pair so the preceding bytes come
        // first, matching the documented `(&[u8], &Self)` return order.
        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
4757
4758    /// Interprets the given `source` as a `&mut Self` with a DST length equal
4759    /// to `count`.
4760    ///
4761    /// This method attempts to return a reference to `source` interpreted as a
4762    /// `Self` with `count` trailing elements. If the length of `source` is not
4763    /// equal to the size of `Self` with `count` elements, or if `source` is not
4764    /// appropriately aligned, this returns `Err`. If [`Self:
4765    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4766    /// error][size-error-from].
4767    ///
4768    /// [self-unaligned]: Unaligned
4769    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4770    ///
4771    /// # Examples
4772    ///
4773    /// ```
4774    /// use zerocopy::FromBytes;
4775    /// # use zerocopy_derive::*;
4776    ///
4777    /// # #[derive(Debug, PartialEq, Eq)]
4778    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4779    /// #[repr(C)]
4780    /// struct Pixel {
4781    ///     r: u8,
4782    ///     g: u8,
4783    ///     b: u8,
4784    ///     a: u8,
4785    /// }
4786    ///
4787    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
4788    ///
4789    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
4790    ///
4791    /// assert_eq!(pixels, &[
4792    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4793    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4794    /// ]);
4795    ///
4796    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4797    ///
4798    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
4799    /// ```
4800    ///
4801    /// Since an explicit `count` is provided, this method supports types with
4802    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
4803    /// which do not take an explicit count do not support such types.
4804    ///
4805    /// ```
4806    /// use zerocopy::*;
4807    /// # use zerocopy_derive::*;
4808    ///
4809    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4810    /// #[repr(C, packed)]
4811    /// struct ZSTy {
4812    ///     leading_sized: [u8; 2],
4813    ///     trailing_dst: [()],
4814    /// }
4815    ///
4816    /// let src = &mut [85, 85][..];
4817    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
4818    /// assert_eq!(zsty.trailing_dst.len(), 42);
4819    /// ```
4820    ///
4821    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
4822    ///
4823    #[doc = codegen_header!("h5", "mut_from_bytes_with_elems")]
4824    ///
4825    /// See [`TryFromBytes::ref_from_bytes_with_elems`](#method.ref_from_bytes_with_elems.codegen).
4826    #[must_use = "has no side effects"]
4827    #[inline]
4828    fn mut_from_bytes_with_elems(
4829        source: &mut [u8],
4830        count: usize,
4831    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
4832    where
4833        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
4834    {
4835        let source = Ptr::from_mut(source);
4836        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
4837        match maybe_slf {
4838            Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()),
4839            Err(err) => Err(err.map_src(|s| s.as_mut())),
4840        }
4841    }
4842
4843    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
4844    /// length equal to `count`.
4845    ///
4846    /// This method attempts to return a reference to the prefix of `source`
4847    /// interpreted as a `Self` with `count` trailing elements, and a reference
4848    /// to the preceding bytes. If there are insufficient bytes, or if `source`
4849    /// is not appropriately aligned, this returns `Err`. If [`Self:
4850    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
4851    /// error][size-error-from].
4852    ///
4853    /// [self-unaligned]: Unaligned
4854    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4855    ///
4856    /// # Examples
4857    ///
4858    /// ```
4859    /// use zerocopy::FromBytes;
4860    /// # use zerocopy_derive::*;
4861    ///
4862    /// # #[derive(Debug, PartialEq, Eq)]
4863    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
4864    /// #[repr(C)]
4865    /// struct Pixel {
4866    ///     r: u8,
4867    ///     g: u8,
4868    ///     b: u8,
4869    ///     a: u8,
4870    /// }
4871    ///
4872    /// // These are more bytes than are needed to encode two `Pixel`s.
4873    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4874    ///
4875    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
4876    ///
4877    /// assert_eq!(pixels, &[
4878    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
4879    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
4880    /// ]);
4881    ///
4882    /// assert_eq!(suffix, &[8, 9]);
4883    ///
4884    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4885    /// suffix.fill(1);
4886    ///
4887    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
4888    /// ```
4889    ///
4890    /// Since an explicit `count` is provided, this method supports types with
4891    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
4892    /// which do not take an explicit count do not support such types.
4893    ///
4894    /// ```
4895    /// use zerocopy::*;
4896    /// # use zerocopy_derive::*;
4897    ///
4898    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4899    /// #[repr(C, packed)]
4900    /// struct ZSTy {
4901    ///     leading_sized: [u8; 2],
4902    ///     trailing_dst: [()],
4903    /// }
4904    ///
4905    /// let src = &mut [85, 85][..];
4906    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
4907    /// assert_eq!(zsty.trailing_dst.len(), 42);
4908    /// ```
4909    ///
4910    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
4911    ///
4912    #[doc = codegen_header!("h5", "mut_from_prefix_with_elems")]
4913    ///
4914    /// See [`TryFromBytes::ref_from_prefix_with_elems`](#method.ref_from_prefix_with_elems.codegen).
4915    #[must_use = "has no side effects"]
4916    #[inline]
4917    fn mut_from_prefix_with_elems(
4918        source: &mut [u8],
4919        count: usize,
4920    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
4921    where
4922        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
4923    {
4924        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
4925    }
4926
4927    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
4928    /// length equal to `count`.
4929    ///
4930    /// This method attempts to return a reference to the suffix of `source`
4931    /// interpreted as a `Self` with `count` trailing elements, and a reference
4932    /// to the remaining bytes. If there are insufficient bytes, or if that
4933    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
4934    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
4935    /// alignment error][size-error-from].
4936    ///
4937    /// [self-unaligned]: Unaligned
4938    /// [size-error-from]: error/struct.SizeError.html#method.from-1
4939    ///
4940    /// # Examples
4941    ///
4942    /// ```
4943    /// use zerocopy::FromBytes;
4944    /// # use zerocopy_derive::*;
4945    ///
4946    /// # #[derive(Debug, PartialEq, Eq)]
4947    /// #[derive(FromBytes, IntoBytes, Immutable)]
4948    /// #[repr(C)]
4949    /// struct Pixel {
4950    ///     r: u8,
4951    ///     g: u8,
4952    ///     b: u8,
4953    ///     a: u8,
4954    /// }
4955    ///
4956    /// // These are more bytes than are needed to encode two `Pixel`s.
4957    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
4958    ///
4959    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
4960    ///
4961    /// assert_eq!(prefix, &[0, 1]);
4962    ///
4963    /// assert_eq!(pixels, &[
4964    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
4965    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
4966    /// ]);
4967    ///
4968    /// prefix.fill(9);
4969    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
4970    ///
4971    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
4972    /// ```
4973    ///
4974    /// Since an explicit `count` is provided, this method supports types with
4975    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
4976    /// which do not take an explicit count do not support such types.
4977    ///
4978    /// ```
4979    /// use zerocopy::*;
4980    /// # use zerocopy_derive::*;
4981    ///
4982    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
4983    /// #[repr(C, packed)]
4984    /// struct ZSTy {
4985    ///     leading_sized: [u8; 2],
4986    ///     trailing_dst: [()],
4987    /// }
4988    ///
4989    /// let src = &mut [85, 85][..];
4990    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
4991    /// assert_eq!(zsty.trailing_dst.len(), 42);
4992    /// ```
4993    ///
4994    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
4995    ///
4996    #[doc = codegen_header!("h5", "mut_from_suffix_with_elems")]
4997    ///
4998    /// See [`TryFromBytes::ref_from_suffix_with_elems`](#method.ref_from_suffix_with_elems.codegen).
4999    #[must_use = "has no side effects"]
5000    #[inline]
5001    fn mut_from_suffix_with_elems(
5002        source: &mut [u8],
5003        count: usize,
5004    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
5005    where
5006        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
5007    {
5008        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
5009    }
5010
    /// Reads a copy of `Self` from the given `source`.
    ///
    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "read_from_bytes",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting through `Unalign<Self>` removes any alignment requirement,
        // leaving a size mismatch as the only runtime failure mode.
        match Ref::<_, Unalign<Self>>::sized_from(source) {
            Ok(r) => Ok(Ref::read(&r).into_inner()),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited (an empty `match` on it
            // compiles), so this arm is vacuously exhaustive and unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
5064
    /// Reads a copy of `Self` from the prefix of the given `source`.
    ///
    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any remaining bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, [8, 9]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "read_from_prefix",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting through `Unalign<Self>` removes any alignment requirement,
        // leaving a size mismatch as the only runtime failure mode.
        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited (an empty `match` on it
            // compiles), so this arm is vacuously exhaustive and unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
5121
    /// Reads a copy of `Self` from the suffix of the given `source`.
    ///
    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any preceding bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    /// ```
    ///
    #[doc = codegen_section!(
        header = "h5",
        bench = "read_from_suffix",
        format = "coco_static_size",
    )]
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Casting through `Unalign<Self>` removes any alignment requirement,
        // leaving a size mismatch as the only runtime failure mode.
        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited (an empty `match` on it
            // compiles), so this arm is vacuously exhaustive and unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
5172
    /// Reads a copy of `self` from an `io::Read`.
    ///
    /// This is useful for interfacing with operating system byte sinks (files,
    /// sockets, etc.).
    ///
    /// # Errors
    ///
    /// Returns any error produced by the underlying call to
    /// [`io::Read::read_exact`].
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use zerocopy::{byteorder::big_endian::*, FromBytes};
    /// use std::fs::File;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct BitmapFileHeader {
    ///     signature: [u8; 2],
    ///     size: U32,
    ///     reserved: U64,
    ///     offset: U64,
    /// }
    ///
    /// let mut file = File::open("image.bin").unwrap();
    /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
    /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn read_from_io<R>(mut src: R) -> io::Result<Self>
    where
        Self: Sized,
        R: io::Read,
    {
        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
        // will not necessarily preserve zeros written to those padding byte
        // locations, and so `buf` could contain uninitialized bytes.
        let mut buf = CoreMaybeUninit::<Self>::uninit();
        buf.zero();

        let ptr = Ptr::from_mut(&mut buf);
        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
        // cannot be used to write values which will violate `buf`'s bit
        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
        // cannot be violated even though `buf` may have more permissive bit
        // validity than `ptr`.
        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
        let ptr = ptr.as_bytes();
        // Fill the zeroed buffer directly from the reader; `?` propagates any
        // I/O error (on error, the partially-written `buf` is simply dropped).
        src.read_exact(ptr.as_mut())?;
        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
        // `FromBytes`.
        Ok(unsafe { buf.assume_init() })
    }
5228
5229    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
5230    #[doc(hidden)]
5231    #[must_use = "has no side effects"]
5232    #[inline(always)]
5233    fn ref_from(source: &[u8]) -> Option<&Self>
5234    where
5235        Self: KnownLayout + Immutable,
5236    {
5237        Self::ref_from_bytes(source).ok()
5238    }
5239
5240    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
5241    #[doc(hidden)]
5242    #[must_use = "has no side effects"]
5243    #[inline(always)]
5244    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
5245    where
5246        Self: KnownLayout + IntoBytes,
5247    {
5248        Self::mut_from_bytes(source).ok()
5249    }
5250
5251    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
5252    #[doc(hidden)]
5253    #[must_use = "has no side effects"]
5254    #[inline(always)]
5255    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
5256    where
5257        Self: Sized + Immutable,
5258    {
5259        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
5260    }
5261
5262    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
5263    #[doc(hidden)]
5264    #[must_use = "has no side effects"]
5265    #[inline(always)]
5266    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
5267    where
5268        Self: Sized + Immutable,
5269    {
5270        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
5271    }
5272
5273    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
5274    #[doc(hidden)]
5275    #[must_use = "has no side effects"]
5276    #[inline(always)]
5277    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
5278    where
5279        Self: Sized + IntoBytes,
5280    {
5281        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
5282    }
5283
5284    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
5285    #[doc(hidden)]
5286    #[must_use = "has no side effects"]
5287    #[inline(always)]
5288    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
5289    where
5290        Self: Sized + IntoBytes,
5291    {
5292        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
5293    }
5294
5295    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
5296    #[doc(hidden)]
5297    #[must_use = "has no side effects"]
5298    #[inline(always)]
5299    fn read_from(source: &[u8]) -> Option<Self>
5300    where
5301        Self: Sized,
5302    {
5303        Self::read_from_bytes(source).ok()
5304    }
5305}
5306
5307/// Interprets the given affix of the given bytes as a `&Self`.
5308///
5309/// This method computes the largest possible size of `Self` that can fit in the
5310/// prefix or suffix bytes of `source`, then attempts to return both a reference
5311/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
5312/// If there are insufficient bytes, or if that affix of `source` is not
5313/// appropriately aligned, this returns `Err`.
5314#[inline(always)]
5315fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
5316    source: &[u8],
5317    meta: Option<T::PointerMetadata>,
5318    cast_type: CastType,
5319) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
5320    let (slf, prefix_suffix) = Ptr::from_ref(source)
5321        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
5322        .map_err(|err| err.map_src(|s| s.as_ref()))?;
5323    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
5324}
5325
5326/// Interprets the given affix of the given bytes as a `&mut Self` without
5327/// copying.
5328///
5329/// This method computes the largest possible size of `Self` that can fit in the
5330/// prefix or suffix bytes of `source`, then attempts to return both a reference
5331/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
5332/// If there are insufficient bytes, or if that affix of `source` is not
5333/// appropriately aligned, this returns `Err`.
5334#[inline(always)]
5335fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
5336    source: &mut [u8],
5337    meta: Option<T::PointerMetadata>,
5338    cast_type: CastType,
5339) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
5340    let (slf, prefix_suffix) = Ptr::from_mut(source)
5341        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
5342        .map_err(|err| err.map_src(|s| s.as_mut()))?;
5343    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
5344}
5345
5346/// Analyzes whether a type is [`IntoBytes`].
5347///
5348/// This derive analyzes, at compile time, whether the annotated type satisfies
5349/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
5350/// sound to do so. This derive can be applied to structs and enums (see below
5351/// for union support); e.g.:
5352///
5353/// ```
5354/// # use zerocopy_derive::{IntoBytes};
5355/// #[derive(IntoBytes)]
5356/// #[repr(C)]
5357/// struct MyStruct {
5358/// # /*
5359///     ...
5360/// # */
5361/// }
5362///
5363/// #[derive(IntoBytes)]
5364/// #[repr(u8)]
5365/// enum MyEnum {
5366/// #   Variant,
5367/// # /*
5368///     ...
5369/// # */
5370/// }
5371/// ```
5372///
5373/// [safety conditions]: trait@IntoBytes#safety
5374///
5375/// # Error Messages
5376///
5377/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
5378/// for `IntoBytes` is implemented, you may get an error like this:
5379///
5380/// ```text
5381/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
5382///   --> lib.rs:23:10
5383///    |
5384///  1 | #[derive(IntoBytes)]
5385///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
5386///    |
5387///    = help: the following implementations were found:
5388///                   <() as PaddingFree<T, false>>
5389/// ```
5390///
5391/// This error indicates that the type being annotated has padding bytes, which
5392/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
5393/// fields by using types in the [`byteorder`] module, wrapping field types in
5394/// [`Unalign`], adding explicit struct fields where those padding bytes would
5395/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
5396/// layout] for more information about type layout and padding.
5397///
5398/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
5399///
5400/// # Unions
5401///
5402/// Currently, union bit validity is [up in the air][union-validity], and so
5403/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
5404/// However, implementing `IntoBytes` on a union type is likely sound on all
5405/// existing Rust toolchains - it's just that it may become unsound in the
5406/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
5407/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
5408///
5409/// ```shell
5410/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
5411/// ```
5412///
5413/// However, it is your responsibility to ensure that this derive is sound on
5414/// the specific versions of the Rust toolchain you are using! We make no
5415/// stability or soundness guarantees regarding this cfg, and may remove it at
5416/// any point.
5417///
5418/// We are actively working with Rust to stabilize the necessary language
5419/// guarantees to support this in a forwards-compatible way, which will enable
5420/// us to remove the cfg gate. As part of this effort, we need to know how much
5421/// demand there is for this feature. If you would like to use `IntoBytes` on
5422/// unions, [please let us know][discussion].
5423///
5424/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
5425/// [discussion]: https://github.com/google/zerocopy/discussions/1802
5426///
5427/// # Analysis
5428///
5429/// *This section describes, roughly, the analysis performed by this derive to
5430/// determine whether it is sound to implement `IntoBytes` for a given type.
5431/// Unless you are modifying the implementation of this derive, or attempting to
5432/// manually implement `IntoBytes` for a type yourself, you don't need to read
5433/// this section.*
5434///
5435/// If a type has the following properties, then this derive can implement
5436/// `IntoBytes` for that type:
5437///
5438/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
5439///     - if the type is `repr(transparent)` or `repr(packed)`, it is
5440///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
5441///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
5442///       if its field is [`IntoBytes`]; else,
5443///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
5444///       is sized and has no padding bytes; else,
5445///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
5446/// - If the type is an enum:
5447///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
5448///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
5449///   - It must have no padding bytes.
5450///   - Its fields must be [`IntoBytes`].
5451///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
/// implementation details of this derive.
5455///
5456/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
5457#[cfg(any(feature = "derive", test))]
5458#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5459pub use zerocopy_derive::IntoBytes;
5460
5461/// Types that can be converted to an immutable slice of initialized bytes.
5462///
5463/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
5464/// same size. This is useful for efficiently serializing structured data as raw
5465/// bytes.
5466///
5467/// # Implementation
5468///
5469/// **Do not implement this trait yourself!** Instead, use
5470/// [`#[derive(IntoBytes)]`][derive]; e.g.:
5471///
5472/// ```
5473/// # use zerocopy_derive::IntoBytes;
5474/// #[derive(IntoBytes)]
5475/// #[repr(C)]
5476/// struct MyStruct {
5477/// # /*
5478///     ...
5479/// # */
5480/// }
5481///
5482/// #[derive(IntoBytes)]
5483/// #[repr(u8)]
5484/// enum MyEnum {
5485/// #   Variant0,
5486/// # /*
5487///     ...
5488/// # */
5489/// }
5490/// ```
5491///
5492/// This derive performs a sophisticated, compile-time safety analysis to
5493/// determine whether a type is `IntoBytes`. See the [derive
5494/// documentation][derive] for guidance on how to interpret error messages
5495/// produced by the derive's analysis.
5496///
5497/// # Safety
5498///
5499/// *This section describes what is required in order for `T: IntoBytes`, and
5500/// what unsafe code may assume of such types. If you don't plan on implementing
5501/// `IntoBytes` manually, and you don't plan on writing unsafe code that
5502/// operates on `IntoBytes` types, then you don't need to read this section.*
5503///
5504/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
5505/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
5506/// marked as `IntoBytes` which violates this contract, it may cause undefined
5507/// behavior.
5508///
5509/// `#[derive(IntoBytes)]` only permits [types which satisfy these
5510/// requirements][derive-analysis].
5511///
5512#[cfg_attr(
5513    feature = "derive",
5514    doc = "[derive]: zerocopy_derive::IntoBytes",
5515    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
5516)]
5517#[cfg_attr(
5518    not(feature = "derive"),
5519    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
5520    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
5521)]
5522#[cfg_attr(
5523    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5524    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
5525)]
5526pub unsafe trait IntoBytes {
    // The `Self: Sized` bound makes it so that this function doesn't prevent
    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
    // prevent object safety, but those provide a benefit in exchange for object
    // safety. If at some point we remove those methods, change their type
    // signatures, or move them out of this trait so that `IntoBytes` is object
    // safe again, it's important that this function not prevent object safety.
    #[doc(hidden)]
    // Required method with no default body: any `impl IntoBytes` must supply
    // it, which discourages hand-written (non-derived) implementations.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
5537
5538    /// Gets the bytes of this value.
5539    ///
5540    /// # Examples
5541    ///
5542    /// ```
5543    /// use zerocopy::IntoBytes;
5544    /// # use zerocopy_derive::*;
5545    ///
5546    /// #[derive(IntoBytes, Immutable)]
5547    /// #[repr(C)]
5548    /// struct PacketHeader {
5549    ///     src_port: [u8; 2],
5550    ///     dst_port: [u8; 2],
5551    ///     length: [u8; 2],
5552    ///     checksum: [u8; 2],
5553    /// }
5554    ///
5555    /// let header = PacketHeader {
5556    ///     src_port: [0, 1],
5557    ///     dst_port: [2, 3],
5558    ///     length: [4, 5],
5559    ///     checksum: [6, 7],
5560    /// };
5561    ///
5562    /// let bytes = header.as_bytes();
5563    ///
5564    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5565    /// ```
5566    ///
5567    #[doc = codegen_section!(
5568        header = "h5",
5569        bench = "as_bytes",
5570        format = "coco",
5571        arity = 2,
5572        [
5573            open
5574            @index 1
5575            @title "Sized"
5576            @variant "static_size"
5577        ],
5578        [
5579            @index 2
5580            @title "Unsized"
5581            @variant "dynamic_size"
5582        ]
5583    )]
5584    #[must_use = "has no side effects"]
5585    #[inline(always)]
5586    fn as_bytes(&self) -> &[u8]
5587    where
5588        Self: Immutable,
5589    {
5590        // Note that this method does not have a `Self: Sized` bound;
5591        // `size_of_val` works for unsized values too.
5592        let len = mem::size_of_val(self);
5593        let slf: *const Self = self;
5594
5595        // SAFETY:
5596        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
5597        //   many bytes because...
5598        //   - `slf` is the same pointer as `self`, and `self` is a reference
5599        //     which points to an object whose size is `len`. Thus...
5600        //     - The entire region of `len` bytes starting at `slf` is contained
5601        //       within a single allocation.
5602        //     - `slf` is non-null.
5603        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5604        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5605        //   initialized.
5606        // - Since `slf` is derived from `self`, and `self` is an immutable
5607        //   reference, the only other references to this memory region that
5608        //   could exist are other immutable references, which by `Self:
5609        //   Immutable` don't permit mutation.
5610        // - The total size of the resulting slice is no larger than
5611        //   `isize::MAX` because no allocation produced by safe code can be
5612        //   larger than `isize::MAX`.
5613        //
5614        // FIXME(#429): Add references to docs and quotes.
5615        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
5616    }
5617
5618    /// Gets the bytes of this value mutably.
5619    ///
5620    /// # Examples
5621    ///
5622    /// ```
5623    /// use zerocopy::IntoBytes;
5624    /// # use zerocopy_derive::*;
5625    ///
5626    /// # #[derive(Eq, PartialEq, Debug)]
5627    /// #[derive(FromBytes, IntoBytes, Immutable)]
5628    /// #[repr(C)]
5629    /// struct PacketHeader {
5630    ///     src_port: [u8; 2],
5631    ///     dst_port: [u8; 2],
5632    ///     length: [u8; 2],
5633    ///     checksum: [u8; 2],
5634    /// }
5635    ///
5636    /// let mut header = PacketHeader {
5637    ///     src_port: [0, 1],
5638    ///     dst_port: [2, 3],
5639    ///     length: [4, 5],
5640    ///     checksum: [6, 7],
5641    /// };
5642    ///
5643    /// let bytes = header.as_mut_bytes();
5644    ///
5645    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5646    ///
5647    /// bytes.reverse();
5648    ///
5649    /// assert_eq!(header, PacketHeader {
5650    ///     src_port: [7, 6],
5651    ///     dst_port: [5, 4],
5652    ///     length: [3, 2],
5653    ///     checksum: [1, 0],
5654    /// });
5655    /// ```
5656    ///
5657    #[doc = codegen_header!("h5", "as_mut_bytes")]
5658    ///
5659    /// See [`IntoBytes::as_bytes`](#method.as_bytes.codegen).
5660    #[must_use = "has no side effects"]
5661    #[inline(always)]
5662    fn as_mut_bytes(&mut self) -> &mut [u8]
5663    where
5664        Self: FromBytes,
5665    {
5666        // Note that this method does not have a `Self: Sized` bound;
5667        // `size_of_val` works for unsized values too.
5668        let len = mem::size_of_val(self);
5669        let slf: *mut Self = self;
5670
5671        // SAFETY:
5672        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
5673        //   size_of::<u8>()` many bytes because...
5674        //   - `slf` is the same pointer as `self`, and `self` is a reference
5675        //     which points to an object whose size is `len`. Thus...
5676        //     - The entire region of `len` bytes starting at `slf` is contained
5677        //       within a single allocation.
5678        //     - `slf` is non-null.
5679        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5680        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5681        //   initialized.
5682        // - `Self: FromBytes` ensures that no write to this memory region
5683        //   could result in it containing an invalid `Self`.
5684        // - Since `slf` is derived from `self`, and `self` is a mutable
5685        //   reference, no other references to this memory region can exist.
5686        // - The total size of the resulting slice is no larger than
5687        //   `isize::MAX` because no allocation produced by safe code can be
5688        //   larger than `isize::MAX`.
5689        //
5690        // FIXME(#429): Add references to docs and quotes.
5691        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
5692    }
5693
5694    /// Writes a copy of `self` to `dst`.
5695    ///
5696    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5697    ///
5698    /// # Examples
5699    ///
5700    /// ```
5701    /// use zerocopy::IntoBytes;
5702    /// # use zerocopy_derive::*;
5703    ///
5704    /// #[derive(IntoBytes, Immutable)]
5705    /// #[repr(C)]
5706    /// struct PacketHeader {
5707    ///     src_port: [u8; 2],
5708    ///     dst_port: [u8; 2],
5709    ///     length: [u8; 2],
5710    ///     checksum: [u8; 2],
5711    /// }
5712    ///
5713    /// let header = PacketHeader {
5714    ///     src_port: [0, 1],
5715    ///     dst_port: [2, 3],
5716    ///     length: [4, 5],
5717    ///     checksum: [6, 7],
5718    /// };
5719    ///
5720    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5721    ///
5722    /// header.write_to(&mut bytes[..]);
5723    ///
5724    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5725    /// ```
5726    ///
5727    /// If too many or too few target bytes are provided, `write_to` returns
5728    /// `Err` and leaves the target bytes unmodified:
5729    ///
5730    /// ```
5731    /// # use zerocopy::IntoBytes;
5732    /// # let header = u128::MAX;
5733    /// let mut excessive_bytes = &mut [0u8; 128][..];
5734    ///
5735    /// let write_result = header.write_to(excessive_bytes);
5736    ///
5737    /// assert!(write_result.is_err());
5738    /// assert_eq!(excessive_bytes, [0u8; 128]);
5739    /// ```
5740    ///
5741    #[doc = codegen_section!(
5742        header = "h5",
5743        bench = "write_to",
5744        format = "coco",
5745        arity = 2,
5746        [
5747            open
5748            @index 1
5749            @title "Sized"
5750            @variant "static_size"
5751        ],
5752        [
5753            @index 2
5754            @title "Unsized"
5755            @variant "dynamic_size"
5756        ]
5757    )]
5758    #[must_use = "callers should check the return value to see if the operation succeeded"]
5759    #[inline]
5760    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5761    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5762    where
5763        Self: Immutable,
5764    {
5765        let src = self.as_bytes();
5766        if dst.len() == src.len() {
5767            // SAFETY: Within this branch of the conditional, we have ensured
5768            // that `dst.len()` is equal to `src.len()`. Neither the size of the
5769            // source nor the size of the destination change between the above
5770            // size check and the invocation of `copy_unchecked`.
5771            unsafe { util::copy_unchecked(src, dst) }
5772            Ok(())
5773        } else {
5774            Err(SizeError::new(self))
5775        }
5776    }
5777
5778    /// Writes a copy of `self` to the prefix of `dst`.
5779    ///
5780    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5781    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5782    ///
5783    /// # Examples
5784    ///
5785    /// ```
5786    /// use zerocopy::IntoBytes;
5787    /// # use zerocopy_derive::*;
5788    ///
5789    /// #[derive(IntoBytes, Immutable)]
5790    /// #[repr(C)]
5791    /// struct PacketHeader {
5792    ///     src_port: [u8; 2],
5793    ///     dst_port: [u8; 2],
5794    ///     length: [u8; 2],
5795    ///     checksum: [u8; 2],
5796    /// }
5797    ///
5798    /// let header = PacketHeader {
5799    ///     src_port: [0, 1],
5800    ///     dst_port: [2, 3],
5801    ///     length: [4, 5],
5802    ///     checksum: [6, 7],
5803    /// };
5804    ///
5805    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5806    ///
5807    /// header.write_to_prefix(&mut bytes[..]);
5808    ///
5809    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5810    /// ```
5811    ///
5812    /// If insufficient target bytes are provided, `write_to_prefix` returns
5813    /// `Err` and leaves the target bytes unmodified:
5814    ///
5815    /// ```
5816    /// # use zerocopy::IntoBytes;
5817    /// # let header = u128::MAX;
5818    /// let mut insufficient_bytes = &mut [0, 0][..];
5819    ///
5820    /// let write_result = header.write_to_suffix(insufficient_bytes);
5821    ///
5822    /// assert!(write_result.is_err());
5823    /// assert_eq!(insufficient_bytes, [0, 0]);
5824    /// ```
5825    ///
5826    #[doc = codegen_section!(
5827        header = "h5",
5828        bench = "write_to_prefix",
5829        format = "coco",
5830        arity = 2,
5831        [
5832            open
5833            @index 1
5834            @title "Sized"
5835            @variant "static_size"
5836        ],
5837        [
5838            @index 2
5839            @title "Unsized"
5840            @variant "dynamic_size"
5841        ]
5842    )]
5843    #[must_use = "callers should check the return value to see if the operation succeeded"]
5844    #[inline]
5845    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5846    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5847    where
5848        Self: Immutable,
5849    {
5850        let src = self.as_bytes();
5851        match dst.get_mut(..src.len()) {
5852            Some(dst) => {
5853                // SAFETY: Within this branch of the `match`, we have ensured
5854                // through fallible subslicing that `dst.len()` is equal to
5855                // `src.len()`. Neither the size of the source nor the size of
5856                // the destination change between the above subslicing operation
5857                // and the invocation of `copy_unchecked`.
5858                unsafe { util::copy_unchecked(src, dst) }
5859                Ok(())
5860            }
5861            None => Err(SizeError::new(self)),
5862        }
5863    }
5864
5865    /// Writes a copy of `self` to the suffix of `dst`.
5866    ///
5867    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5868    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5869    ///
5870    /// # Examples
5871    ///
5872    /// ```
5873    /// use zerocopy::IntoBytes;
5874    /// # use zerocopy_derive::*;
5875    ///
5876    /// #[derive(IntoBytes, Immutable)]
5877    /// #[repr(C)]
5878    /// struct PacketHeader {
5879    ///     src_port: [u8; 2],
5880    ///     dst_port: [u8; 2],
5881    ///     length: [u8; 2],
5882    ///     checksum: [u8; 2],
5883    /// }
5884    ///
5885    /// let header = PacketHeader {
5886    ///     src_port: [0, 1],
5887    ///     dst_port: [2, 3],
5888    ///     length: [4, 5],
5889    ///     checksum: [6, 7],
5890    /// };
5891    ///
5892    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5893    ///
5894    /// header.write_to_suffix(&mut bytes[..]);
5895    ///
5896    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
5897    ///
5898    /// let mut insufficient_bytes = &mut [0, 0][..];
5899    ///
5900    /// let write_result = header.write_to_suffix(insufficient_bytes);
5901    ///
5902    /// assert!(write_result.is_err());
5903    /// assert_eq!(insufficient_bytes, [0, 0]);
5904    /// ```
5905    ///
5906    /// If insufficient target bytes are provided, `write_to_suffix` returns
5907    /// `Err` and leaves the target bytes unmodified:
5908    ///
5909    /// ```
5910    /// # use zerocopy::IntoBytes;
5911    /// # let header = u128::MAX;
5912    /// let mut insufficient_bytes = &mut [0, 0][..];
5913    ///
5914    /// let write_result = header.write_to_suffix(insufficient_bytes);
5915    ///
5916    /// assert!(write_result.is_err());
5917    /// assert_eq!(insufficient_bytes, [0, 0]);
5918    /// ```
5919    ///
5920    #[doc = codegen_section!(
5921        header = "h5",
5922        bench = "write_to_suffix",
5923        format = "coco",
5924        arity = 2,
5925        [
5926            open
5927            @index 1
5928            @title "Sized"
5929            @variant "static_size"
5930        ],
5931        [
5932            @index 2
5933            @title "Unsized"
5934            @variant "dynamic_size"
5935        ]
5936    )]
5937    #[must_use = "callers should check the return value to see if the operation succeeded"]
5938    #[inline]
5939    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5940    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5941    where
5942        Self: Immutable,
5943    {
5944        let src = self.as_bytes();
5945        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
5946            start
5947        } else {
5948            return Err(SizeError::new(self));
5949        };
5950        let dst = if let Some(dst) = dst.get_mut(start..) {
5951            dst
5952        } else {
5953            // get_mut() should never return None here. We return a `SizeError`
5954            // rather than .unwrap() because in the event the branch is not
5955            // optimized away, returning a value is generally lighter-weight
5956            // than panicking.
5957            return Err(SizeError::new(self));
5958        };
5959        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
5960        // `dst.len()` is equal to `src.len()`. Neither the size of the source
5961        // nor the size of the destination change between the above subslicing
5962        // operation and the invocation of `copy_unchecked`.
5963        unsafe {
5964            util::copy_unchecked(src, dst);
5965        }
5966        Ok(())
5967    }
5968
5969    /// Writes a copy of `self` to an `io::Write`.
5970    ///
5971    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
5972    /// for interfacing with operating system byte sinks (files, sockets, etc.).
5973    ///
5974    /// # Examples
5975    ///
5976    /// ```no_run
5977    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
5978    /// use std::fs::File;
5979    /// # use zerocopy_derive::*;
5980    ///
5981    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
5982    /// #[repr(C, packed)]
5983    /// struct GrayscaleImage {
5984    ///     height: U16,
5985    ///     width: U16,
5986    ///     pixels: [U16],
5987    /// }
5988    ///
5989    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
5990    /// let mut file = File::create("image.bin").unwrap();
5991    /// image.write_to_io(&mut file).unwrap();
5992    /// ```
5993    ///
5994    /// If the write fails, `write_to_io` returns `Err` and a partial write may
5995    /// have occurred; e.g.:
5996    ///
5997    /// ```
5998    /// # use zerocopy::IntoBytes;
5999    ///
6000    /// let src = u128::MAX;
6001    /// let mut dst = [0u8; 2];
6002    ///
6003    /// let write_result = src.write_to_io(&mut dst[..]);
6004    ///
6005    /// assert!(write_result.is_err());
6006    /// assert_eq!(dst, [255, 255]);
6007    /// ```
6008    #[cfg(feature = "std")]
6009    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
6010    #[inline(always)]
6011    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
6012    where
6013        Self: Immutable,
6014        W: io::Write,
6015    {
6016        dst.write_all(self.as_bytes())
6017    }
6018
6019    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
6020    #[doc(hidden)]
6021    #[inline]
6022    fn as_bytes_mut(&mut self) -> &mut [u8]
6023    where
6024        Self: FromBytes,
6025    {
6026        self.as_mut_bytes()
6027    }
6028}
6029
6030/// Analyzes whether a type is [`Unaligned`].
6031///
6032/// This derive analyzes, at compile time, whether the annotated type satisfies
6033/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
6034/// sound to do so. This derive can be applied to structs, enums, and unions;
6035/// e.g.:
6036///
6037/// ```
6038/// # use zerocopy_derive::Unaligned;
6039/// #[derive(Unaligned)]
6040/// #[repr(C)]
6041/// struct MyStruct {
6042/// # /*
6043///     ...
6044/// # */
6045/// }
6046///
6047/// #[derive(Unaligned)]
6048/// #[repr(u8)]
6049/// enum MyEnum {
6050/// #   Variant0,
6051/// # /*
6052///     ...
6053/// # */
6054/// }
6055///
6056/// #[derive(Unaligned)]
6057/// #[repr(packed)]
6058/// union MyUnion {
6059/// #   variant: u8,
6060/// # /*
6061///     ...
6062/// # */
6063/// }
6064/// ```
6065///
6066/// # Analysis
6067///
6068/// *This section describes, roughly, the analysis performed by this derive to
6069/// determine whether it is sound to implement `Unaligned` for a given type.
6070/// Unless you are modifying the implementation of this derive, or attempting to
6071/// manually implement `Unaligned` for a type yourself, you don't need to read
6072/// this section.*
6073///
6074/// If a type has the following properties, then this derive can implement
6075/// `Unaligned` for that type:
6076///
6077/// - If the type is a struct or union:
6078///   - If `repr(align(N))` is provided, `N` must equal 1.
6079///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
6080///     [`Unaligned`].
6081///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
6082///     `repr(packed)` or `repr(packed(1))`.
6083/// - If the type is an enum:
6084///   - If `repr(align(N))` is provided, `N` must equal 1.
6085///   - It must be a field-less enum (meaning that all variants have no fields).
6086///   - It must be `repr(i8)` or `repr(u8)`.
6087///
6088/// [safety conditions]: trait@Unaligned#safety
6089#[cfg(any(feature = "derive", test))]
6090#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6091pub use zerocopy_derive::Unaligned;
6092
/// Types with no alignment requirement.
///
/// If `T: Unaligned`, then `align_of::<T>() == 1`.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(Unaligned)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::Unaligned;
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(Unaligned)]
/// #[repr(packed)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `Unaligned`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: Unaligned`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `Unaligned` manually, and you don't plan on writing unsafe code that
/// operates on `Unaligned` types, then you don't need to read this section.*
///
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. If a type
/// is marked as `Unaligned` which violates this contract, it may cause
/// undefined behavior.
///
/// `#[derive(Unaligned)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Unaligned",
    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
)]
#[cfg_attr(
    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
)]
pub unsafe trait Unaligned {
    // The `Self: Sized` bound makes it so that `Unaligned` is still object
    // safe. Unlike `IntoBytes`, this trait declares no other methods, so this
    // hidden method is the only one relevant to its object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
6171
6172/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
6173///
6174/// This derive can be applied to structs and enums implementing both
6175/// [`Immutable`] and [`IntoBytes`]; e.g.:
6176///
6177/// ```
6178/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
6179/// #[derive(ByteEq, Immutable, IntoBytes)]
6180/// #[repr(C)]
6181/// struct MyStruct {
6182/// # /*
6183///     ...
6184/// # */
6185/// }
6186///
6187/// #[derive(ByteEq, Immutable, IntoBytes)]
6188/// #[repr(u8)]
6189/// enum MyEnum {
6190/// #   Variant,
6191/// # /*
6192///     ...
6193/// # */
6194/// }
6195/// ```
6196///
6197/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
6198/// equality by individually comparing each field. Instead, the implementation
/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
6200/// `self` and `other` to byte slices and compares those slices for equality.
6201/// This may have performance advantages.
6202#[cfg(any(feature = "derive", test))]
6203#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6204pub use zerocopy_derive::ByteEq;
6205/// Derives an optimized [`Hash`] implementation.
6206///
6207/// This derive can be applied to structs and enums implementing both
6208/// [`Immutable`] and [`IntoBytes`]; e.g.:
6209///
6210/// ```
6211/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
6212/// #[derive(ByteHash, Immutable, IntoBytes)]
6213/// #[repr(C)]
6214/// struct MyStruct {
6215/// # /*
6216///     ...
6217/// # */
6218/// }
6219///
6220/// #[derive(ByteHash, Immutable, IntoBytes)]
6221/// #[repr(u8)]
6222/// enum MyEnum {
6223/// #   Variant,
6224/// # /*
6225///     ...
6226/// # */
6227/// }
6228/// ```
6229///
6230/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
6231/// individually hashing each field and combining the results. Instead, the
6232/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
6233/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hashes
6234/// it in a single call to [`Hasher::write()`]. This may have performance
6235/// advantages.
6236///
6237/// [`Hash`]: core::hash::Hash
6238/// [`Hash::hash()`]: core::hash::Hash::hash()
6239/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
6240#[cfg(any(feature = "derive", test))]
6241#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6242pub use zerocopy_derive::ByteHash;
6243/// Implements [`SplitAt`].
6244///
6245/// This derive can be applied to structs; e.g.:
6246///
6247/// ```
6248/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
6249/// #[derive(ByteEq, Immutable, IntoBytes)]
6250/// #[repr(C)]
6251/// struct MyStruct {
6252/// # /*
6253///     ...
6254/// # */
6255/// }
6256/// ```
6257#[cfg(any(feature = "derive", test))]
6258#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
6259pub use zerocopy_derive::SplitAt;
6260
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
mod alloc_support {
    use super::*;

    /// Appends `additional` new, zero-initialized items to the end of `v`.
    ///
    /// Deprecated shim which forwards to [`FromZeros::extend_vec_zeroed`].
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn extend_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        additional: usize,
    ) -> Result<(), AllocError> {
        FromZeros::extend_vec_zeroed(v, additional)
    }

    /// Inserts `additional` new, zero-initialized items into `v` at
    /// `position`.
    ///
    /// Deprecated shim which forwards to [`FromZeros::insert_vec_zeroed`].
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn insert_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError> {
        FromZeros::insert_vec_zeroed(v, position, additional)
    }
}
6298
6299#[cfg(feature = "alloc")]
6300#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
6301#[doc(hidden)]
6302pub use alloc_support::*;
6303
6304#[cfg(test)]
6305#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
6306mod tests {
6307    use static_assertions::assert_impl_all;
6308
6309    use super::*;
6310    use crate::util::testutil::*;
6311
    // An unsized type.
    //
    // This is used to test the custom derives of our traits. The `[u8]` type
    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
    //
    // `repr(transparent)` over a single `[u8]` field is what
    // `from_mut_slice` (below) relies on when transmuting `&mut [u8]` to
    // `&mut Unsized`.
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
    struct Unsized([u8]);
6319
    impl Unsized {
        // Test-only constructor: reinterprets a mutable byte slice as a
        // mutable reference to `Unsized`.
        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: This is *probably* sound - since the layouts of `[u8]`
            // and `Unsized` are the same, so are the layouts of `&mut [u8]` and
            // `&mut Unsized`. [1] Even if it turns out that this isn't actually
            // guaranteed by the language spec, we can just change this since
            // it's in test code.
            //
            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
            unsafe { mem::transmute(slc) }
        }
    }
6332
    #[test]
    fn test_known_layout() {
        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
        // of `$ty`.
        macro_rules! test {
            ($ty:ty, $expect:expr) => {
                let expect = $expect;
                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
            };
        }

        // Helper for building the expected `DstLayout`:
        // - `trailing_slice_elem_size == None` means a sized layout whose size
        //   is `offset`;
        // - `Some(elem_size)` means a slice DST whose trailing slice starts at
        //   `offset` with elements of size `elem_size`.
        let layout =
            |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout {
                align: NonZeroUsize::new(align).unwrap(),
                size_info: match trailing_slice_elem_size {
                    None => SizeInfo::Sized { size: offset },
                    Some(elem_size) => {
                        SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size })
                    }
                },
                statically_shallow_unpadded,
            };

        test!((), layout(0, 1, None, false));
        test!(u8, layout(1, 1, None, false));
        // Use `align_of` because `u64` alignment may be smaller than 8 on some
        // platforms.
        test!(u64, layout(8, mem::align_of::<u64>(), None, false));
        test!(AU64, layout(8, 8, None, false));

        // `Option<&'static ()>` is expected to have the same layout as `usize`.
        test!(Option<&'static ()>, usize::LAYOUT);

        // Unsized (slice/str) types: zero offset, per-element sizes 0/1/1.
        test!([()], layout(0, 1, Some(0), true));
        test!([u8], layout(0, 1, Some(1), true));
        test!(str, layout(0, 1, Some(1), true));
    }
6372
6373    #[cfg(feature = "derive")]
6374    #[test]
6375    fn test_known_layout_derive() {
6376        // In this and other files (`late_compile_pass.rs`,
6377        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
6378        // modes of `derive(KnownLayout)` for the following combination of
6379        // properties:
6380        //
6381        // +------------+--------------------------------------+-----------+
6382        // |            |      trailing field properties       |           |
6383        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6384        // |------------+----------+----------------+----------+-----------|
6385        // |          N |        N |              N |        N |      KL00 |
6386        // |          N |        N |              N |        Y |      KL01 |
6387        // |          N |        N |              Y |        N |      KL02 |
6388        // |          N |        N |              Y |        Y |      KL03 |
6389        // |          N |        Y |              N |        N |      KL04 |
6390        // |          N |        Y |              N |        Y |      KL05 |
6391        // |          N |        Y |              Y |        N |      KL06 |
6392        // |          N |        Y |              Y |        Y |      KL07 |
6393        // |          Y |        N |              N |        N |      KL08 |
6394        // |          Y |        N |              N |        Y |      KL09 |
6395        // |          Y |        N |              Y |        N |      KL10 |
6396        // |          Y |        N |              Y |        Y |      KL11 |
6397        // |          Y |        Y |              N |        N |      KL12 |
6398        // |          Y |        Y |              N |        Y |      KL13 |
6399        // |          Y |        Y |              Y |        N |      KL14 |
6400        // |          Y |        Y |              Y |        Y |      KL15 |
6401        // +------------+----------+----------------+----------+-----------+
6402
6403        struct NotKnownLayout<T = ()> {
6404            _t: T,
6405        }
6406
6407        #[derive(KnownLayout)]
6408        #[repr(C)]
6409        struct AlignSize<const ALIGN: usize, const SIZE: usize>
6410        where
6411            elain::Align<ALIGN>: elain::Alignment,
6412        {
6413            _align: elain::Align<ALIGN>,
6414            size: [u8; SIZE],
6415        }
6416
6417        type AU16 = AlignSize<2, 2>;
6418        type AU32 = AlignSize<4, 4>;
6419
6420        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
6421
6422        let sized_layout = |align, size| DstLayout {
6423            align: NonZeroUsize::new(align).unwrap(),
6424            size_info: SizeInfo::Sized { size },
6425            statically_shallow_unpadded: false,
6426        };
6427
6428        let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout {
6429            align: NonZeroUsize::new(align).unwrap(),
6430            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
6431            statically_shallow_unpadded,
6432        };
6433
6434        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6435        // |          N |        N |              N |        Y |      KL01 |
6436        #[allow(dead_code)]
6437        #[derive(KnownLayout)]
6438        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6439
6440        let expected = DstLayout::for_type::<KL01>();
6441
6442        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
6443        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
6444
6445        // ...with `align(N)`:
6446        #[allow(dead_code)]
6447        #[derive(KnownLayout)]
6448        #[repr(align(64))]
6449        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6450
6451        let expected = DstLayout::for_type::<KL01Align>();
6452
6453        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
6454        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6455
6456        // ...with `packed`:
6457        #[allow(dead_code)]
6458        #[derive(KnownLayout)]
6459        #[repr(packed)]
6460        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6461
6462        let expected = DstLayout::for_type::<KL01Packed>();
6463
6464        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
6465        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
6466
6467        // ...with `packed(N)`:
6468        #[allow(dead_code)]
6469        #[derive(KnownLayout)]
6470        #[repr(packed(2))]
6471        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6472
6473        assert_impl_all!(KL01PackedN: KnownLayout);
6474
6475        let expected = DstLayout::for_type::<KL01PackedN>();
6476
6477        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
6478        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6479
6480        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6481        // |          N |        N |              Y |        Y |      KL03 |
6482        #[allow(dead_code)]
6483        #[derive(KnownLayout)]
6484        struct KL03(NotKnownLayout, u8);
6485
6486        let expected = DstLayout::for_type::<KL03>();
6487
6488        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
6489        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
6490
6491        // ... with `align(N)`
6492        #[allow(dead_code)]
6493        #[derive(KnownLayout)]
6494        #[repr(align(64))]
6495        struct KL03Align(NotKnownLayout<AU32>, u8);
6496
6497        let expected = DstLayout::for_type::<KL03Align>();
6498
6499        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
6500        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6501
6502        // ... with `packed`:
6503        #[allow(dead_code)]
6504        #[derive(KnownLayout)]
6505        #[repr(packed)]
6506        struct KL03Packed(NotKnownLayout<AU32>, u8);
6507
6508        let expected = DstLayout::for_type::<KL03Packed>();
6509
6510        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
6511        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
6512
6513        // ... with `packed(N)`
6514        #[allow(dead_code)]
6515        #[derive(KnownLayout)]
6516        #[repr(packed(2))]
6517        struct KL03PackedN(NotKnownLayout<AU32>, u8);
6518
6519        assert_impl_all!(KL03PackedN: KnownLayout);
6520
6521        let expected = DstLayout::for_type::<KL03PackedN>();
6522
6523        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
6524        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6525
6526        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6527        // |          N |        Y |              N |        Y |      KL05 |
6528        #[allow(dead_code)]
6529        #[derive(KnownLayout)]
6530        struct KL05<T>(u8, T);
6531
6532        fn _test_kl05<T>(t: T) -> impl KnownLayout {
6533            KL05(0u8, t)
6534        }
6535
6536        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6537        // |          N |        Y |              Y |        Y |      KL07 |
6538        #[allow(dead_code)]
6539        #[derive(KnownLayout)]
6540        struct KL07<T: KnownLayout>(u8, T);
6541
6542        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
6543            let _ = KL07(0u8, t);
6544        }
6545
6546        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6547        // |          Y |        N |              Y |        N |      KL10 |
6548        #[allow(dead_code)]
6549        #[derive(KnownLayout)]
6550        #[repr(C)]
6551        struct KL10(NotKnownLayout<AU32>, [u8]);
6552
6553        let expected = DstLayout::new_zst(None)
6554            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6555            .extend(<[u8] as KnownLayout>::LAYOUT, None)
6556            .pad_to_align();
6557
6558        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
6559        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4, false));
6560
6561        // ...with `align(N)`:
6562        #[allow(dead_code)]
6563        #[derive(KnownLayout)]
6564        #[repr(C, align(64))]
6565        struct KL10Align(NotKnownLayout<AU32>, [u8]);
6566
6567        let repr_align = NonZeroUsize::new(64);
6568
6569        let expected = DstLayout::new_zst(repr_align)
6570            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6571            .extend(<[u8] as KnownLayout>::LAYOUT, None)
6572            .pad_to_align();
6573
6574        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
6575        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4, false));
6576
6577        // ...with `packed`:
6578        #[allow(dead_code)]
6579        #[derive(KnownLayout)]
6580        #[repr(C, packed)]
6581        struct KL10Packed(NotKnownLayout<AU32>, [u8]);
6582
6583        let repr_packed = NonZeroUsize::new(1);
6584
6585        let expected = DstLayout::new_zst(None)
6586            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6587            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6588            .pad_to_align();
6589
6590        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
6591        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4, false));
6592
6593        // ...with `packed(N)`:
6594        #[allow(dead_code)]
6595        #[derive(KnownLayout)]
6596        #[repr(C, packed(2))]
6597        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
6598
6599        let repr_packed = NonZeroUsize::new(2);
6600
6601        let expected = DstLayout::new_zst(None)
6602            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6603            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6604            .pad_to_align();
6605
6606        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
6607        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6608
6609        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6610        // |          Y |        N |              Y |        Y |      KL11 |
6611        #[allow(dead_code)]
6612        #[derive(KnownLayout)]
6613        #[repr(C)]
6614        struct KL11(NotKnownLayout<AU64>, u8);
6615
6616        let expected = DstLayout::new_zst(None)
6617            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6618            .extend(<u8 as KnownLayout>::LAYOUT, None)
6619            .pad_to_align();
6620
6621        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
6622        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
6623
6624        // ...with `align(N)`:
6625        #[allow(dead_code)]
6626        #[derive(KnownLayout)]
6627        #[repr(C, align(64))]
6628        struct KL11Align(NotKnownLayout<AU64>, u8);
6629
6630        let repr_align = NonZeroUsize::new(64);
6631
6632        let expected = DstLayout::new_zst(repr_align)
6633            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6634            .extend(<u8 as KnownLayout>::LAYOUT, None)
6635            .pad_to_align();
6636
6637        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
6638        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6639
6640        // ...with `packed`:
6641        #[allow(dead_code)]
6642        #[derive(KnownLayout)]
6643        #[repr(C, packed)]
6644        struct KL11Packed(NotKnownLayout<AU64>, u8);
6645
6646        let repr_packed = NonZeroUsize::new(1);
6647
6648        let expected = DstLayout::new_zst(None)
6649            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6650            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6651            .pad_to_align();
6652
6653        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6654        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6655
6656        // ...with `packed(N)`:
6657        #[allow(dead_code)]
6658        #[derive(KnownLayout)]
6659        #[repr(C, packed(2))]
6660        struct KL11PackedN(NotKnownLayout<AU64>, u8);
6661
6662        let repr_packed = NonZeroUsize::new(2);
6663
6664        let expected = DstLayout::new_zst(None)
6665            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6666            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6667            .pad_to_align();
6668
6669        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6670        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6671
6672        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6673        // |          Y |        Y |              Y |        N |      KL14 |
6674        #[allow(dead_code)]
6675        #[derive(KnownLayout)]
6676        #[repr(C)]
6677        struct KL14<T: ?Sized + KnownLayout>(u8, T);
6678
6679        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6680            _assert_kl(kl)
6681        }
6682
6683        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6684        // |          Y |        Y |              Y |        Y |      KL15 |
6685        #[allow(dead_code)]
6686        #[derive(KnownLayout)]
6687        #[repr(C)]
6688        struct KL15<T: KnownLayout>(u8, T);
6689
6690        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6691            let _ = KL15(0u8, t);
6692        }
6693
6694        // Test a variety of combinations of field types:
6695        //  - ()
6696        //  - u8
6697        //  - AU16
6698        //  - [()]
6699        //  - [u8]
6700        //  - [AU16]
6701
6702        #[allow(clippy::upper_case_acronyms, dead_code)]
6703        #[derive(KnownLayout)]
6704        #[repr(C)]
6705        struct KLTU<T, U: ?Sized>(T, U);
6706
6707        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6708
6709        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6710
6711        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6712
6713        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false));
6714
6715        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false));
6716
6717        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false));
6718
6719        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6720
6721        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6722
6723        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6724
6725        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false));
6726
6727        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6728
6729        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6730
6731        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6732
6733        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6734
6735        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6736
6737        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false));
6738
6739        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false));
6740
6741        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));
6742
6743        // Test a variety of field counts.
6744
6745        #[derive(KnownLayout)]
6746        #[repr(C)]
6747        struct KLF0;
6748
6749        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6750
6751        #[derive(KnownLayout)]
6752        #[repr(C)]
6753        struct KLF1([u8]);
6754
6755        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, true));
6756
6757        #[derive(KnownLayout)]
6758        #[repr(C)]
6759        struct KLF2(NotKnownLayout<u8>, [u8]);
6760
6761        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));
6762
6763        #[derive(KnownLayout)]
6764        #[repr(C)]
6765        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6766
6767        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));
6768
6769        #[derive(KnownLayout)]
6770        #[repr(C)]
6771        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6772
6773        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8, false));
6774    }
6775
6776    #[test]
6777    fn test_object_safety() {
6778        fn _takes_immutable(_: &dyn Immutable) {}
6779        fn _takes_unaligned(_: &dyn Unaligned) {}
6780    }
6781
6782    #[test]
6783    fn test_from_zeros_only() {
6784        // Test types that implement `FromZeros` but not `FromBytes`.
6785
6786        assert!(!bool::new_zeroed());
6787        assert_eq!(char::new_zeroed(), '\0');
6788
6789        #[cfg(feature = "alloc")]
6790        {
6791            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6792            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6793
6794            assert_eq!(
6795                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6796                [false, false, false]
6797            );
6798            assert_eq!(
6799                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6800                ['\0', '\0', '\0']
6801            );
6802
6803            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6804            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6805        }
6806
6807        let mut string = "hello".to_string();
6808        let s: &mut str = string.as_mut();
6809        assert_eq!(s, "hello");
6810        s.zero();
6811        assert_eq!(s, "\0\0\0\0\0");
6812    }
6813
6814    #[test]
6815    fn test_zst_count_preserved() {
6816        // Test that, when an explicit count is provided to for a type with a
6817        // ZST trailing slice element, that count is preserved. This is
6818        // important since, for such types, all element counts result in objects
6819        // of the same size, and so the correct behavior is ambiguous. However,
6820        // preserving the count as requested by the user is the behavior that we
6821        // document publicly.
6822
6823        // FromZeros methods
6824        #[cfg(feature = "alloc")]
6825        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
6826        #[cfg(feature = "alloc")]
6827        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);
6828
6829        // FromBytes methods
6830        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
6831        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
6832        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
6833        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
6834        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
6835        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
6836    }
6837
6838    #[test]
6839    fn test_read_write() {
6840        const VAL: u64 = 0x12345678;
6841        #[cfg(target_endian = "big")]
6842        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6843        #[cfg(target_endian = "little")]
6844        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6845        const ZEROS: [u8; 8] = [0u8; 8];
6846
6847        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6848
6849        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
6850        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6851        // zeros.
6852        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6853        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
6854        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
6855        // The first 8 bytes are all zeros and the second 8 bytes are from
6856        // `VAL_BYTES`
6857        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6858        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
6859        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));
6860
6861        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.
6862
6863        let mut bytes = [0u8; 8];
6864        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
6865        assert_eq!(bytes, VAL_BYTES);
6866        let mut bytes = [0u8; 16];
6867        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
6868        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6869        assert_eq!(bytes, want);
6870        let mut bytes = [0u8; 16];
6871        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
6872        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6873        assert_eq!(bytes, want);
6874    }
6875
    #[test]
    #[cfg(feature = "std")]
    fn test_read_io_with_padding_soundness() {
        // This test is designed to exhibit potential UB in
        // `FromBytes::read_from_io`. (see #2319, #2320).

        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
        // will have inter-field padding between `x` and `y`.
        #[derive(FromBytes)]
        #[repr(C)]
        struct WithPadding {
            x: u8,
            y: u16,
        }
        // A `Read` impl whose `read` inspects every byte of `buf`. If
        // `read_from_io` ever handed it a buffer containing uninitialized
        // (padding) bytes, the per-byte branch below would be UB that tools
        // like Miri can detect.
        struct ReadsInRead;
        impl std::io::Read for ReadsInRead {
            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
                // This body branches on every byte of `buf`, ensuring that it
                // exhibits UB if any byte of `buf` is uninitialized.
                if buf.iter().all(|&x| x == 0) {
                    Ok(buf.len())
                } else {
                    // Either way, the caller observes a fully-initialized,
                    // all-zero buffer.
                    buf.iter_mut().for_each(|x| *x = 0);
                    Ok(buf.len())
                }
            }
        }
        // Since `ReadsInRead` zero-fills, both fields decode as 0.
        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
    }
6905
6906    #[test]
6907    #[cfg(feature = "std")]
6908    fn test_read_write_io() {
6909        let mut long_buffer = [0, 0, 0, 0];
6910        assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6911        assert_eq!(long_buffer, [255, 255, 0, 0]);
6912        assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6913
6914        let mut short_buffer = [0, 0];
6915        assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6916        assert_eq!(short_buffer, [255, 255]);
6917        assert!(u32::read_from_io(&short_buffer[..]).is_err());
6918    }
6919
6920    #[test]
6921    fn test_try_from_bytes_try_read_from() {
6922        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
6923        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));
6924
6925        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
6926        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));
6927
6928        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
6929        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));
6930
6931        // If we don't pass enough bytes, it fails.
6932        assert!(matches!(
6933            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
6934            Err(TryReadError::Size(_))
6935        ));
6936        assert!(matches!(
6937            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
6938            Err(TryReadError::Size(_))
6939        ));
6940        assert!(matches!(
6941            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
6942            Err(TryReadError::Size(_))
6943        ));
6944
6945        // If we pass too many bytes, it fails.
6946        assert!(matches!(
6947            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
6948            Err(TryReadError::Size(_))
6949        ));
6950
6951        // If we pass an invalid value, it fails.
6952        assert!(matches!(
6953            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
6954            Err(TryReadError::Validity(_))
6955        ));
6956        assert!(matches!(
6957            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
6958            Err(TryReadError::Validity(_))
6959        ));
6960        assert!(matches!(
6961            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
6962            Err(TryReadError::Validity(_))
6963        ));
6964
6965        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
6966        // alignment is 8, and since we read from two adjacent addresses one
6967        // byte apart, it is guaranteed that at least one of them (though
6968        // possibly both) will be misaligned.
6969        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
6970        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
6971        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));
6972
6973        assert_eq!(
6974            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
6975            Ok((AU64(0), &[][..]))
6976        );
6977        assert_eq!(
6978            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
6979            Ok((AU64(0), &[][..]))
6980        );
6981
6982        assert_eq!(
6983            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
6984            Ok((&[][..], AU64(0)))
6985        );
6986        assert_eq!(
6987            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
6988            Ok((&[][..], AU64(0)))
6989        );
6990    }
6991
    #[test]
    fn test_ref_from_mut_from_bytes() {
        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,_suffix}`
        // success cases. Exhaustive coverage for these methods is covered by
        // the `Ref` tests above, which these helper methods defer to.
        //
        // NOTE: the steps below are order-dependent — each mutation changes
        // the buffer contents that the subsequent assertions check.

        // `buf` is aligned for `AU64`, so the 8-byte conversions at offset 8
        // below are aligned.
        let mut buf =
            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);

        assert_eq!(
            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
            [8, 9, 10, 11, 12, 13, 14, 15]
        );
        // Overwrite bytes 8..16 with 0x01 each (every byte of the `u64` is
        // 0x01, so this is endian-independent).
        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
        suffix.0 = 0x0101010101010101;
        // The `[u8; 9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
        assert_eq!(
            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
        );
        // The 8-byte suffix of `buf.t[1..]` is bytes 8..16; overwrite them
        // with 0x02 each.
        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
        suffix.0 = 0x0202020202020202;
        // The 10-byte suffix starts at byte 6; overwrite byte 6 with 42.
        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
        suffix[0] = 42;
        assert_eq!(
            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
        );
        // Overwrite byte 1 with 30, then check the final buffer state.
        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
    }
7026
7027    #[test]
7028    fn test_ref_from_mut_from_bytes_error() {
7029        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
7030        // error cases.
7031
7032        // Fail because the buffer is too large.
7033        let mut buf = Align::<[u8; 16], AU64>::default();
7034        // `buf.t` should be aligned to 8, so only the length check should fail.
7035        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
7036        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
7037        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
7038        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
7039
7040        // Fail because the buffer is too small.
7041        let mut buf = Align::<[u8; 4], AU64>::default();
7042        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
7043        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
7044        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
7045        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
7046        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
7047        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
7048        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
7049        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
7050        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
7051        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
7052        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
7053        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
7054
7055        // Fail because the alignment is insufficient.
7056        let mut buf = Align::<[u8; 13], AU64>::default();
7057        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
7058        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
7059        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
7060        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
7061        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
7062        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
7063        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
7064        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
7065    }
7066
    #[test]
    fn test_to_methods() {
        /// Run a series of tests by calling `IntoBytes` methods on `t`.
        ///
        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
        /// before `t` has been modified. `post_mutation` is the expected
        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
        /// has had its bits flipped (by applying `^= 0xFF`).
        ///
        /// `N` is the size of `t` in bytes.
        ///
        /// NOTE(review): `N` must be at least 1 — the `vec![0; N - 1]` calls
        /// below would underflow for `N == 0`. Both call sites use `N >= 3`.
        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
            t: &mut T,
            bytes: &[u8],
            post_mutation: &T,
        ) {
            // Test that we can access the underlying bytes, and that we get the
            // right bytes and the right number of bytes.
            assert_eq!(t.as_bytes(), bytes);

            // Test that changes to the underlying byte slices are reflected in
            // the original object.
            t.as_mut_bytes()[0] ^= 0xFF;
            assert_eq!(t, post_mutation);
            // Flip the bits back so the `write_to*` tests below observe the
            // original value.
            t.as_mut_bytes()[0] ^= 0xFF;

            // `write_to` rejects slices that are too small or too large.
            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());

            // `write_to` works as expected.
            let mut bytes = [0; N];
            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` rejects slices that are too small.
            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_prefix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` works with too-large slices, and any bytes past
            // the prefix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[N] = 123;
            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[..N], t.as_bytes());
            assert_eq!(too_many_bytes[N], 123);

            // `write_to_suffix` rejects slices that are too small.
            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_suffix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_suffix` works with too-large slices, and any bytes
            // before the suffix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[0] = 123;
            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[1..], t.as_bytes());
            assert_eq!(too_many_bytes[0], 123);
        }

        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
        #[repr(C)]
        struct Foo {
            a: u32,
            b: Wrapping<u32>,
            c: Option<NonZeroU32>,
        }

        // `Foo` is 12 bytes (three 4-byte fields, no padding); `c: None`
        // encodes as four zero bytes.
        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
        } else {
            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
        };
        // Flipping byte 0 flips `a`'s least-significant byte on
        // little-endian, or its most-significant byte on big-endian.
        let post_mutation_expected_a =
            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
        test::<_, 12>(
            &mut Foo { a: 1, b: Wrapping(2), c: None },
            expected_bytes.as_bytes(),
            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
        );
        // Also exercise the helper with an unsized (`?Sized`) type.
        test::<_, 3>(
            Unsized::from_mut_slice(&mut [1, 2, 3]),
            &[1, 2, 3],
            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
        );
    }
7160
7161    #[test]
7162    fn test_array() {
7163        #[derive(FromBytes, IntoBytes, Immutable)]
7164        #[repr(C)]
7165        struct Foo {
7166            a: [u16; 33],
7167        }
7168
7169        let foo = Foo { a: [0xFFFF; 33] };
7170        let expected = [0xFFu8; 66];
7171        assert_eq!(foo.as_bytes(), &expected[..]);
7172    }
7173
7174    #[test]
7175    fn test_new_zeroed() {
7176        assert!(!bool::new_zeroed());
7177        assert_eq!(u64::new_zeroed(), 0);
7178        // This test exists in order to exercise unsafe code, especially when
7179        // running under Miri.
7180        #[allow(clippy::unit_cmp)]
7181        {
7182            assert_eq!(<()>::new_zeroed(), ());
7183        }
7184    }
7185
7186    #[test]
7187    fn test_transparent_packed_generic_struct() {
7188        #[derive(IntoBytes, FromBytes, Unaligned)]
7189        #[repr(transparent)]
7190        #[allow(dead_code)] // We never construct this type
7191        struct Foo<T> {
7192            _t: T,
7193            _phantom: PhantomData<()>,
7194        }
7195
7196        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
7197        assert_impl_all!(Foo<u8>: Unaligned);
7198
7199        #[derive(IntoBytes, FromBytes, Unaligned)]
7200        #[repr(C, packed)]
7201        #[allow(dead_code)] // We never construct this type
7202        struct Bar<T, U> {
7203            _t: T,
7204            _u: U,
7205        }
7206
7207        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
7208    }
7209
7210    #[cfg(feature = "alloc")]
7211    mod alloc {
7212        use super::*;
7213
7214        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
7215        #[test]
7216        fn test_extend_vec_zeroed() {
7217            // Test extending when there is an existing allocation.
7218            let mut v = vec![100u16, 200, 300];
7219            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
7220            assert_eq!(v.len(), 6);
7221            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
7222            drop(v);
7223
7224            // Test extending when there is no existing allocation.
7225            let mut v: Vec<u64> = Vec::new();
7226            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
7227            assert_eq!(v.len(), 3);
7228            assert_eq!(&*v, &[0, 0, 0]);
7229            drop(v);
7230        }
7231
7232        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
7233        #[test]
7234        fn test_extend_vec_zeroed_zst() {
7235            // Test extending when there is an existing (fake) allocation.
7236            let mut v = vec![(), (), ()];
7237            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
7238            assert_eq!(v.len(), 6);
7239            assert_eq!(&*v, &[(), (), (), (), (), ()]);
7240            drop(v);
7241
7242            // Test extending when there is no existing (fake) allocation.
7243            let mut v: Vec<()> = Vec::new();
7244            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
7245            assert_eq!(&*v, &[(), (), ()]);
7246            drop(v);
7247        }
7248
7249        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
7250        #[test]
7251        fn test_insert_vec_zeroed() {
7252            // Insert at start (no existing allocation).
7253            let mut v: Vec<u64> = Vec::new();
7254            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
7255            assert_eq!(v.len(), 2);
7256            assert_eq!(&*v, &[0, 0]);
7257            drop(v);
7258
7259            // Insert at start.
7260            let mut v = vec![100u64, 200, 300];
7261            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
7262            assert_eq!(v.len(), 5);
7263            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
7264            drop(v);
7265
7266            // Insert at middle.
7267            let mut v = vec![100u64, 200, 300];
7268            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
7269            assert_eq!(v.len(), 4);
7270            assert_eq!(&*v, &[100, 0, 200, 300]);
7271            drop(v);
7272
7273            // Insert at end.
7274            let mut v = vec![100u64, 200, 300];
7275            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
7276            assert_eq!(v.len(), 4);
7277            assert_eq!(&*v, &[100, 200, 300, 0]);
7278            drop(v);
7279        }
7280
7281        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
7282        #[test]
7283        fn test_insert_vec_zeroed_zst() {
7284            // Insert at start (no existing fake allocation).
7285            let mut v: Vec<()> = Vec::new();
7286            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
7287            assert_eq!(v.len(), 2);
7288            assert_eq!(&*v, &[(), ()]);
7289            drop(v);
7290
7291            // Insert at start.
7292            let mut v = vec![(), (), ()];
7293            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
7294            assert_eq!(v.len(), 5);
7295            assert_eq!(&*v, &[(), (), (), (), ()]);
7296            drop(v);
7297
7298            // Insert at middle.
7299            let mut v = vec![(), (), ()];
7300            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
7301            assert_eq!(v.len(), 4);
7302            assert_eq!(&*v, &[(), (), (), ()]);
7303            drop(v);
7304
7305            // Insert at end.
7306            let mut v = vec![(), (), ()];
7307            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
7308            assert_eq!(v.len(), 4);
7309            assert_eq!(&*v, &[(), (), (), ()]);
7310            drop(v);
7311        }
7312
7313        #[test]
7314        fn test_new_box_zeroed() {
7315            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
7316        }
7317
7318        #[test]
7319        fn test_new_box_zeroed_array() {
7320            drop(<[u32; 0x1000]>::new_box_zeroed());
7321        }
7322
7323        #[test]
7324        fn test_new_box_zeroed_zst() {
7325            // This test exists in order to exercise unsafe code, especially
7326            // when running under Miri.
7327            #[allow(clippy::unit_cmp)]
7328            {
7329                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
7330            }
7331        }
7332
7333        #[test]
7334        fn test_new_box_zeroed_with_elems() {
7335            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
7336            assert_eq!(s.len(), 3);
7337            assert_eq!(&*s, &[0, 0, 0]);
7338            s[1] = 3;
7339            assert_eq!(&*s, &[0, 3, 0]);
7340        }
7341
7342        #[test]
7343        fn test_new_box_zeroed_with_elems_empty() {
7344            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
7345            assert_eq!(s.len(), 0);
7346        }
7347
7348        #[test]
7349        fn test_new_box_zeroed_with_elems_zst() {
7350            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
7351            assert_eq!(s.len(), 3);
7352            assert!(s.get(10).is_none());
7353            // This test exists in order to exercise unsafe code, especially
7354            // when running under Miri.
7355            #[allow(clippy::unit_cmp)]
7356            {
7357                assert_eq!(s[1], ());
7358            }
7359            s[2] = ();
7360        }
7361
7362        #[test]
7363        fn test_new_box_zeroed_with_elems_zst_empty() {
7364            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
7365            assert_eq!(s.len(), 0);
7366        }
7367
7368        #[test]
7369        fn new_box_zeroed_with_elems_errors() {
7370            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));
7371
7372            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
7373            assert_eq!(
7374                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
7375                Err(AllocError)
7376            );
7377        }
7378    }
7379
    #[test]
    #[allow(deprecated)]
    fn test_deprecated_from_bytes() {
        // Smoke-tests the deprecated pre-0.8 conversion API names
        // (`ref_from`, `mut_from`, `read_from`, `slice_from_prefix`, etc.) to
        // ensure they still compile and behave until they are removed.
        let val = 0u32;
        let bytes = val.as_bytes();

        assert!(u32::ref_from(bytes).is_some());
        // mut_from needs mut bytes
        let mut val = 0u32;
        let mut_bytes = val.as_mut_bytes();
        assert!(u32::mut_from(mut_bytes).is_some());

        assert!(u32::read_from(bytes).is_some());

        // A zero-element prefix is trivially valid; the entire 4-byte input
        // remains in `rest`.
        let (slc, rest) = <u32>::slice_from_prefix(bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);

        // Same for a zero-element suffix (note the swapped tuple order:
        // `rest` comes first for suffix splits).
        let (rest, slc) = <u32>::slice_from_suffix(bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);

        // Mutable variants of the same zero-element splits.
        let (slc, rest) = <u32>::mut_slice_from_prefix(mut_bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);

        let (rest, slc) = <u32>::mut_slice_from_suffix(mut_bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);
    }
7410
7411    #[test]
7412    fn test_try_ref_from_prefix_suffix() {
7413        use crate::util::testutil::Align;
7414        let bytes = &Align::<[u8; 4], u32>::new([0u8; 4]).t[..];
7415        let (r, rest): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap();
7416        assert_eq!(*r, 0);
7417        assert_eq!(rest.len(), 0);
7418
7419        let (rest, r): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap();
7420        assert_eq!(*r, 0);
7421        assert_eq!(rest.len(), 0);
7422    }
7423
7424    #[test]
7425    fn test_raw_dangling() {
7426        use crate::util::AsAddress;
7427        let ptr: NonNull<u32> = u32::raw_dangling();
7428        assert_eq!(AsAddress::addr(ptr), 1);
7429
7430        let ptr: NonNull<[u32]> = <[u32]>::raw_dangling();
7431        assert_eq!(AsAddress::addr(ptr), 1);
7432    }
7433
7434    #[test]
7435    fn test_try_ref_from_prefix_with_elems() {
7436        use crate::util::testutil::Align;
7437        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7438        let (r, rest): (&[u32], &[u8]) = <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap();
7439        assert_eq!(r.len(), 2);
7440        assert_eq!(rest.len(), 0);
7441    }
7442
7443    #[test]
7444    fn test_try_ref_from_suffix_with_elems() {
7445        use crate::util::testutil::Align;
7446        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
7447        let (rest, r): (&[u8], &[u32]) = <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap();
7448        assert_eq!(r.len(), 2);
7449        assert_eq!(rest.len(), 0);
7450    }
7451}