zerocopy/util/mod.rs

// Copyright 2023 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

#[macro_use]
pub(crate) mod macros;

#[doc(hidden)]
pub mod macro_util;

use core::{
    marker::PhantomData,
    mem::{self, ManuallyDrop},
    num::NonZeroUsize,
    ptr::NonNull,
};

use super::*;
use crate::pointer::{
    invariant::{Exclusive, Shared, Valid},
    SizeEq, TransmuteFromPtr,
};

/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);

// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}

impl<T: ?Sized> Default for SendSyncPhantomData<T> {
    fn default() -> SendSyncPhantomData<T> {
        SendSyncPhantomData(PhantomData)
    }
}

impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> {
    fn eq(&self, _other: &Self) -> bool {
        true
    }
}

impl<T: ?Sized> Eq for SendSyncPhantomData<T> {}

impl<T: ?Sized> Clone for SendSyncPhantomData<T> {
    fn clone(&self) -> Self {
        SendSyncPhantomData(PhantomData)
    }
}

#[cfg(miri)]
extern "Rust" {
    /// Miri-provided intrinsic that marks the pointer `ptr` as aligned to
    /// `align`.
    ///
    /// This intrinsic is used to inform Miri's symbolic alignment checker that
    /// a pointer is aligned, even if Miri cannot statically deduce that fact.
    /// This is often required when performing raw pointer arithmetic or casts
    /// where the alignment is guaranteed by runtime checks or invariants that
    /// Miri is not aware of.
    pub(crate) fn miri_promise_symbolic_alignment(ptr: *const (), align: usize);
}

/// A type whose address can be extracted as a `usize`.
pub(crate) trait AsAddress {
    fn addr(self) -> usize;
}

impl<T: ?Sized> AsAddress for &T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

impl<T: ?Sized> AsAddress for &mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

impl<T: ?Sized> AsAddress for NonNull<T> {
    #[inline(always)]
    fn addr(self) -> usize {
        AsAddress::addr(self.as_ptr())
    }
}

impl<T: ?Sized> AsAddress for *const T {
    #[inline(always)]
    fn addr(self) -> usize {
        // FIXME(#181), FIXME(https://github.com/rust-lang/rust/issues/95228):
        // Use `.addr()` instead of `as usize` once it's stable, and get rid of
        // this `allow`. Currently, `as usize` is the only way to accomplish
        // this.
        #[allow(clippy::as_conversions)]
        #[cfg_attr(
            __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
            allow(lossy_provenance_casts)
        )]
        return self.cast::<()>() as usize;
    }
}

impl<T: ?Sized> AsAddress for *mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

/// Validates that `t` is aligned to `align_of::<U>()`.
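///
/// For example, if `align_of::<U>() == 4`, then addresses 0, 4, and 8 are
/// accepted, while addresses 1, 2, and 3 produce an `AlignmentError`.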
#[inline(always)]
pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> {
    // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in
    // turn guarantees that this mod operation will not panic.
    #[allow(clippy::arithmetic_side_effects)]
    let remainder = t.addr() % mem::align_of::<U>();
    if remainder == 0 {
        Ok(())
    } else {
        // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`.
        // That's only possible if `align_of::<U>() > 1`.
        Err(unsafe { AlignmentError::new_unchecked(()) })
    }
}

/// Returns the bytes needed to pad `len` to the next multiple of `align`.
///
/// This function assumes that `align` is a power of two; there are no
/// guarantees on the answer it gives if this is not the case.
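///
/// For example, padding a `len` of 10 to an `align` of 4 requires 2 bytes
/// (10 rounds up to 12), while a `len` of 8, already a multiple of 4,
/// requires 0.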
#[cfg_attr(
    kani,
    kani::requires(len <= isize::MAX as usize),
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&p| (len + p) % align.get() == 0),
    // Ensures that we add the minimum required padding.
    kani::ensures(|&p| p < align.get()),
)]
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(padding_needed_for)]
    fn proof() {
        padding_needed_for(kani::any(), kani::any());
    }

    // Abstractly, we want to compute:
    //   align - (len % align),
    // handling the case where `len % align == 0`. Because `align` is a power
    // of two, `len % align == len & (align - 1)`.
    //
    // Guaranteed not to underflow, as `align` is non-zero.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = align.get() - 1;

    // To efficiently subtract this value from `align`, we can use the bitwise
    // complement. Note that `(!len) & (align - 1)` gives us a number that
    // sums with `len & (align - 1)` to `align - 1`. So subtracting 1 from
    // `len` before taking the complement subtracts `len` from `align`. Some
    // quick inspection of cases shows that this also correctly handles the
    // case where `len % align == 0`: `(len - 1) % align` then equals
    // `align - 1`, so the complement mod `align` will be 0, as desired.
    //
    // The following reasoning can be verified quickly by an SMT solver
    // supporting the theory of bitvectors:
    // ```smtlib
    // ; Naive implementation of padding
    // (define-fun padding1 (
    //     (len (_ BitVec 32))
    //     (align (_ BitVec 32))) (_ BitVec 32)
    //    (ite
    //      (= (_ bv0 32) (bvand len (bvsub align (_ bv1 32))))
    //      (_ bv0 32)
    //      (bvsub align (bvand len (bvsub align (_ bv1 32))))))
    //
    // ; The implementation below
    // (define-fun padding2 (
    //     (len (_ BitVec 32))
    //     (align (_ BitVec 32))) (_ BitVec 32)
    // (bvand (bvnot (bvsub len (_ bv1 32))) (bvsub align (_ bv1 32))))
    //
    // (define-fun is-power-of-two ((x (_ BitVec 32))) Bool
    //   (= (_ bv0 32) (bvand x (bvsub x (_ bv1 32)))))
    //
    // (declare-const len (_ BitVec 32))
    // (declare-const align (_ BitVec 32))
    // ; Search for a case where align is a power of two and padding2 disagrees
    // ; with padding1
    // (assert (and (is-power-of-two align)
    //              (not (= (padding1 len align) (padding2 len align)))))
    // (simplify (padding1 (_ bv300 32) (_ bv32 32))) ; 20
    // (simplify (padding2 (_ bv300 32) (_ bv32 32))) ; 20
    // (simplify (padding1 (_ bv322 32) (_ bv32 32))) ; 30
    // (simplify (padding2 (_ bv322 32) (_ bv32 32))) ; 30
    // (simplify (padding1 (_ bv8 32) (_ bv8 32)))    ; 0
    // (simplify (padding2 (_ bv8 32) (_ bv8 32)))    ; 0
    // (check-sat) ; unsat, also works for 64-bit bitvectors
    // ```
    !(len.wrapping_sub(1)) & mask
}

/// Rounds `n` down to the largest value `m` such that `m <= n` and
/// `m % align == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in
/// this case, it will produce nonsense results.
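///
/// For example, rounding 13 down to a multiple of 4 yields 12, and 12 rounds
/// down to itself.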
#[inline(always)]
#[cfg_attr(
    kani,
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&m| m <= n && m % align.get() == 0),
    // Guarantees that `m` is the *largest* value such that `m % align == 0`.
    kani::ensures(|&m| {
        // If this `checked_add` fails, then the next multiple would wrap
        // around, which trivially satisfies the "largest value" requirement.
        m.checked_add(align.get()).map(|next_mul| next_mul > n).unwrap_or(true)
    })
)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(round_down_to_next_multiple_of_alignment)]
    fn proof() {
        round_down_to_next_multiple_of_alignment(kani::any(), kani::any());
    }

    let align = align.get();
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    debug_assert!(align.is_power_of_two());

    // Subtraction can't underflow because `align.get() >= 1`.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = !(align - 1);
    n & mask
}

pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() < b.get() {
        b
    } else {
        a
    }
}

pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() > b.get() {
        b
    } else {
        a
    }
}

/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
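///
/// # Examples
///
/// A minimal sketch of a sound call:
///
/// ```ignore
/// let src = [1u8, 2];
/// let mut dst = [0u8; 4];
/// // Sound: `src.len() <= dst.len()`.
/// unsafe { copy_unchecked(&src, &mut dst) };
/// assert_eq!(dst, [1, 2, 0, 0]);
/// ```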
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    debug_assert!(src.len() <= dst.len());
    // SAFETY: This invocation satisfies the safety contract of
    // `copy_nonoverlapping` [1]:
    // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
    // - `dst.as_mut_ptr()` is valid for writes of `src.len()` bytes, because
    //   the caller has promised that `src.len() <= dst.len()`
    // - `src` and `dst` are, trivially, properly aligned
    // - the region of memory beginning at `src` with a size of `src.len()`
    //   bytes does not overlap with the region of memory beginning at `dst`
    //   with the same size, because `dst` is derived from an exclusive
    //   reference.
    unsafe {
        core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    };
}

/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
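///
/// # Examples
///
/// A minimal sketch of a sound call: every initialized `[u8; 4]` is a
/// bit-valid `u32`, so this transmute satisfies the contract.
///
/// ```ignore
/// let n = unsafe { transmute_unchecked::<[u8; 4], u32>([0, 0, 0, 0]) };
/// assert_eq!(n, 0);
/// ```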
#[inline(always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
    static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());

    #[repr(C)]
    union Transmute<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }

    // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
    // fields both start at the same offset and the types of those fields are
    // transparent wrappers around `Src` and `Dst` [1]. Consequently,
    // initializing `Transmute` with `src` and then reading out `dst` is
    // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src`
    // to `Dst` is valid because, by contract on the caller, `src` is a valid
    // instance of `Dst`.
    //
    // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
    //
    //     `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //     validity as `T`, and is subject to the same layout optimizations as
    //     `T`.
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
    //
    //     Effectively, writing to and then reading from a union with the C
    //     representation is analogous to a transmute from the type used for
    //     writing to the type used for reading.
    unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}

/// # Safety
///
/// `Src` must have an alignment greater than or equal to that of `Dst`.
pub(crate) unsafe fn transmute_ref<Src, Dst, R>(src: &Src) -> &Dst
where
    Src: ?Sized,
    Dst: SizeEq<Src>
        + TransmuteFromPtr<Src, Shared, Valid, Valid, <Dst as SizeEq<Src>>::CastFrom, R>
        + ?Sized,
{
    let dst = Ptr::from_ref(src).transmute();
    // SAFETY: The caller promises that `Src`'s alignment is at least as large
    // as `Dst`'s alignment.
    let dst = unsafe { dst.assume_alignment() };
    dst.as_ref()
}

/// # Safety
///
/// `Src` must have an alignment greater than or equal to that of `Dst`.
pub(crate) unsafe fn transmute_mut<Src, Dst, R>(src: &mut Src) -> &mut Dst
where
    Src: ?Sized,
    Dst: SizeEq<Src>
        + TransmuteFromPtr<Src, Exclusive, Valid, Valid, <Dst as SizeEq<Src>>::CastFrom, R>
        + ?Sized,
{
    let dst = Ptr::from_mut(src).transmute();
    // SAFETY: The caller promises that `Src`'s alignment is at least as large
    // as `Dst`'s alignment.
    let dst = unsafe { dst.assume_alignment() };
    dst.as_mut()
}

/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
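///
/// # Examples
///
/// A minimal sketch of intended usage, assuming the `alloc` feature (`[u8]`
/// implements `KnownLayout` with `PointerMetadata = usize`):
///
/// ```ignore
/// // Allocate a zero-initialized `Box<[u8]>` with 16 elements.
/// let b = unsafe { new_box::<[u8]>(16, alloc::alloc::alloc_zeroed) };
/// assert_eq!(b.unwrap().len(), 16);
/// ```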
#[must_use = "has no side effects (other than allocation)"]
#[cfg(feature = "alloc")]
#[inline]
pub(crate) unsafe fn new_box<T>(
    meta: T::PointerMetadata,
    allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, AllocError>
where
    T: ?Sized + crate::KnownLayout,
{
    let size = match T::size_for_metadata(meta) {
        Some(size) => size,
        None => return Err(AllocError),
    };

    let align = T::LAYOUT.align.get();
    // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in
    // which sufficiently-large allocations (those which, when rounded up to the
    // alignment, overflow `isize`) are not rejected, which can cause undefined
    // behavior. See #64 for details.
    //
    // FIXME(#67): Once our MSRV is > 1.64.0, remove this assertion.
    #[allow(clippy::as_conversions)]
    let max_alloc = (isize::MAX as usize).saturating_sub(align);
    if size > max_alloc {
        return Err(AllocError);
    }

    // FIXME(https://github.com/rust-lang/rust/issues/55724): Use
    // `Layout::repeat` once it's stabilized.
    let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

    let ptr = if layout.size() != 0 {
        // SAFETY: By contract on the caller, `allocate` is either
        // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
        // check ensures their shared safety precondition: that the supplied
        // layout is not zero-sized [1].
        //
        // [1] Per https://doc.rust-lang.org/1.81.0/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
        //
        //     This function is unsafe because undefined behavior can result if
        //     the caller does not ensure that layout has non-zero size.
        let ptr = unsafe { allocate(layout) };
        match NonNull::new(ptr) {
            Some(ptr) => ptr,
            None => return Err(AllocError),
        }
    } else {
        let align = T::LAYOUT.align.get();

        // We use `transmute` instead of an `as` cast since Miri (with strict
        // provenance enabled) notices and complains that an `as` cast creates a
        // pointer with no provenance. Miri isn't smart enough to realize that
        // we're only executing this branch when we're constructing a zero-sized
        // `Box`, which doesn't require provenance.
        //
        // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All
        // bits of a `usize` are initialized.
        //
        // `#[allow(unknown_lints)]` is for `integer_to_ptr_transmutes`.
        #[allow(unknown_lints)]
        #[allow(clippy::useless_transmute, integer_to_ptr_transmutes)]
        let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
        // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
        // `NonZeroUsize`, and is thus guaranteed to be non-zero.
        //
        // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
        // zero, but it does require a non-null dangling pointer for its
        // allocation.
        //
        // FIXME(https://github.com/rust-lang/rust/issues/95228): Use
        // `std::ptr::without_provenance` once it's stable. That may optimize
        // better. As written, Rust may assume that this consumes "exposed"
        // provenance, and thus Rust may have to assume that this may consume
        // provenance from any pointer whose provenance has been exposed.
        unsafe { NonNull::new_unchecked(dangling) }
    };

    let ptr = T::raw_from_ptr_len(ptr, meta);

    // FIXME(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to
    // include a justification that `ptr.as_ptr()` is validly-aligned in the ZST
    // case (in which we manually construct a dangling pointer) and to justify
    // why `Box` is safe to drop (it's because `allocate` uses the system
    // allocator).
    #[allow(clippy::undocumented_unsafe_blocks)]
    Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}

mod len_of {
    use super::*;

    /// A witness type for metadata of a valid instance of `&T`.
    pub(crate) struct MetadataOf<T: ?Sized + KnownLayout> {
        /// # Safety
        ///
        /// The size of an instance of `&T` with the given metadata is not
        /// larger than `isize::MAX`.
        meta: T::PointerMetadata,
        _p: PhantomData<T>,
    }

    impl<T: ?Sized + KnownLayout> Copy for MetadataOf<T> {}
    impl<T: ?Sized + KnownLayout> Clone for MetadataOf<T> {
        fn clone(&self) -> Self {
            *self
        }
    }

    impl<T: ?Sized> MetadataOf<T>
    where
        T: KnownLayout,
    {
        /// Returns `None` if `meta` is greater than `t`'s metadata.
        #[inline(always)]
        pub(crate) fn new_in_bounds(t: &T, meta: usize) -> Option<Self>
        where
            T: KnownLayout<PointerMetadata = usize>,
        {
            if meta <= Ptr::from_ref(t).len() {
                // SAFETY: We have checked that `meta` is not greater than `t`'s
                // metadata, which, by invariant on `&T`, addresses no more than
                // `isize::MAX` bytes [1][2].
                //
                // [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety:
                //
                //    For all types, `T: ?Sized`, and for all `t: &T` or `t:
                //    &mut T`, when such values cross an API boundary, the
                //    following invariants must generally be upheld:
                //
                //    * `t` is non-null
                //    * `t` is aligned to `align_of_val(t)`
                //    * if `size_of_val(t) > 0`, then `t` is dereferenceable for
                //      `size_of_val(t)` many bytes
                //
                //    If `t` points at address `a`, being "dereferenceable" for
                //    N bytes means that the memory range `[a, a + N)` is all
                //    contained within a single allocated object.
                //
                // [2] Per https://doc.rust-lang.org/1.85.0/std/ptr/index.html#allocated-object:
                //
                //    For any allocated object with `base` address, `size`, and
                //    a set of `addresses`, the following are guaranteed:
                //    - For all addresses `a` in `addresses`, `a` is in the
                //      range `base .. (base + size)` (note that this requires
                //      `a < base + size`, not `a <= base + size`)
                //    - `base` is not equal to [`null()`] (i.e., the address
                //      with the numerical value 0)
                //    - `base + size <= usize::MAX`
                //    - `size <= isize::MAX`
                Some(unsafe { Self::new_unchecked(meta) })
            } else {
                None
            }
        }

        /// # Safety
        ///
        /// The size of an instance of `&T` with the given metadata is not
        /// larger than `isize::MAX`.
        pub(crate) unsafe fn new_unchecked(meta: T::PointerMetadata) -> Self {
            // SAFETY: The caller has promised that the size of an instance of
            // `&T` with the given metadata is not larger than `isize::MAX`.
            Self { meta, _p: PhantomData }
        }

        pub(crate) fn get(&self) -> T::PointerMetadata
        where
            T::PointerMetadata: Copy,
        {
            self.meta
        }

        #[inline]
        pub(crate) fn padding_needed_for(&self) -> usize
        where
            T: KnownLayout<PointerMetadata = usize>,
        {
            let trailing_slice_layout = crate::trailing_slice_layout::<T>();

            // FIXME(#67): Remove this allow. See NumExt for more details.
            #[allow(
                unstable_name_collisions,
                clippy::incompatible_msrv,
                clippy::multiple_unsafe_ops_per_block
            )]
            // SAFETY: By invariant on `self`, a `&T` with metadata `self.meta`
            // describes an object of size `<= isize::MAX`. This computes the
            // size of such a `&T` without any trailing padding, and so neither
            // the multiplication nor the addition will overflow.
            let unpadded_size = unsafe {
                let trailing_size = self.meta.unchecked_mul(trailing_slice_layout.elem_size);
                trailing_size.unchecked_add(trailing_slice_layout.offset)
            };

            util::padding_needed_for(unpadded_size, T::LAYOUT.align)
        }

        #[inline(always)]
        pub(crate) fn validate_cast_and_convert_metadata(
            addr: usize,
            bytes_len: MetadataOf<[u8]>,
            cast_type: CastType,
            meta: Option<T::PointerMetadata>,
        ) -> Result<(MetadataOf<T>, MetadataOf<[u8]>), MetadataCastError> {
            let layout = match meta {
                None => T::LAYOUT,
                // This can return `None` if the metadata describes an object
                // which can't fit in an `isize`.
                Some(meta) => {
                    let size = match T::size_for_metadata(meta) {
                        Some(size) => size,
                        None => return Err(MetadataCastError::Size),
                    };
                    DstLayout {
                        align: T::LAYOUT.align,
                        size_info: crate::SizeInfo::Sized { size },
                        statically_shallow_unpadded: false,
                    }
                }
            };
            // Lemma 0: By contract on `validate_cast_and_convert_metadata`, if
            // the result is `Ok(..)`, then a `&T` with `elems` trailing slice
            // elements is no larger in size than `bytes_len.get()`.
            let (elems, split_at) =
                layout.validate_cast_and_convert_metadata(addr, bytes_len.get(), cast_type)?;
            let elems = T::PointerMetadata::from_elem_count(elems);

            // For a slice DST type, if `meta` is `Some(elems)`, then we
            // synthesize `layout` to describe a sized type whose size is equal
            // to the size of the instance that we are asked to cast. For sized
            // types, `validate_cast_and_convert_metadata` returns `elems == 0`.
            // Thus, in this case, we need to use the `elems` passed by the
            // caller, not the one returned by
            // `validate_cast_and_convert_metadata`.
            //
            // Lemma 1: A `&T` with `elems` trailing slice elements is no larger
            // in size than `bytes_len.get()`. Proof:
            // - If `meta` is `None`, then `elems` satisfies this condition by
            //   Lemma 0.
            // - If `meta` is `Some(meta)`, then `layout` describes an object
            //   whose size is equal to the size of an `&T` with `meta`
            //   metadata. By Lemma 0, that size is not larger than
            //   `bytes_len.get()`.
            //
            // Lemma 2: A `&T` with `elems` trailing slice elements is no larger
            // than `isize::MAX` bytes. Proof: By Lemma 1, a `&T` with metadata
            // `elems` is not larger in size than `bytes_len.get()`. By
            // invariant on `MetadataOf<[u8]>`, a `&[u8]` with metadata
            // `bytes_len` is not larger than `isize::MAX`. Because
            // `size_of::<u8>()` is `1`, a `&[u8]` with metadata `bytes_len` has
            // size `bytes_len.get()` bytes. Therefore, a `&T` with metadata
            // `elems` has size not larger than `isize::MAX`.
            let elems = meta.unwrap_or(elems);

            // SAFETY: See Lemma 2.
            let elems = unsafe { MetadataOf::new_unchecked(elems) };

            // SAFETY: Let `size` be the size of a `&T` with metadata `elems`.
            // By post-condition on `validate_cast_and_convert_metadata`, one of
            // the following conditions holds:
            // - `split_at == size`, in which case, by Lemma 2, `split_at <=
            //   isize::MAX`. Since `size_of::<u8>() == 1`, a `[u8]` with
            //   `split_at` elems has size not larger than `isize::MAX`.
            // - `split_at == bytes_len - size`. Since `bytes_len:
            //   MetadataOf<[u8]>`, and since `size` is non-negative, `split_at`
            //   addresses no more bytes than `bytes_len` does. Since
            //   `bytes_len: MetadataOf<[u8]>`, `bytes_len` describes a `[u8]`
            //   which has no more than `isize::MAX` bytes, and thus so does
            //   `split_at`.
            let split_at = unsafe { MetadataOf::<[u8]>::new_unchecked(split_at) };
            Ok((elems, split_at))
        }
    }
}

pub(crate) use len_of::MetadataOf;

/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave FIXME comments
/// sprinkled throughout the codebase to update to the new pattern once it's
/// stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
    // MSRV is 1.70, when that function was stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
    // method rather than to this trait, and so this trait is considered unused.
    //
    // FIXME(#67): Once our MSRV is 1.70, remove this.
    #[allow(unused)]
    pub(crate) trait NonNullExt<T> {
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `ptr` is converted from `data`, which is non-null.
            unsafe { NonNull::new_unchecked(ptr) }
        }
    }

    // A polyfill for unchecked arithmetic (`unchecked_add`, `unchecked_sub`,
    // and `unchecked_mul`) that we can use until methods like
    // `usize::unchecked_sub` are stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, calls like `x.unchecked_sub(y)` resolve to the
    // inherent methods rather than to this trait, and so this trait is
    // considered unused.
    //
    // FIXME(#67): Once our MSRV is high enough, remove this.
    #[allow(unused)]
    pub(crate) trait NumExt {
        /// Add without checking for overflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the addition will not overflow.
        unsafe fn unchecked_add(self, rhs: Self) -> Self;

        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;

        /// Multiply without checking for overflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the multiplication will not overflow.
        unsafe fn unchecked_mul(self, rhs: Self) -> Self;
    }

    // NOTE on coverage: these will never be tested in nightly since they're
    // polyfills for a feature which has been stabilized on our nightly
    // toolchain.
    impl NumExt for usize {
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_add(self, rhs: usize) -> usize {
            match self.checked_add(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the addition will not
                    // overflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }

        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            match self.checked_sub(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the subtraction will not
                    // underflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }

        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_mul(self, rhs: usize) -> usize {
            match self.checked_mul(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the multiplication will
                    // not overflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }
    }
}

#[cfg(test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    #[derive(Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr(C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }

    // A `u64` with alignment 8.
    //
    // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    // contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive(
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr(C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        // Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            Display::fmt(&self.0, f)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_down_to_next_multiple_of_alignment() {
        fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
            let mul = n / align.get();
            mul * align.get()
        }

        for align in [1, 2, 4, 8, 16] {
            for n in 0..256 {
                let align = NonZeroUsize::new(align).unwrap();
                let want = alt_impl(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
            }
        }
    }
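
    // A sanity check in the same style as the test above: compare the
    // bit-twiddling implementation of `padding_needed_for` against a naive
    // modulo-based reference for a range of power-of-two alignments.
    #[test]
    fn test_padding_needed_for() {
        fn alt_impl(len: usize, align: NonZeroUsize) -> usize {
            let remainder = len % align.get();
            if remainder == 0 {
                0
            } else {
                align.get() - remainder
            }
        }

        for align in [1, 2, 4, 8, 16] {
            for len in 0..256 {
                let align = NonZeroUsize::new(align).unwrap();
                let want = alt_impl(len, align);
                let got = padding_needed_for(len, align);
                assert_eq!(got, want, "padding_needed_for({}, {})", len, align);
            }
        }
    }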

    #[rustversion::since(1.57.0)]
    #[test]
    #[should_panic]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
    }

    #[test]
    fn test_send_sync_phantom_data() {
        let x = SendSyncPhantomData::<u8>::default();
        let y = x.clone();
        assert!(x == y);
        assert!(x == SendSyncPhantomData::<u8>::default());
    }

    #[test]
    #[allow(clippy::as_conversions)]
    fn test_as_address() {
        let x = 0u8;
        let r = &x;
        let mut x_mut = 0u8;
        let rm = &mut x_mut;
        let p = r as *const u8;
        let pm = rm as *mut u8;
        let nn = NonNull::new(p as *mut u8).unwrap();

        assert_eq!(AsAddress::addr(r), p as usize);
        assert_eq!(AsAddress::addr(rm), pm as usize);
        assert_eq!(AsAddress::addr(p), p as usize);
        assert_eq!(AsAddress::addr(pm), pm as usize);
        assert_eq!(AsAddress::addr(nn), p as usize);
    }
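
    // Checks that the `const` `max`/`min` helpers defined above return the
    // larger/smaller of their two arguments, including the equal case.
    #[test]
    fn test_max_min() {
        let one = NonZeroUsize::new(1).unwrap();
        let two = NonZeroUsize::new(2).unwrap();
        assert_eq!(max(one, two), two);
        assert_eq!(max(two, one), two);
        assert_eq!(max(one, one), one);
        assert_eq!(min(one, two), one);
        assert_eq!(min(two, one), one);
        assert_eq!(min(two, two), two);
    }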
}