
// Copyright 2023 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

#[macro_use]
pub(crate) mod macros;

#[doc(hidden)]
pub mod macro_util;

use core::{
    marker::PhantomData,
    mem::{self, ManuallyDrop},
    num::NonZeroUsize,
    ptr::NonNull,
};

use super::*;

/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);

// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}

impl<T: ?Sized> Default for SendSyncPhantomData<T> {
    fn default() -> SendSyncPhantomData<T> {
        SendSyncPhantomData(PhantomData)
    }
}

impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> {
    fn eq(&self, _other: &Self) -> bool {
        true
    }
}

impl<T: ?Sized> Eq for SendSyncPhantomData<T> {}

impl<T: ?Sized> Clone for SendSyncPhantomData<T> {
    fn clone(&self) -> Self {
        SendSyncPhantomData(PhantomData)
    }
}

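// An illustrative sketch we've added (not relied upon elsewhere in this
// module): with a plain `PhantomData<T>`, the assertion below would fail for
// `T = *const u8`, since raw pointers are neither `Send` nor `Sync`;
// `SendSyncPhantomData` restores both.
#[cfg(test)]
mod send_sync_phantom_data_example {
    use super::SendSyncPhantomData;

    fn assert_send_sync<T: Send + Sync>() {}

    #[test]
    fn is_send_sync_even_for_raw_pointer_t() {
        assert_send_sync::<SendSyncPhantomData<*const u8>>();
    }
}
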
/// A pointer-like type whose referent address can be extracted as a `usize`.
pub(crate) trait AsAddress {
    fn addr(self) -> usize;
}

impl<T: ?Sized> AsAddress for &T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

impl<T: ?Sized> AsAddress for &mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

impl<T: ?Sized> AsAddress for NonNull<T> {
    #[inline(always)]
    fn addr(self) -> usize {
        AsAddress::addr(self.as_ptr())
    }
}

impl<T: ?Sized> AsAddress for *const T {
    #[inline(always)]
    fn addr(self) -> usize {
        // FIXME(#181), FIXME(https://github.com/rust-lang/rust/issues/95228):
        // Use `.addr()` instead of `as usize` once it's stable, and get rid of
        // this `allow`. Currently, `as usize` is the only way to accomplish
        // this.
        #[allow(clippy::as_conversions)]
        #[cfg_attr(
            __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
            allow(lossy_provenance_casts)
        )]
        return self.cast::<()>() as usize;
    }
}

impl<T: ?Sized> AsAddress for *mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

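// A small illustrative test we've added (a sketch, not exercised elsewhere):
// `addr` returns the same address no matter which pointer type it starts
// from, since all of the impls above bottom out in the `*const T` impl.
#[cfg(test)]
mod as_address_example {
    use super::AsAddress;
    use core::ptr::NonNull;

    #[test]
    fn addr_agrees_across_pointer_types() {
        let x = 0u32;
        let r: &u32 = &x;
        let p: *const u32 = r;
        assert_eq!(AsAddress::addr(r), AsAddress::addr(p));
        assert_eq!(AsAddress::addr(p), AsAddress::addr(NonNull::from(r)));
    }
}
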
/// Validates that `t` is aligned to `align_of::<U>()`.
#[inline(always)]
pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> {
    // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which
    // in turn guarantees that this mod operation will not panic.
    #[allow(clippy::arithmetic_side_effects)]
    let remainder = t.addr() % mem::align_of::<U>();
    if remainder == 0 {
        Ok(())
    } else {
        // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`.
        // That's only possible if `align_of::<U>() > 1`.
        Err(unsafe { AlignmentError::new_unchecked(()) })
    }
}

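// A small illustrative test we've added (a sketch): a `&u64` always passes
// the check for `u64`, while an odd address fails it whenever
// `align_of::<u64>() > 1`.
#[cfg(test)]
mod validate_aligned_to_example {
    use super::{validate_aligned_to, AsAddress};

    #[test]
    fn aligned_and_misaligned() {
        let x = 0u64;
        assert!(validate_aligned_to::<_, u64>(&x).is_ok());

        if core::mem::align_of::<u64>() > 1 {
            // Of two consecutive byte addresses, at least one is not a
            // multiple of any alignment greater than 1.
            let bytes = [0u8; 2];
            let b0: &u8 = &bytes[0];
            let b1: &u8 = &bytes[1];
            let misaligned =
                if AsAddress::addr(b0) % core::mem::align_of::<u64>() != 0 { b0 } else { b1 };
            assert!(validate_aligned_to::<_, u64>(misaligned).is_err());
        }
    }
}
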
/// Returns the number of bytes needed to pad `len` to the next multiple of
/// `align`.
///
/// This function assumes that `align` is a power of two; there are no
/// guarantees on the result it gives if this is not the case.
#[cfg_attr(
    kani,
    kani::requires(len <= isize::MAX as usize),
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&p| (len + p) % align.get() == 0),
    // Ensures that we add the minimum required padding.
    kani::ensures(|&p| p < align.get()),
)]
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(padding_needed_for)]
    fn proof() {
        padding_needed_for(kani::any(), kani::any());
    }

    // Abstractly, we want to compute:
    //   align - (len % align),
    // while also handling the case where `len % align == 0`.
    // Because `align` is a power of two, `len % align == len & (align - 1)`.
    // The subtraction is guaranteed not to underflow because `align` is
    // nonzero.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = align.get() - 1;

    // To efficiently subtract this value from `align`, we can use the bitwise
    // complement. Note that `(!len) & (align - 1)` gives us a number that sums
    // with `len & (align - 1)` to `align - 1`. So subtracting 1 from `len`
    // before taking the complement subtracts `len` from `align`. Some quick
    // inspection of cases shows that this handles the case where
    // `len % align == 0` correctly as well: `(len - 1) % align` then equals
    // `align - 1`, so the complement mod `align` will be 0, as desired.
    //
    // The following reasoning can be verified quickly by an SMT solver
    // supporting the theory of bitvectors:
    // ```smtlib
    // ; Naive implementation of padding
    // (define-fun padding1 (
    //     (len (_ BitVec 32))
    //     (align (_ BitVec 32))) (_ BitVec 32)
    //    (ite
    //      (= (_ bv0 32) (bvand len (bvsub align (_ bv1 32))))
    //      (_ bv0 32)
    //      (bvsub align (bvand len (bvsub align (_ bv1 32))))))
    //
    // ; The implementation below
    // (define-fun padding2 (
    //     (len (_ BitVec 32))
    //     (align (_ BitVec 32))) (_ BitVec 32)
    // (bvand (bvnot (bvsub len (_ bv1 32))) (bvsub align (_ bv1 32))))
    //
    // (define-fun is-power-of-two ((x (_ BitVec 32))) Bool
    //   (= (_ bv0 32) (bvand x (bvsub x (_ bv1 32)))))
    //
    // (declare-const len (_ BitVec 32))
    // (declare-const align (_ BitVec 32))
    // ; Search for a case where align is a power of two and padding2 disagrees
    // ; with padding1
    // (assert (and (is-power-of-two align)
    //              (not (= (padding1 len align) (padding2 len align)))))
    // (simplify (padding1 (_ bv300 32) (_ bv32 32))) ; 20
    // (simplify (padding2 (_ bv300 32) (_ bv32 32))) ; 20
    // (simplify (padding1 (_ bv322 32) (_ bv32 32))) ; 30
    // (simplify (padding2 (_ bv322 32) (_ bv32 32))) ; 30
    // (simplify (padding1 (_ bv8 32) (_ bv8 32)))    ; 0
    // (simplify (padding2 (_ bv8 32) (_ bv8 32)))    ; 0
    // (check-sat) ; unsat, also works for 64-bit bitvectors
    // ```
    !(len.wrapping_sub(1)) & mask
}

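// A worked example of the bit trick above (a sketch we've added): with
// `len = 5` and `align = 4`, `mask = 0b11` and `!(5 - 1) & 0b11 == 3`, and
// indeed `5 + 3 == 8` is the next multiple of 4.
#[cfg(test)]
mod padding_needed_for_example {
    use core::num::NonZeroUsize;

    #[test]
    fn worked_examples() {
        let four = NonZeroUsize::new(4).unwrap();
        assert_eq!(super::padding_needed_for(5, four), 3);
        // A length which is already a multiple of `align` needs no padding.
        assert_eq!(super::padding_needed_for(8, four), 0);
    }
}
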
/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in
/// this case, it will produce nonsense results.
#[inline(always)]
#[cfg_attr(
    kani,
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&m| m <= n && m % align.get() == 0),
    // Guarantees that `m` is the *largest* value such that `m % align == 0`.
    kani::ensures(|&m| {
        // If this `checked_add` fails, then the next multiple would wrap
        // around, which trivially satisfies the "largest value" requirement.
        m.checked_add(align.get()).map(|next_mul| next_mul > n).unwrap_or(true)
    })
)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(round_down_to_next_multiple_of_alignment)]
    fn proof() {
        round_down_to_next_multiple_of_alignment(kani::any(), kani::any());
    }

    let align = align.get();
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
    debug_assert!(align.is_power_of_two());

    // Subtraction can't underflow because `align.get() >= 1`.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = !(align - 1);
    n & mask
}

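// A worked example of the masking approach above (a sketch we've added): with
// `align = 8`, the mask `!(8 - 1)` clears the low three bits, so `13 = 0b1101`
// rounds down to `8 = 0b1000`.
#[cfg(test)]
mod round_down_example {
    use core::num::NonZeroUsize;

    #[test]
    fn worked_examples() {
        let eight = NonZeroUsize::new(8).unwrap();
        assert_eq!(super::round_down_to_next_multiple_of_alignment(13, eight), 8);
        // Multiples of the alignment are unchanged.
        assert_eq!(super::round_down_to_next_multiple_of_alignment(16, eight), 16);
    }
}
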
pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() < b.get() {
        b
    } else {
        a
    }
}

pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() > b.get() {
        b
    } else {
        a
    }
}

/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    debug_assert!(src.len() <= dst.len());
    // SAFETY: This invocation satisfies the safety contract of
    // `copy_nonoverlapping` [1]:
    // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
    // - `dst.as_mut_ptr()` is valid for writes of `src.len()` bytes, because
    //   the caller has promised that `src.len() <= dst.len()`
    // - `src` and `dst` are, trivially, properly aligned
    // - the region of memory beginning at `src` with a size of `src.len()`
    //   bytes does not overlap with the region of memory beginning at `dst`
    //   with the same size, because `dst` is derived from an exclusive
    //   reference.
    unsafe {
        core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    };
}

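// A small illustrative use of `copy_unchecked` (a sketch we've added),
// upholding its `src.len() <= dst.len()` contract.
#[cfg(test)]
mod copy_unchecked_example {
    #[test]
    fn copies_into_prefix() {
        let src = [1u8, 2, 3];
        let mut dst = [0u8; 5];
        // SAFETY: `src.len() == 3`, which is not greater than `dst.len() ==
        // 5`.
        unsafe { super::copy_unchecked(&src, &mut dst) };
        assert_eq!(dst, [1, 2, 3, 0, 0]);
    }
}
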
/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
#[inline(always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
    static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());

    #[repr(C)]
    union Transmute<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }

    // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
    // fields both start at the same offset and the types of those fields are
    // transparent wrappers around `Src` and `Dst` [1]. Consequently,
    // initializing `Transmute` with `src` and then reading out `dst` is
    // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from
    // `src` to `Dst` is valid because — by contract on the caller — `src` is a
    // valid instance of `Dst`.
    //
    // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
    //
    //     `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //     validity as `T`, and is subject to the same layout optimizations as
    //     `T`.
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
    //
    //     Effectively, writing to and then reading from a union with the C
    //     representation is analogous to a transmute from the type used for
    //     writing to the type used for reading.
    unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}

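// A small illustrative use of `transmute_unchecked` (a sketch we've added):
// every `u32` is a valid instance of `[u8; 4]`, so the safety contract is
// satisfied.
#[cfg(test)]
mod transmute_unchecked_example {
    #[test]
    fn u32_to_bytes() {
        // SAFETY: All four bytes of a `u32` are initialized, and any
        // initialized 4-byte sequence is a valid `[u8; 4]`.
        let bytes: [u8; 4] = unsafe { super::transmute_unchecked(0x01020304u32) };
        assert_eq!(u32::from_ne_bytes(bytes), 0x01020304);
    }
}
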
/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
#[must_use = "has no side effects (other than allocation)"]
#[cfg(feature = "alloc")]
#[inline]
pub(crate) unsafe fn new_box<T>(
    meta: T::PointerMetadata,
    allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, AllocError>
where
    T: ?Sized + crate::KnownLayout,
{
    let size = match T::size_for_metadata(meta) {
        Some(size) => size,
        None => return Err(AllocError),
    };

    let align = T::LAYOUT.align.get();
    // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug
    // in which sufficiently-large allocations (those which, when rounded up to
    // the alignment, overflow `isize`) are not rejected, which can cause
    // undefined behavior. See #64 for details.
    //
    // FIXME(#67): Once our MSRV is > 1.64.0, remove this assertion.
    #[allow(clippy::as_conversions)]
    let max_alloc = (isize::MAX as usize).saturating_sub(align);
    if size > max_alloc {
        return Err(AllocError);
    }

    // FIXME(https://github.com/rust-lang/rust/issues/55724): Use
    // `Layout::repeat` once it's stabilized.
    let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

    let ptr = if layout.size() != 0 {
        // SAFETY: By contract on the caller, `allocate` is either
        // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
        // check ensures their shared safety precondition: that the supplied
        // layout is not zero-sized [1].
        //
        // [1] Per https://doc.rust-lang.org/stable/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
        //
        //     This function is unsafe because undefined behavior can result if
        //     the caller does not ensure that layout has non-zero size.
        let ptr = unsafe { allocate(layout) };
        match NonNull::new(ptr) {
            Some(ptr) => ptr,
            None => return Err(AllocError),
        }
    } else {
        let align = T::LAYOUT.align.get();

        // We use `transmute` instead of an `as` cast since Miri (with strict
        // provenance enabled) notices and complains that an `as` cast creates
        // a pointer with no provenance. Miri isn't smart enough to realize
        // that we're only executing this branch when we're constructing a
        // zero-sized `Box`, which doesn't require provenance.
        //
        // SAFETY: Any initialized bit sequence is a bit-valid `*mut u8`. All
        // bits of a `usize` are initialized.
        //
        // `#[allow(unknown_lints)]` is for `integer_to_ptr_transmutes`.
        #[allow(unknown_lints)]
        #[allow(clippy::useless_transmute, integer_to_ptr_transmutes)]
        let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
        // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
        // `NonZeroUsize`, which is guaranteed to be non-zero.
        //
        // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
        // zero, but it does require a non-null dangling pointer for its
        // allocation.
        //
        // FIXME(https://github.com/rust-lang/rust/issues/95228): Use
        // `std::ptr::without_provenance` once it's stable. That may optimize
        // better. As written, Rust may assume that this consumes "exposed"
        // provenance, and thus Rust may have to assume that this may consume
        // provenance from any pointer whose provenance has been exposed.
        unsafe { NonNull::new_unchecked(dangling) }
    };

    let ptr = T::raw_from_ptr_len(ptr, meta);

    // FIXME(#429): Add a "SAFETY" comment and remove this `allow`. Make sure
    // to include a justification that `ptr.as_ptr()` is validly-aligned in the
    // ZST case (in which we manually construct a dangling pointer) and to
    // justify why `Box` is safe to drop (it's because `allocate` uses the
    // system allocator).
    #[allow(clippy::undocumented_unsafe_blocks)]
    Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}

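// A small illustrative call to `new_box` (a sketch we've added):
// `alloc::alloc::alloc_zeroed` is one of the two allocators its safety
// contract permits, and its all-zeros referent is a bit-valid `[u8]`.
#[cfg(all(test, feature = "alloc"))]
mod new_box_example {
    #[test]
    fn zeroed_byte_slice() {
        // SAFETY: `alloc_zeroed` is permitted by `new_box`'s safety contract,
        // and it returns a pointer whose referent is all zeros, which is a
        // bit-valid `[u8]` of the requested length.
        let b: alloc::boxed::Box<[u8]> =
            match unsafe { super::new_box(3, alloc::alloc::alloc_zeroed) } {
                Ok(b) => b,
                // Allocation failure is not a test failure.
                Err(_) => return,
            };
        assert_eq!(b.len(), 3);
        assert!(b.iter().all(|&byte| byte == 0));
    }
}
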
mod len_of {
    use super::*;

    /// A witness type for metadata of a valid instance of `&T`.
    pub(crate) struct MetadataOf<T: ?Sized + KnownLayout> {
        /// # Safety
        ///
        /// The size of an instance of `&T` with the given metadata is not
        /// larger than `isize::MAX`.
        meta: T::PointerMetadata,
        _p: PhantomData<T>,
    }

    impl<T: ?Sized + KnownLayout> Copy for MetadataOf<T> {}
    impl<T: ?Sized + KnownLayout> Clone for MetadataOf<T> {
        fn clone(&self) -> Self {
            *self
        }
    }

    impl<T: ?Sized> MetadataOf<T>
    where
        T: KnownLayout,
    {
        /// Returns `None` if `meta` is greater than `t`'s metadata.
        #[inline(always)]
        pub(crate) fn new_in_bounds(t: &T, meta: usize) -> Option<Self>
        where
            T: KnownLayout<PointerMetadata = usize>,
        {
            if meta <= Ptr::from_ref(t).len() {
                // SAFETY: We have checked that `meta` is not greater than
                // `t`'s metadata, which, by invariant on `&T`, addresses no
                // more than `isize::MAX` bytes [1][2].
                //
                // [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety:
                //
                //    For all types, `T: ?Sized`, and for all `t: &T` or `t:
                //    &mut T`, when such values cross an API boundary, the
                //    following invariants must generally be upheld:
                //
                //    * `t` is non-null
                //    * `t` is aligned to `align_of_val(t)`
                //    * if `size_of_val(t) > 0`, then `t` is dereferenceable for
                //      `size_of_val(t)` many bytes
                //
                //    If `t` points at address `a`, being "dereferenceable" for
                //    N bytes means that the memory range `[a, a + N)` is all
                //    contained within a single allocated object.
                //
                // [2] Per https://doc.rust-lang.org/1.85.0/std/ptr/index.html#allocated-object:
                //
                //    For any allocated object with `base` address, `size`, and
                //    a set of `addresses`, the following are guaranteed:
                //    - For all addresses `a` in `addresses`, `a` is in the
                //      range `base .. (base + size)` (note that this requires
                //      `a < base + size`, not `a <= base + size`)
                //    - `base` is not equal to [`null()`] (i.e., the address
                //      with the numerical value 0)
                //    - `base + size <= usize::MAX`
                //    - `size <= isize::MAX`
                Some(unsafe { Self::new_unchecked(meta) })
            } else {
                None
            }
        }

        /// # Safety
        ///
        /// The size of an instance of `&T` with the given metadata is not
        /// larger than `isize::MAX`.
        pub(crate) unsafe fn new_unchecked(meta: T::PointerMetadata) -> Self {
            // SAFETY: The caller has promised that the size of an instance of
            // `&T` with the given metadata is not larger than `isize::MAX`.
            Self { meta, _p: PhantomData }
        }

        pub(crate) fn get(&self) -> T::PointerMetadata
        where
            T::PointerMetadata: Copy,
        {
            self.meta
        }

        #[inline]
        pub(crate) fn padding_needed_for(&self) -> usize
        where
            T: KnownLayout<PointerMetadata = usize>,
        {
            let trailing_slice_layout = crate::trailing_slice_layout::<T>();
            // SAFETY: By invariant on `self`, a `&T` with metadata `self.meta`
            // describes an object of size `<= isize::MAX`. This computes the
            // size of such a `&T` without any trailing padding, and so neither
            // the multiplication nor the addition will overflow.
            //
            // FIXME(#67): Remove this allow. See NumExt for more details.
            #[allow(unstable_name_collisions, clippy::incompatible_msrv)]
            let unpadded_size = unsafe {
                let trailing_size = self.meta.unchecked_mul(trailing_slice_layout.elem_size);
                trailing_size.unchecked_add(trailing_slice_layout.offset)
            };

            util::padding_needed_for(unpadded_size, T::LAYOUT.align)
        }

        #[inline(always)]
        pub(crate) fn validate_cast_and_convert_metadata(
            addr: usize,
            bytes_len: MetadataOf<[u8]>,
            cast_type: CastType,
            meta: Option<T::PointerMetadata>,
        ) -> Result<(MetadataOf<T>, MetadataOf<[u8]>), MetadataCastError> {
            let layout = match meta {
                None => T::LAYOUT,
                // `size_for_metadata` can return `None` if the metadata
                // describes an object which can't fit in an `isize`.
                Some(meta) => {
                    let size = match T::size_for_metadata(meta) {
                        Some(size) => size,
                        None => return Err(MetadataCastError::Size),
                    };
                    DstLayout {
                        align: T::LAYOUT.align,
                        size_info: crate::SizeInfo::Sized { size },
                        statically_shallow_unpadded: false,
                    }
                }
            };
            // Lemma 0: By contract on `validate_cast_and_convert_metadata`, if
            // the result is `Ok(..)`, then a `&T` with `elems` trailing slice
            // elements is no larger in size than `bytes_len.get()`.
            let (elems, split_at) =
                layout.validate_cast_and_convert_metadata(addr, bytes_len.get(), cast_type)?;
            let elems = T::PointerMetadata::from_elem_count(elems);

            // For a slice DST type, if `meta` is `Some(elems)`, then we
            // synthesize `layout` to describe a sized type whose size is equal
            // to the size of the instance that we are asked to cast. For sized
            // types, `validate_cast_and_convert_metadata` returns `elems ==
            // 0`. Thus, in this case, we need to use the `elems` passed by the
            // caller, not the one returned by
            // `validate_cast_and_convert_metadata`.
            //
            // Lemma 1: A `&T` with `elems` trailing slice elements is no
            // larger in size than `bytes_len.get()`. Proof:
            // - If `meta` is `None`, then `elems` satisfies this condition by
            //   Lemma 0.
            // - If `meta` is `Some(meta)`, then `layout` describes an object
            //   whose size is equal to the size of an `&T` with `meta`
            //   metadata. By Lemma 0, that size is not larger than
            //   `bytes_len.get()`.
            //
            // Lemma 2: A `&T` with `elems` trailing slice elements is no
            // larger than `isize::MAX` bytes. Proof: By Lemma 1, a `&T` with
            // metadata `elems` is not larger in size than `bytes_len.get()`.
            // By invariant on `MetadataOf<[u8]>`, a `&[u8]` with metadata
            // `bytes_len` is not larger than `isize::MAX`. Because
            // `size_of::<u8>()` is `1`, a `&[u8]` with metadata `bytes_len`
            // has size `bytes_len.get()` bytes. Therefore, a `&T` with
            // metadata `elems` has size not larger than `isize::MAX`.
            let elems = meta.unwrap_or(elems);

            // SAFETY: See Lemma 2.
            let elems = unsafe { MetadataOf::new_unchecked(elems) };

            // SAFETY: Let `size` be the size of a `&T` with metadata `elems`.
            // By post-condition on `validate_cast_and_convert_metadata`, one
            // of the following conditions holds:
            // - `split_at == size`, in which case, by Lemma 2, `split_at <=
            //   isize::MAX`. Since `size_of::<u8>() == 1`, a `[u8]` with
            //   `split_at` elems has size not larger than `isize::MAX`.
            // - `split_at == bytes_len - size`. Since `bytes_len:
            //   MetadataOf<[u8]>`, and since `size` is non-negative,
            //   `split_at` addresses no more bytes than `bytes_len` does.
            //   Since `bytes_len: MetadataOf<[u8]>`, `bytes_len` describes a
            //   `[u8]` which has no more than `isize::MAX` bytes, and thus so
            //   does `split_at`.
            let split_at = unsafe { MetadataOf::<[u8]>::new_unchecked(split_at) };
            Ok((elems, split_at))
        }
    }
}

pub(crate) use len_of::MetadataOf;

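// A small illustrative test of `MetadataOf::new_in_bounds` (a sketch we've
// added): metadata within the bounds of an existing `&[u8]` is accepted,
// while metadata exceeding those bounds is rejected.
#[cfg(test)]
mod metadata_of_example {
    use super::MetadataOf;

    #[test]
    fn in_bounds_and_out_of_bounds() {
        let arr = [0u8; 4];
        let bytes: &[u8] = &arr;
        let ok = MetadataOf::<[u8]>::new_in_bounds(bytes, 4).unwrap();
        assert_eq!(ok.get(), 4);
        assert!(MetadataOf::<[u8]>::new_in_bounds(bytes, 5).is_none());
    }
}
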
/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave FIXME comments
/// sprinkled throughout the codebase to update to the new pattern once it's
/// stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // A polyfill for `NonNull::slice_from_raw_parts` that we can use before
    // our MSRV is 1.70, when that function was stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the
    // inherent method rather than to this trait, and so this trait is
    // considered unused.
    //
    // FIXME(#67): Once our MSRV is 1.70, remove this.
    #[allow(unused)]
    pub(crate) trait NonNullExt<T> {
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `ptr` is converted from `data`, which is non-null.
            unsafe { NonNull::new_unchecked(ptr) }
        }
    }

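    // A small illustrative test of the polyfill above (a sketch we've added):
    // the constructed `NonNull<[T]>` has the same address as `data` and the
    // requested length.
    #[cfg(test)]
    mod non_null_ext_example {
        use super::NonNullExt;
        use core::ptr::NonNull;

        #[test]
        fn builds_a_non_null_slice_pointer() {
            let mut arr = [0u8; 3];
            let data = NonNull::new(arr.as_mut_ptr()).unwrap();
            let slice: NonNull<[u8]> = NonNullExt::slice_from_raw_parts(data, 3);
            assert_eq!(slice.cast::<u8>(), data);
            // SAFETY: `slice` points to `arr`, which is live, initialized, and
            // not mutably aliased for the duration of this borrow.
            assert_eq!(unsafe { slice.as_ref().len() }, 3);
        }
    }
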
    // A polyfill for unchecked arithmetic (`unchecked_add`, `unchecked_sub`,
    // and `unchecked_mul`) that we can use until methods like
    // `usize::unchecked_sub` are stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, calls like `x.unchecked_sub(y)` resolve to the
    // inherent methods rather than to this trait, and so this trait is
    // considered unused.
    //
    // FIXME(#67): Once our MSRV is high enough, remove this.
    #[allow(unused)]
    pub(crate) trait NumExt {
        /// Add without checking for overflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the addition will not overflow.
        unsafe fn unchecked_add(self, rhs: Self) -> Self;

        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;

        /// Multiply without checking for overflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the multiplication will not overflow.
        unsafe fn unchecked_mul(self, rhs: Self) -> Self;
    }

    // NOTE on coverage: these will never be tested in nightly since they're
    // polyfills for a feature which has been stabilized on our nightly
    // toolchain.
    impl NumExt for usize {
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_add(self, rhs: usize) -> usize {
            match self.checked_add(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the addition will not
                    // overflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }

        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            match self.checked_sub(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the subtraction will
                    // not underflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }

        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_mul(self, rhs: usize) -> usize {
            match self.checked_mul(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the multiplication will
                    // not overflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }
    }
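
    // A small illustrative test of `NumExt` (a sketch we've added): with the
    // trait named explicitly, the unchecked methods are callable on `usize`,
    // provided the caller upholds the no-overflow/no-underflow contracts.
    #[cfg(test)]
    mod num_ext_example {
        use super::NumExt;

        #[test]
        fn unchecked_arithmetic_in_bounds() {
            // SAFETY: `6 * 7 == 42` does not overflow `usize`.
            let mul = unsafe { NumExt::unchecked_mul(6usize, 7) };
            // SAFETY: `44 - 2 == 42` does not underflow `usize`.
            let sub = unsafe { NumExt::unchecked_sub(44usize, 2) };
            assert_eq!(mul, 42);
            assert_eq!(sub, 42);
        }
    }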
}

#[cfg(test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    #[derive(Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr(C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }
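
    // A short illustrative test of the two helpers above (a sketch we've
    // added): `Align` guarantees at least `align_of::<A>()`, while
    // `ForceUnalign` guarantees that `t` does not satisfy it.
    #[test]
    fn align_and_force_unalign_example() {
        use super::AsAddress;

        let aligned = Align::<u8, u64>::new(0);
        assert_eq!(AsAddress::addr(&aligned.t) % core::mem::align_of::<u64>(), 0);

        if core::mem::align_of::<u64>() > 1 {
            let unaligned = ForceUnalign::<u8, u64>::new(0);
            assert_ne!(AsAddress::addr(&unaligned.t) % core::mem::align_of::<u64>(), 0);
        }
    }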

    // A `u64` with alignment 8.
    //
    // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    // contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive(
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr(C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        // Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            Display::fmt(&self.0, f)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_down_to_next_multiple_of_alignment() {
        fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
            let mul = n / align.get();
            mul * align.get()
        }

        for align in [1, 2, 4, 8, 16] {
            for n in 0..256 {
                let align = NonZeroUsize::new(align).unwrap();
                let want = alt_impl(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
            }
        }
    }

    #[rustversion::since(1.57.0)]
    #[test]
    #[should_panic]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
    }
}
836}