glam/f32/sse2/
vec4.rs

1// Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3use crate::{f32::math, sse2::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(target_arch = "x86")]
10use core::arch::x86::*;
11#[cfg(target_arch = "x86_64")]
12use core::arch::x86_64::*;
13
14#[cfg(feature = "zerocopy")]
15use zerocopy_derive::*;
16
/// Layout-compatible cast helper used to build a `Vec4` (`__m128`) from four
/// `f32` lanes in `const` contexts, where SIMD load intrinsics cannot be called.
/// `#[repr(C)]` guarantees both fields occupy the same 16 bytes, so reading `v`
/// after writing `a` is well-defined.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
22
/// Creates a 4-dimensional vector.
///
/// Convenience shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
29
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
// `repr(transparent)` over `__m128` preserves the SSE register layout and the
// 16-byte alignment promised above; the inner field is crate-visible so sibling
// modules can operate on the raw register.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);
43
44impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    // Backend identification flags: exactly one of these is `true` and names the
    // SIMD implementation this `Vec4` was generated for (SSE2 in this file).
    /// Vec4 uses Rust Portable SIMD
    pub const USES_CORE_SIMD: bool = false;
    /// Vec4 uses Arm NEON
    pub const USES_NEON: bool = false;
    /// Vec4 uses scalar math
    pub const USES_SCALAR_MATH: bool = false;
    /// Vec4 uses Intel SSE2
    pub const USES_SSE2: bool = true;
    /// Vec4 uses WebAssembly 128-bit SIMD
    pub const USES_WASM32_SIMD: bool = false;
106
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        // `UnionCast` is used because SIMD load intrinsics are not `const fn`.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }
120
121    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
122    #[inline]
123    #[must_use]
124    pub fn map<F>(self, f: F) -> Self
125    where
126        F: Fn(f32) -> f32,
127    {
128        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
129    }
130
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        // Branchless blend: (!mask & if_false) | (if_true & mask).
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }
146
    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// Converts `self` to `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // SAFETY: `Vec4` is `repr(transparent)` over `__m128`, which has the same
        // size as (and stricter alignment than) `[f32; 4]`, so this read is valid.
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }
160
    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        // SAFETY: the assert above guarantees at least 4 writable floats;
        // `_mm_storeu_ps` performs an unaligned 16-byte store.
        unsafe {
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }
185
186    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
187    ///
188    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
189    ///
190    /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
191    #[inline]
192    #[must_use]
193    pub fn truncate(self) -> Vec3 {
194        use crate::swizzles::Vec4Swizzles;
195        self.xyz()
196    }
197
    /// Projects a homogeneous coordinate to 3D space by performing perspective divide.
    ///
    /// To project to [`Vec3A`] use [`Vec3A::from_homogeneous()`].
    ///
    /// # Panics
    ///
    /// Will panic if `self.w` is `0` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project(self) -> Vec3 {
        // The perspective divide itself is implemented by `Vec3::from_homogeneous`.
        Vec3::from_homogeneous(self)
    }
210
211    /// Creates a 4D vector from `self` with the given value of `x`.
212    #[inline]
213    #[must_use]
214    pub fn with_x(mut self, x: f32) -> Self {
215        self.x = x;
216        self
217    }
218
219    /// Creates a 4D vector from `self` with the given value of `y`.
220    #[inline]
221    #[must_use]
222    pub fn with_y(mut self, y: f32) -> Self {
223        self.y = y;
224        self
225    }
226
227    /// Creates a 4D vector from `self` with the given value of `z`.
228    #[inline]
229    #[must_use]
230    pub fn with_z(mut self, z: f32) -> Self {
231        self.z = z;
232        self
233    }
234
235    /// Creates a 4D vector from `self` with the given value of `w`.
236    #[inline]
237    #[must_use]
238    pub fn with_w(mut self, w: f32) -> Self {
239        self.w = w;
240        self
241    }
242
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // `dot4` is the crate's SSE2 helper returning the scalar 4-lane dot product.
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
    }
256
    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[min(self.x, rhs.x), min(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for minNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for maxNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        // Clamp as max-then-min; the order matters for NaN lanes.
        self.max(min).min(max)
    }
297
    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // min([x, y, z, w], [z, w, x, x]) -> lanes 0, 1 hold min(x,z), min(y,w).
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            // min with lane 1 -> lane 0 holds the minimum of all four elements.
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // max([x, y, z, w], [z, w, x, x]) -> lanes 0, 1 hold max(x,z), max(y,w).
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            // max with lane 1 -> lane 0 holds the maximum of all four elements.
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }
331
332    /// Returns the index of the first minimum element of `self`.
333    #[doc(alias = "argmin")]
334    #[inline]
335    #[must_use]
336    pub fn min_position(self) -> usize {
337        let mut min = self.x;
338        let mut index = 0;
339        if self.y < min {
340            min = self.y;
341            index = 1;
342        }
343        if self.z < min {
344            min = self.z;
345            index = 2;
346        }
347        if self.w < min {
348            index = 3;
349        }
350        index
351    }
352
353    /// Returns the index of the first maximum element of `self`.
354    #[doc(alias = "argmax")]
355    #[inline]
356    #[must_use]
357    pub fn max_position(self) -> usize {
358        let mut max = self.x;
359        let mut index = 0;
360        if self.y > max {
361            max = self.y;
362            index = 1;
363        }
364        if self.z > max {
365            max = self.z;
366            index = 2;
367        }
368        if self.w > max {
369            index = 3;
370        }
371        index
372    }
373
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
            // [x, y, z, w] + [y, x, w, x] -> lane 0 = x+y, lane 2 = z+w.
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            // Add lane 2 into lane 0 -> (x+y) + (z+w).
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            // [x, y, z, w] * [y, x, w, x] -> lane 0 = x*y, lane 2 = z*w.
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            // Multiply lane 2 into lane 0 -> (x*y) * (z*w).
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }
401
    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    ///
    /// The returned [`BVec4A`] is a SIMD mask; reduce it with its `all`/`any` methods.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        // Per Intel's CMPNEQPS semantics, unordered (NaN) lanes compare not-equal.
        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }
467
    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // (self & -1.0) | 1.0: keeps each lane's sign bit from `self` and ORs in
        // the bit pattern of 1.0, producing +/-1.0 per lane.
        let result = Self(unsafe { _mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0) });
        let mask = self.is_nan_mask();
        // NaN lanes keep their original (NaN) value.
        Self::select(mask, self, result)
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // -0.0 has only the sign bit set: take rhs's sign bits, self's other bits.
        let mask = Self::splat(-0.0);
        Self(unsafe { _mm_or_ps(_mm_and_ps(rhs.0, mask.0), _mm_andnot_ps(mask.0, self.0)) })
    }

    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
    ///
    /// An element is negative if it has a negative sign, including -0.0, NaNs with negative sign
    /// bit and negative infinity.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        // MOVMSKPS gathers the four lane sign bits into the low bits of an integer.
        unsafe { _mm_movemask_ps(self.0) as u32 }
    }
508
    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        // abs(x) < INFINITY is false for +/-inf and also for NaN (unordered).
        BVec4A(unsafe { _mm_cmplt_ps(crate::sse2::m128_abs(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // A lane is unordered with itself exactly when it is NaN.
        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }
541
    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            // `dot4_in_x` leaves the dot product in lane 0; sqrt then extract it.
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            // Full-precision divide rather than the approximate `_mm_rsqrt_ps`.
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }
574
575    /// Computes the Euclidean distance between two points in space.
576    #[inline]
577    #[must_use]
578    pub fn distance(self, rhs: Self) -> f32 {
579        (self - rhs).length()
580    }
581
582    /// Compute the squared euclidean distance between two points in space.
583    #[inline]
584    #[must_use]
585    pub fn distance_squared(self, rhs: Self) -> f32 {
586        (self - rhs).length_squared()
587    }
588
    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }
614
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            // `dot4_into_m128` broadcasts the dot product to all four lanes, so a
            // single divide normalizes every component.
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }
635
636    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
637    ///
638    /// In particular, if the input is zero (or very close to zero), or non-finite,
639    /// the result of this operation will be `None`.
640    ///
641    /// See also [`Self::normalize_or_zero()`].
642    #[inline]
643    #[must_use]
644    pub fn try_normalize(self) -> Option<Self> {
645        let rcp = self.length_recip();
646        if rcp.is_finite() && rcp > 0.0 {
647            Some(self * rcp)
648        } else {
649            None
650        }
651    }
652
653    /// Returns `self` normalized to length 1.0 if possible, else returns a
654    /// fallback value.
655    ///
656    /// In particular, if the input is zero (or very close to zero), or non-finite,
657    /// the result of this operation will be the fallback value.
658    ///
659    /// See also [`Self::try_normalize()`].
660    #[inline]
661    #[must_use]
662    pub fn normalize_or(self, fallback: Self) -> Self {
663        let rcp = self.length_recip();
664        if rcp.is_finite() && rcp > 0.0 {
665            self * rcp
666        } else {
667            fallback
668        }
669    }
670
    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        // Shorthand for `normalize_or` with a zero fallback.
        self.normalize_or(Self::ZERO)
    }
682
683    /// Returns `self` normalized to length 1.0 and the length of `self`.
684    ///
685    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
686    #[inline]
687    #[must_use]
688    pub fn normalize_and_length(self) -> (Self, f32) {
689        let length = self.length();
690        let rcp = 1.0 / length;
691        if rcp.is_finite() && rcp > 0.0 {
692            (self * rcp, length)
693        } else {
694            (Self::X, 0.0)
695        }
696    }
697
    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // 2e-4 on the squared length corresponds to roughly 1e-4 on the length.
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
706
707    /// Returns the vector projection of `self` onto `rhs`.
708    ///
709    /// `rhs` must be of non-zero length.
710    ///
711    /// # Panics
712    ///
713    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
714    #[inline]
715    #[must_use]
716    pub fn project_onto(self, rhs: Self) -> Self {
717        let other_len_sq_rcp = rhs.dot(rhs).recip();
718        glam_assert!(other_len_sq_rcp.is_finite());
719        rhs * self.dot(rhs) * other_len_sq_rcp
720    }
721
    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        // With |rhs| == 1 the projection denominator is 1, so one dot product suffices.
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }
769
    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Round half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }
825
826    /// Returns a vector containing `e^self` (the exponential function) for each element of
827    /// `self`.
828    #[inline]
829    #[must_use]
830    pub fn exp(self) -> Self {
831        Self::new(
832            math::exp(self.x),
833            math::exp(self.y),
834            math::exp(self.z),
835            math::exp(self.w),
836        )
837    }
838
839    /// Returns a vector containing `2^self` for each element of `self`.
840    #[inline]
841    #[must_use]
842    pub fn exp2(self) -> Self {
843        Self::new(
844            math::exp2(self.x),
845            math::exp2(self.y),
846            math::exp2(self.z),
847            math::exp2(self.w),
848        )
849    }
850
851    /// Returns a vector containing the natural logarithm for each element of `self`.
852    /// This returns NaN when the element is negative and negative infinity when the element is zero.
853    #[inline]
854    #[must_use]
855    pub fn ln(self) -> Self {
856        Self::new(
857            math::ln(self.x),
858            math::ln(self.y),
859            math::ln(self.z),
860            math::ln(self.w),
861        )
862    }
863
864    /// Returns a vector containing the base 2 logarithm for each element of `self`.
865    /// This returns NaN when the element is negative and negative infinity when the element is zero.
866    #[inline]
867    #[must_use]
868    pub fn log2(self) -> Self {
869        Self::new(
870            math::log2(self.x),
871            math::log2(self.y),
872            math::log2(self.z),
873            math::log2(self.w),
874        )
875    }
876
877    /// Returns a vector containing each element of `self` raised to the power of `n`.
878    #[inline]
879    #[must_use]
880    pub fn powf(self, n: f32) -> Self {
881        Self::new(
882            math::powf(self.x, n),
883            math::powf(self.y, n),
884            math::powf(self.z, n),
885            math::powf(self.w, n),
886        )
887    }
888
    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Full-precision divide rather than the approximate `_mm_rcp_ps`.
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        // Weighted-sum form of lerp: `self * (1 - s) + rhs * s`.
        self * (1.0 - s) + rhs * s
    }
907
908    /// Moves towards `rhs` based on the value `d`.
909    ///
910    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
911    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
912    #[inline]
913    #[must_use]
914    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
915        let a = rhs - *self;
916        let len = a.length();
917        if len <= d || len <= 1e-4 {
918            return rhs;
919        }
920        *self + a / len * d
921    }
922
923    /// Calculates the midpoint between `self` and `rhs`.
924    ///
925    /// The midpoint is the average of, or halfway point between, two vectors.
926    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
927    /// while being slightly cheaper to compute.
928    #[inline]
929    pub fn midpoint(self, rhs: Self) -> Self {
930        (self + rhs) * 0.5
931    }
932
933    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
934    /// less than or equal to `max_abs_diff`.
935    ///
936    /// This can be used to compare if two vectors contain similar elements. It works best when
937    /// comparing with a known value. The `max_abs_diff` that should be used used depends on
938    /// the values being compared against.
939    ///
940    /// For more see
941    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
942    #[inline]
943    #[must_use]
944    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
945        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
946    }
947
948    /// Returns a vector with a length no less than `min` and no more than `max`.
949    ///
950    /// # Panics
951    ///
952    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
953    #[inline]
954    #[must_use]
955    pub fn clamp_length(self, min: f32, max: f32) -> Self {
956        glam_assert!(0.0 <= min);
957        glam_assert!(min <= max);
958        let length_sq = self.length_squared();
959        if length_sq < min * min {
960            min * (self / math::sqrt(length_sq))
961        } else if length_sq > max * max {
962            max * (self / math::sqrt(length_sq))
963        } else {
964            self
965        }
966    }
967
968    /// Returns a vector with a length no more than `max`.
969    ///
970    /// # Panics
971    ///
972    /// Will panic if `max` is negative when `glam_assert` is enabled.
973    #[inline]
974    #[must_use]
975    pub fn clamp_length_max(self, max: f32) -> Self {
976        glam_assert!(0.0 <= max);
977        let length_sq = self.length_squared();
978        if length_sq > max * max {
979            max * (self / math::sqrt(length_sq))
980        } else {
981            self
982        }
983    }
984
985    /// Returns a vector with a length no less than `min`.
986    ///
987    /// # Panics
988    ///
989    /// Will panic if `min` is negative when `glam_assert` is enabled.
990    #[inline]
991    #[must_use]
992    pub fn clamp_length_min(self, min: f32) -> Self {
993        glam_assert!(0.0 <= min);
994        let length_sq = self.length_squared();
995        if length_sq < min * min {
996            min * (self / math::sqrt(length_sq))
997        } else {
998            self
999        }
1000    }
1001
    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependent on designing algorithms with specific target hardware in
    /// mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // Use the hardware FMA instruction when compiled with `target_feature=+fma`.
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        // Otherwise fall back to per-lane scalar fused multiply-adds.
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
            math::mul_add(self.w, a.w, b.w),
        )
    }
1024
1025    /// Returns the reflection vector for a given incident vector `self` and surface normal
1026    /// `normal`.
1027    ///
1028    /// `normal` must be normalized.
1029    ///
1030    /// # Panics
1031    ///
1032    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
1033    #[inline]
1034    #[must_use]
1035    pub fn reflect(self, normal: Self) -> Self {
1036        glam_assert!(normal.is_normalized());
1037        self - 2.0 * self.dot(normal) * normal
1038    }
1039
    /// Returns the refraction direction for a given incident vector `self`, surface normal
    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
    /// a zero vector will be returned.
    ///
    /// `self` and `normal` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        // k < 0 means the refracted ray cannot exist: total internal reflection.
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }
1062
    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        // Widening f32 -> f64 conversion; always exact.
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }
1069
    /// Casts all elements of `self` to `i8`.
    #[inline]
    #[must_use]
    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
        // `as` float-to-int casts saturate at the target type's bounds and map NaN to 0.
        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
    }
1076
    /// Casts all elements of `self` to `u8`.
    #[inline]
    #[must_use]
    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
        // `as` float-to-int casts saturate (negatives clamp to 0) and map NaN to 0.
        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
    }
1083
    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        // `as` float-to-int casts saturate at the target type's bounds and map NaN to 0.
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }
1090
    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        // `as` float-to-int casts saturate (negatives clamp to 0) and map NaN to 0.
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }
1097
    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        // `as` float-to-int casts saturate at the target type's bounds and map NaN to 0.
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }
1104
    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        // `as` float-to-int casts saturate (negatives clamp to 0) and map NaN to 0.
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }
1111
    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        // `as` float-to-int casts saturate at the target type's bounds and map NaN to 0.
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }
1118
    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        // `as` float-to-int casts saturate (negatives clamp to 0) and map NaN to 0.
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
1125
    /// Casts all elements of `self` to `usize`.
    #[inline]
    #[must_use]
    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
        // `as` float-to-int casts saturate (negatives clamp to 0) and map NaN to 0.
        crate::USizeVec4::new(
            self.x as usize,
            self.y as usize,
            self.z as usize,
            self.w as usize,
        )
    }
1137}
1138
impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        // The default vector is all zeroes.
        Self::ZERO
    }
}
1145
impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        // Lane-wise compare; equal only if every lane matches. Follows IEEE-754
        // comparison semantics, so any NaN lane makes the vectors unequal.
        self.cmpeq(*rhs).all()
    }
}
1152
impl Div for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        // Lane-wise division.
        // SAFETY: SSE2-only intrinsic; this `sse2` backend module is only
        // compiled for targets where SSE2 is available.
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}
1160
1161impl Div<&Self> for Vec4 {
1162    type Output = Self;
1163    #[inline]
1164    fn div(self, rhs: &Self) -> Self {
1165        self.div(*rhs)
1166    }
1167}
1168
1169impl Div<&Vec4> for &Vec4 {
1170    type Output = Vec4;
1171    #[inline]
1172    fn div(self, rhs: &Vec4) -> Vec4 {
1173        (*self).div(*rhs)
1174    }
1175}
1176
1177impl Div<Vec4> for &Vec4 {
1178    type Output = Vec4;
1179    #[inline]
1180    fn div(self, rhs: Vec4) -> Vec4 {
1181        (*self).div(rhs)
1182    }
1183}
1184
impl DivAssign for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        // SAFETY: SSE2-only intrinsic; this backend is gated on SSE2.
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}
1191
1192impl DivAssign<&Self> for Vec4 {
1193    #[inline]
1194    fn div_assign(&mut self, rhs: &Self) {
1195        self.div_assign(*rhs);
1196    }
1197}
1198
impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        // Broadcast the scalar to all four lanes, then divide lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}
1206
1207impl Div<&f32> for Vec4 {
1208    type Output = Self;
1209    #[inline]
1210    fn div(self, rhs: &f32) -> Self {
1211        self.div(*rhs)
1212    }
1213}
1214
1215impl Div<&f32> for &Vec4 {
1216    type Output = Vec4;
1217    #[inline]
1218    fn div(self, rhs: &f32) -> Vec4 {
1219        (*self).div(*rhs)
1220    }
1221}
1222
1223impl Div<f32> for &Vec4 {
1224    type Output = Vec4;
1225    #[inline]
1226    fn div(self, rhs: f32) -> Vec4 {
1227        (*self).div(rhs)
1228    }
1229}
1230
impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}
1237
1238impl DivAssign<&f32> for Vec4 {
1239    #[inline]
1240    fn div_assign(&mut self, rhs: &f32) {
1241        self.div_assign(*rhs);
1242    }
1243}
1244
impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        // Broadcast the scalar numerator, then divide lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}
1252
1253impl Div<&Vec4> for f32 {
1254    type Output = Vec4;
1255    #[inline]
1256    fn div(self, rhs: &Vec4) -> Vec4 {
1257        self.div(*rhs)
1258    }
1259}
1260
1261impl Div<&Vec4> for &f32 {
1262    type Output = Vec4;
1263    #[inline]
1264    fn div(self, rhs: &Vec4) -> Vec4 {
1265        (*self).div(*rhs)
1266    }
1267}
1268
1269impl Div<Vec4> for &f32 {
1270    type Output = Vec4;
1271    #[inline]
1272    fn div(self, rhs: Vec4) -> Vec4 {
1273        (*self).div(rhs)
1274    }
1275}
1276
impl Mul for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        // Lane-wise multiplication.
        // SAFETY: SSE2-only intrinsic; this backend is gated on SSE2.
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}
1284
1285impl Mul<&Self> for Vec4 {
1286    type Output = Self;
1287    #[inline]
1288    fn mul(self, rhs: &Self) -> Self {
1289        self.mul(*rhs)
1290    }
1291}
1292
1293impl Mul<&Vec4> for &Vec4 {
1294    type Output = Vec4;
1295    #[inline]
1296    fn mul(self, rhs: &Vec4) -> Vec4 {
1297        (*self).mul(*rhs)
1298    }
1299}
1300
1301impl Mul<Vec4> for &Vec4 {
1302    type Output = Vec4;
1303    #[inline]
1304    fn mul(self, rhs: Vec4) -> Vec4 {
1305        (*self).mul(rhs)
1306    }
1307}
1308
impl MulAssign for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        // SAFETY: SSE2-only intrinsic; this backend is gated on SSE2.
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}
1315
1316impl MulAssign<&Self> for Vec4 {
1317    #[inline]
1318    fn mul_assign(&mut self, rhs: &Self) {
1319        self.mul_assign(*rhs);
1320    }
1321}
1322
impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        // Broadcast the scalar to all four lanes, then multiply lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}
1330
1331impl Mul<&f32> for Vec4 {
1332    type Output = Self;
1333    #[inline]
1334    fn mul(self, rhs: &f32) -> Self {
1335        self.mul(*rhs)
1336    }
1337}
1338
1339impl Mul<&f32> for &Vec4 {
1340    type Output = Vec4;
1341    #[inline]
1342    fn mul(self, rhs: &f32) -> Vec4 {
1343        (*self).mul(*rhs)
1344    }
1345}
1346
1347impl Mul<f32> for &Vec4 {
1348    type Output = Vec4;
1349    #[inline]
1350    fn mul(self, rhs: f32) -> Vec4 {
1351        (*self).mul(rhs)
1352    }
1353}
1354
impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}
1361
1362impl MulAssign<&f32> for Vec4 {
1363    #[inline]
1364    fn mul_assign(&mut self, rhs: &f32) {
1365        self.mul_assign(*rhs);
1366    }
1367}
1368
impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        // Broadcast the scalar, then multiply lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}
1376
1377impl Mul<&Vec4> for f32 {
1378    type Output = Vec4;
1379    #[inline]
1380    fn mul(self, rhs: &Vec4) -> Vec4 {
1381        self.mul(*rhs)
1382    }
1383}
1384
1385impl Mul<&Vec4> for &f32 {
1386    type Output = Vec4;
1387    #[inline]
1388    fn mul(self, rhs: &Vec4) -> Vec4 {
1389        (*self).mul(*rhs)
1390    }
1391}
1392
1393impl Mul<Vec4> for &f32 {
1394    type Output = Vec4;
1395    #[inline]
1396    fn mul(self, rhs: Vec4) -> Vec4 {
1397        (*self).mul(rhs)
1398    }
1399}
1400
impl Add for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        // Lane-wise addition.
        // SAFETY: SSE2-only intrinsic; this backend is gated on SSE2.
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}
1408
1409impl Add<&Self> for Vec4 {
1410    type Output = Self;
1411    #[inline]
1412    fn add(self, rhs: &Self) -> Self {
1413        self.add(*rhs)
1414    }
1415}
1416
1417impl Add<&Vec4> for &Vec4 {
1418    type Output = Vec4;
1419    #[inline]
1420    fn add(self, rhs: &Vec4) -> Vec4 {
1421        (*self).add(*rhs)
1422    }
1423}
1424
1425impl Add<Vec4> for &Vec4 {
1426    type Output = Vec4;
1427    #[inline]
1428    fn add(self, rhs: Vec4) -> Vec4 {
1429        (*self).add(rhs)
1430    }
1431}
1432
impl AddAssign for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        // SAFETY: SSE2-only intrinsic; this backend is gated on SSE2.
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}
1439
1440impl AddAssign<&Self> for Vec4 {
1441    #[inline]
1442    fn add_assign(&mut self, rhs: &Self) {
1443        self.add_assign(*rhs);
1444    }
1445}
1446
impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        // Broadcast the scalar to all four lanes, then add lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}
1454
1455impl Add<&f32> for Vec4 {
1456    type Output = Self;
1457    #[inline]
1458    fn add(self, rhs: &f32) -> Self {
1459        self.add(*rhs)
1460    }
1461}
1462
1463impl Add<&f32> for &Vec4 {
1464    type Output = Vec4;
1465    #[inline]
1466    fn add(self, rhs: &f32) -> Vec4 {
1467        (*self).add(*rhs)
1468    }
1469}
1470
1471impl Add<f32> for &Vec4 {
1472    type Output = Vec4;
1473    #[inline]
1474    fn add(self, rhs: f32) -> Vec4 {
1475        (*self).add(rhs)
1476    }
1477}
1478
impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}
1485
1486impl AddAssign<&f32> for Vec4 {
1487    #[inline]
1488    fn add_assign(&mut self, rhs: &f32) {
1489        self.add_assign(*rhs);
1490    }
1491}
1492
impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        // Broadcast the scalar, then add lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}
1500
1501impl Add<&Vec4> for f32 {
1502    type Output = Vec4;
1503    #[inline]
1504    fn add(self, rhs: &Vec4) -> Vec4 {
1505        self.add(*rhs)
1506    }
1507}
1508
1509impl Add<&Vec4> for &f32 {
1510    type Output = Vec4;
1511    #[inline]
1512    fn add(self, rhs: &Vec4) -> Vec4 {
1513        (*self).add(*rhs)
1514    }
1515}
1516
1517impl Add<Vec4> for &f32 {
1518    type Output = Vec4;
1519    #[inline]
1520    fn add(self, rhs: Vec4) -> Vec4 {
1521        (*self).add(rhs)
1522    }
1523}
1524
impl Sub for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        // Lane-wise subtraction.
        // SAFETY: SSE2-only intrinsic; this backend is gated on SSE2.
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}
1532
1533impl Sub<&Self> for Vec4 {
1534    type Output = Self;
1535    #[inline]
1536    fn sub(self, rhs: &Self) -> Self {
1537        self.sub(*rhs)
1538    }
1539}
1540
1541impl Sub<&Vec4> for &Vec4 {
1542    type Output = Vec4;
1543    #[inline]
1544    fn sub(self, rhs: &Vec4) -> Vec4 {
1545        (*self).sub(*rhs)
1546    }
1547}
1548
1549impl Sub<Vec4> for &Vec4 {
1550    type Output = Vec4;
1551    #[inline]
1552    fn sub(self, rhs: Vec4) -> Vec4 {
1553        (*self).sub(rhs)
1554    }
1555}
1556
impl SubAssign for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        // SAFETY: SSE2-only intrinsic; this backend is gated on SSE2.
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}
1563
1564impl SubAssign<&Self> for Vec4 {
1565    #[inline]
1566    fn sub_assign(&mut self, rhs: &Self) {
1567        self.sub_assign(*rhs);
1568    }
1569}
1570
impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        // Broadcast the scalar to all four lanes, then subtract lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}
1578
1579impl Sub<&f32> for Vec4 {
1580    type Output = Self;
1581    #[inline]
1582    fn sub(self, rhs: &f32) -> Self {
1583        self.sub(*rhs)
1584    }
1585}
1586
1587impl Sub<&f32> for &Vec4 {
1588    type Output = Vec4;
1589    #[inline]
1590    fn sub(self, rhs: &f32) -> Vec4 {
1591        (*self).sub(*rhs)
1592    }
1593}
1594
1595impl Sub<f32> for &Vec4 {
1596    type Output = Vec4;
1597    #[inline]
1598    fn sub(self, rhs: f32) -> Vec4 {
1599        (*self).sub(rhs)
1600    }
1601}
1602
impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}
1609
1610impl SubAssign<&f32> for Vec4 {
1611    #[inline]
1612    fn sub_assign(&mut self, rhs: &f32) {
1613        self.sub_assign(*rhs);
1614    }
1615}
1616
impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        // Broadcast the scalar minuend, then subtract lane-wise.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}
1624
1625impl Sub<&Vec4> for f32 {
1626    type Output = Vec4;
1627    #[inline]
1628    fn sub(self, rhs: &Vec4) -> Vec4 {
1629        self.sub(*rhs)
1630    }
1631}
1632
1633impl Sub<&Vec4> for &f32 {
1634    type Output = Vec4;
1635    #[inline]
1636    fn sub(self, rhs: &Vec4) -> Vec4 {
1637        (*self).sub(*rhs)
1638    }
1639}
1640
1641impl Sub<Vec4> for &f32 {
1642    type Output = Vec4;
1643    #[inline]
1644    fn sub(self, rhs: Vec4) -> Vec4 {
1645        (*self).sub(rhs)
1646    }
1647}
1648
impl Rem for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        // Computes `self - floor(self / rhs) * rhs` per lane (floored modulo).
        // NOTE(review): the floor makes the result follow the sign of `rhs`,
        // unlike Rust's scalar `%` on floats which truncates — confirm this is
        // intentional for the SIMD backends.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}
1659
1660impl Rem<&Self> for Vec4 {
1661    type Output = Self;
1662    #[inline]
1663    fn rem(self, rhs: &Self) -> Self {
1664        self.rem(*rhs)
1665    }
1666}
1667
1668impl Rem<&Vec4> for &Vec4 {
1669    type Output = Vec4;
1670    #[inline]
1671    fn rem(self, rhs: &Vec4) -> Vec4 {
1672        (*self).rem(*rhs)
1673    }
1674}
1675
1676impl Rem<Vec4> for &Vec4 {
1677    type Output = Vec4;
1678    #[inline]
1679    fn rem(self, rhs: Vec4) -> Vec4 {
1680        (*self).rem(rhs)
1681    }
1682}
1683
1684impl RemAssign for Vec4 {
1685    #[inline]
1686    fn rem_assign(&mut self, rhs: Self) {
1687        *self = self.rem(rhs);
1688    }
1689}
1690
1691impl RemAssign<&Self> for Vec4 {
1692    #[inline]
1693    fn rem_assign(&mut self, rhs: &Self) {
1694        self.rem_assign(*rhs);
1695    }
1696}
1697
1698impl Rem<f32> for Vec4 {
1699    type Output = Self;
1700    #[inline]
1701    fn rem(self, rhs: f32) -> Self {
1702        self.rem(Self::splat(rhs))
1703    }
1704}
1705
1706impl Rem<&f32> for Vec4 {
1707    type Output = Self;
1708    #[inline]
1709    fn rem(self, rhs: &f32) -> Self {
1710        self.rem(*rhs)
1711    }
1712}
1713
1714impl Rem<&f32> for &Vec4 {
1715    type Output = Vec4;
1716    #[inline]
1717    fn rem(self, rhs: &f32) -> Vec4 {
1718        (*self).rem(*rhs)
1719    }
1720}
1721
1722impl Rem<f32> for &Vec4 {
1723    type Output = Vec4;
1724    #[inline]
1725    fn rem(self, rhs: f32) -> Vec4 {
1726        (*self).rem(rhs)
1727    }
1728}
1729
1730impl RemAssign<f32> for Vec4 {
1731    #[inline]
1732    fn rem_assign(&mut self, rhs: f32) {
1733        *self = self.rem(Self::splat(rhs));
1734    }
1735}
1736
1737impl RemAssign<&f32> for Vec4 {
1738    #[inline]
1739    fn rem_assign(&mut self, rhs: &f32) {
1740        self.rem_assign(*rhs);
1741    }
1742}
1743
1744impl Rem<Vec4> for f32 {
1745    type Output = Vec4;
1746    #[inline]
1747    fn rem(self, rhs: Vec4) -> Vec4 {
1748        Vec4::splat(self).rem(rhs)
1749    }
1750}
1751
1752impl Rem<&Vec4> for f32 {
1753    type Output = Vec4;
1754    #[inline]
1755    fn rem(self, rhs: &Vec4) -> Vec4 {
1756        self.rem(*rhs)
1757    }
1758}
1759
1760impl Rem<&Vec4> for &f32 {
1761    type Output = Vec4;
1762    #[inline]
1763    fn rem(self, rhs: &Vec4) -> Vec4 {
1764        (*self).rem(*rhs)
1765    }
1766}
1767
1768impl Rem<Vec4> for &f32 {
1769    type Output = Vec4;
1770    #[inline]
1771    fn rem(self, rhs: Vec4) -> Vec4 {
1772        (*self).rem(rhs)
1773    }
1774}
1775
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over a 16-byte `__m128`,
        // which has the same size as `[f32; 4]` and stricter alignment.
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}
1782
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as `AsRef`; exclusive access is
        // guaranteed by the `&mut self` receiver.
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}
1789
1790impl Sum for Vec4 {
1791    #[inline]
1792    fn sum<I>(iter: I) -> Self
1793    where
1794        I: Iterator<Item = Self>,
1795    {
1796        iter.fold(Self::ZERO, Self::add)
1797    }
1798}
1799
1800impl<'a> Sum<&'a Self> for Vec4 {
1801    #[inline]
1802    fn sum<I>(iter: I) -> Self
1803    where
1804        I: Iterator<Item = &'a Self>,
1805    {
1806        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1807    }
1808}
1809
1810impl Product for Vec4 {
1811    #[inline]
1812    fn product<I>(iter: I) -> Self
1813    where
1814        I: Iterator<Item = Self>,
1815    {
1816        iter.fold(Self::ONE, Self::mul)
1817    }
1818}
1819
1820impl<'a> Product<&'a Self> for Vec4 {
1821    #[inline]
1822    fn product<I>(iter: I) -> Self
1823    where
1824        I: Iterator<Item = &'a Self>,
1825    {
1826        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1827    }
1828}
1829
impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // XOR with a broadcast `-0.0` flips only the sign bit of every lane,
        // so `0.0 <-> -0.0` and NaN payloads are preserved.
        // SAFETY: SSE2-only intrinsics; this backend is gated on SSE2.
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}
1837
1838impl Neg for &Vec4 {
1839    type Output = Vec4;
1840    #[inline]
1841    fn neg(self) -> Vec4 {
1842        (*self).neg()
1843    }
1844}
1845
impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        // Component access by index (0 = x .. 3 = w); the named fields are
        // reached through the `Deref` impl. Panics for indices greater than 3.
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1859
impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        // Mutable component access by index (0 = x .. 3 = w), via `DerefMut`.
        // Panics for indices greater than 3.
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1872
impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Honour an explicit precision (e.g. `{:.3}`) for every component;
        // otherwise fall back to the default `Display` for `f32`.
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}
1886
1887impl fmt::Debug for Vec4 {
1888    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1889        fmt.debug_tuple(stringify!(Vec4))
1890            .field(&self.x)
1891            .field(&self.y)
1892            .field(&self.z)
1893            .field(&self.w)
1894            .finish()
1895    }
1896}
1897
impl From<Vec4> for __m128 {
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        // Unwrap the raw SIMD register; zero-cost.
        t.0
    }
}
1904
impl From<__m128> for Vec4 {
    #[inline(always)]
    fn from(t: __m128) -> Self {
        // Wrap the raw SIMD register; zero-cost.
        Self(t)
    }
}
1911
impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        // SAFETY: `a` points to 4 readable `f32`s; `_mm_loadu_ps` performs an
        // unaligned load, so no alignment requirement on the array.
        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
    }
}
1918
impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Stack-allocate a 16-byte-aligned destination so the aligned store is valid.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        // SAFETY: `out` is 16-byte aligned and exactly 16 bytes; `_mm_store_ps`
        // fully initializes it, so `assume_init` is sound.
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1931
1932impl From<(f32, f32, f32, f32)> for Vec4 {
1933    #[inline]
1934    fn from(t: (f32, f32, f32, f32)) -> Self {
1935        Self::new(t.0, t.1, t.2, t.3)
1936    }
1937}
1938
impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        // Extract the components (via `Deref`) into a tuple.
        (v.x, v.y, v.z, v.w)
    }
}
1945
impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        // Append `w` as the fourth component.
        v.extend(w)
    }
}
1952
impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        // Prepend `x` before the three vector components.
        Self::new(x, v.x, v.y, v.z)
    }
}
1959
impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        // Append `w` as the fourth component.
        Self::new(v.x, v.y, v.z, w)
    }
}
1966
impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        // Prepend `x` before the three vector components.
        Self::new(x, v.x, v.y, v.z)
    }
}
1973
impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        // Append `z` and `w` after the two vector components.
        Self::new(v.x, v.y, z, w)
    }
}
1980
impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        // Concatenate the two 2D vectors into (v.x, v.y, u.x, u.y).
        Self::new(v.x, v.y, u.x, u.y)
    }
}
1987
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // Provides `.x`/`.y`/`.z`/`.w` field access on the SIMD storage.
        // SAFETY: relies on `crate::deref::Vec4<f32>` being layout-compatible
        // with the 16-byte SIMD storage — invariant upheld elsewhere in the crate.
        unsafe { &*(self as *const Self).cast() }
    }
}
1995
impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout-compatibility invariant as `Deref`; exclusive
        // access is guaranteed by the `&mut self` receiver.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2002
impl From<BVec4> for Vec4 {
    #[inline]
    fn from(v: BVec4) -> Self {
        // Map each boolean lane to 1.0 (true) or 0.0 (false).
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}
2014
#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    #[inline]
    fn from(v: BVec4A) -> Self {
        // Expand the SIMD mask into bools, then map true -> 1.0, false -> 0.0.
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}