// glam/f32/sse2/vec4.rs

1// Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3use crate::{f32::math, sse2::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(target_arch = "x86")]
10use core::arch::x86::*;
11#[cfg(target_arch = "x86_64")]
12use core::arch::x86_64::*;
13
14#[cfg(feature = "zerocopy")]
15use zerocopy_derive::*;
16
// Helper for bit-casting between `[f32; 4]` and the SIMD-backed `Vec4` in
// `const` contexts, where SSE intrinsics cannot be called. `#[repr(C)]`
// guarantees both variants start at offset 0 and share the same 16 bytes.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
22
/// Creates a 4-dimensional vector.
///
/// This is a free-function shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
29
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
// `repr(transparent)` over a single `__m128` keeps the layout/ABI identical to
// the raw SSE register type; the optional `bytemuck`/`zerocopy` derives rely
// on this plain 16-byte layout.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);
43
44impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    // Backend identification flags: exactly one is `true`, telling callers
    // which storage/math backend this build of `Vec4` was generated for.
    /// `true` if Vec4 uses Rust Portable SIMD.
    pub const USES_CORE_SIMD: bool = false;
    /// `true` if Vec4 uses Arm NEON.
    pub const USES_NEON: bool = false;
    /// `true` if Vec4 uses scalar math.
    pub const USES_SCALAR_MATH: bool = false;
    /// `true` if Vec4 uses Intel SSE2.
    pub const USES_SSE2: bool = true;
    /// `true` if Vec4 uses WebAssembly 128-bit SIMD.
    pub const USES_WASM32_SIMD: bool = false;
106
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        // SAFETY: `UnionCast` is `#[repr(C)]`; writing the `[f32; 4]` variant
        // and reading the `Vec4` variant is a plain bit cast. The union is
        // used because SSE set intrinsics are not callable in `const fn`.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }
113
    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        // SAFETY: same `#[repr(C)]` union bit cast as `Self::new`; needed
        // because `_mm_set1_ps` cannot be used in a `const fn`.
        unsafe { UnionCast { a: [v; 4] }.v }
    }
120
121    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
122    #[inline]
123    #[must_use]
124    pub fn map<F>(self, f: F) -> Self
125    where
126        F: Fn(f32) -> f32,
127    {
128        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
129    }
130
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            // Bitwise blend: (!mask & if_false) | (if_true & mask). Each mask
            // lane is expected to be all-ones or all-zeroes (the `BVec4A`
            // representation), so the OR merges disjoint lane bits.
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }
146
    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }
153
    /// Converts `self` to `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over a 16-byte `__m128`,
        // which has the same size and a compatible bit pattern as `[f32; 4]`,
        // so reading through the cast pointer is sound.
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }
160
    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        // Explicit length check gives a clearer panic than the per-index
        // bounds checks below would.
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }
172
    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        // SAFETY: the assert guarantees at least 4 floats of writable space;
        // `_mm_storeu_ps` performs an unaligned 16-byte store, so no alignment
        // requirement is placed on `slice`.
        unsafe {
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }
185
186    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
187    ///
188    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
189    ///
190    /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
191    #[inline]
192    #[must_use]
193    pub fn truncate(self) -> Vec3 {
194        use crate::swizzles::Vec4Swizzles;
195        self.xyz()
196    }
197
198    /// Creates a 4D vector from `self` with the given value of `x`.
199    #[inline]
200    #[must_use]
201    pub fn with_x(mut self, x: f32) -> Self {
202        self.x = x;
203        self
204    }
205
206    /// Creates a 4D vector from `self` with the given value of `y`.
207    #[inline]
208    #[must_use]
209    pub fn with_y(mut self, y: f32) -> Self {
210        self.y = y;
211        self
212    }
213
214    /// Creates a 4D vector from `self` with the given value of `z`.
215    #[inline]
216    #[must_use]
217    pub fn with_z(mut self, z: f32) -> Self {
218        self.z = z;
219        self
220    }
221
222    /// Creates a 4D vector from `self` with the given value of `w`.
223    #[inline]
224    #[must_use]
225    pub fn with_w(mut self, w: f32) -> Self {
226        self.w = w;
227        self
228    }
229
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // SAFETY: delegates to the crate's SSE2 4-lane dot product helper.
        unsafe { dot4(self.0, rhs.0) }
    }
236
    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        // SAFETY: crate helper computing the dot product into an `__m128`.
        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
    }
243
    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[min(self.x, rhs.x), min(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for minNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for maxNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }
267
    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        // max-then-min matches `f32::clamp`'s result for min <= max.
        self.max(min).min(max)
    }
284
    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Fold [z, w] onto [x, y], then fold lane 1 onto lane 0; the
            // overall minimum ends up in the low lane, extracted by cvtss.
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Same two-step fold as `min_element`, using max.
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }
318
319    /// Returns the index of the first minimum element of `self`.
320    #[doc(alias = "argmin")]
321    #[inline]
322    #[must_use]
323    pub fn min_position(self) -> usize {
324        let mut min = self.x;
325        let mut index = 0;
326        if self.y < min {
327            min = self.y;
328            index = 1;
329        }
330        if self.z < min {
331            min = self.z;
332            index = 2;
333        }
334        if self.w < min {
335            index = 3;
336        }
337        index
338    }
339
340    /// Returns the index of the first maximum element of `self`.
341    #[doc(alias = "argmax")]
342    #[inline]
343    #[must_use]
344    pub fn max_position(self) -> usize {
345        let mut max = self.x;
346        let mut index = 0;
347        if self.y > max {
348            max = self.y;
349            index = 1;
350        }
351        if self.z > max {
352            max = self.z;
353            index = 2;
354        }
355        if self.w > max {
356            index = 3;
357        }
358        index
359    }
360
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
            // First add pairs lanes so lane 0 = x+y and lane 2 = z+w; the
            // second add brings z+w down to lane 0, giving (x+y)+(z+w).
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }
374
    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            // Same pairwise fold as `element_sum`, using multiply:
            // lane 0 becomes (x*y)*(z*w).
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }
388
    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        // Ordered compare: lanes involving NaN compare false, matching `==`.
        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        // Unordered compare: lanes involving NaN compare true, matching `!=`.
        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }
454
    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        // Crate helper; clears each lane's sign bit.
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }
461
    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Bit trick: (lane & bits(-1.0)) | bits(1.0) yields exactly +/-1.0
        // carrying each lane's sign bit. NaN lanes also come out as +/-1.0
        // here, so they are restored to the original NaN values below.
        let result = Self(unsafe { _mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0) });
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
474
    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // `-0.0` has only the sign bit set, so it serves as a per-lane sign
        // mask: take the sign bits from `rhs` and all other bits from `self`.
        let mask = Self::splat(-0.0);
        Self(unsafe { _mm_or_ps(_mm_and_ps(rhs.0, mask.0), _mm_andnot_ps(mask.0, self.0)) })
    }
482
    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
    ///
    /// An element is negative if it has a negative sign, including -0.0, NaNs with negative sign
    /// bit and negative infinity.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        // `movemask` packs the four lane sign bits into the low 4 bits.
        unsafe { _mm_movemask_ps(self.0) as u32 }
    }
495
    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        // |lane| < INFINITY. The ordered `<` compare is false for NaN inputs,
        // so NaN lanes are correctly reported as not finite.
        BVec4A(unsafe { _mm_cmplt_ps(crate::sse2::m128_abs(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // Unordered self-compare is true exactly when a lane is NaN.
        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }
528
    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            // Crate helper computes the 4-lane dot product into the low lane;
            // sqrt it and extract lane 0.
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            // Full-precision 1.0 / sqrt(dot) rather than the approximate
            // `_mm_rsqrt_ps` reciprocal square root estimate.
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }
561
562    /// Computes the Euclidean distance between two points in space.
563    #[inline]
564    #[must_use]
565    pub fn distance(self, rhs: Self) -> f32 {
566        (self - rhs).length()
567    }
568
569    /// Compute the squared euclidean distance between two points in space.
570    #[inline]
571    #[must_use]
572    pub fn distance_squared(self, rhs: Self) -> f32 {
573        (self - rhs).length_squared()
574    }
575
576    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
577    #[inline]
578    #[must_use]
579    pub fn div_euclid(self, rhs: Self) -> Self {
580        Self::new(
581            math::div_euclid(self.x, rhs.x),
582            math::div_euclid(self.y, rhs.y),
583            math::div_euclid(self.z, rhs.z),
584            math::div_euclid(self.w, rhs.w),
585        )
586    }
587
588    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
589    ///
590    /// [Euclidean division]: f32::rem_euclid
591    #[inline]
592    #[must_use]
593    pub fn rem_euclid(self, rhs: Self) -> Self {
594        Self::new(
595            math::rem_euclid(self.x, rhs.x),
596            math::rem_euclid(self.y, rhs.y),
597            math::rem_euclid(self.z, rhs.z),
598            math::rem_euclid(self.w, rhs.w),
599        )
600    }
601
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            // The dot-product helper fills an `__m128` so a single vector
            // divide normalizes all four lanes at once.
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }
622
623    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
624    ///
625    /// In particular, if the input is zero (or very close to zero), or non-finite,
626    /// the result of this operation will be `None`.
627    ///
628    /// See also [`Self::normalize_or_zero()`].
629    #[inline]
630    #[must_use]
631    pub fn try_normalize(self) -> Option<Self> {
632        let rcp = self.length_recip();
633        if rcp.is_finite() && rcp > 0.0 {
634            Some(self * rcp)
635        } else {
636            None
637        }
638    }
639
640    /// Returns `self` normalized to length 1.0 if possible, else returns a
641    /// fallback value.
642    ///
643    /// In particular, if the input is zero (or very close to zero), or non-finite,
644    /// the result of this operation will be the fallback value.
645    ///
646    /// See also [`Self::try_normalize()`].
647    #[inline]
648    #[must_use]
649    pub fn normalize_or(self, fallback: Self) -> Self {
650        let rcp = self.length_recip();
651        if rcp.is_finite() && rcp > 0.0 {
652            self * rcp
653        } else {
654            fallback
655        }
656    }
657
    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }
669
670    /// Returns `self` normalized to length 1.0 and the length of `self`.
671    ///
672    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
673    #[inline]
674    #[must_use]
675    pub fn normalize_and_length(self) -> (Self, f32) {
676        let length = self.length();
677        let rcp = 1.0 / length;
678        if rcp.is_finite() && rcp > 0.0 {
679            (self * rcp, length)
680        } else {
681            (Self::X, 0.0)
682        }
683    }
684
    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // 2e-4 on the squared length corresponds to roughly 1e-4 on the
        // length itself near 1.0 (d(x^2) = 2x dx).
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
693
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        // proj_rhs(self) = rhs * (self . rhs) / (rhs . rhs)
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        // Reciprocal is infinite/NaN when `rhs` has (near-)zero length.
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }
708
    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }
725
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        // With |rhs| == 1 the divide by rhs.dot(rhs) can be skipped.
        rhs * self.dot(rhs)
    }
739
    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }
756
    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Round half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        // Crate SSE2 helper (SSE2 has no native round instruction).
        Self(unsafe { m128_round(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    /// Returns a vector containing the integer part each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }
788
    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        // Rust-style fract: result has the same sign as `self`.
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        // GLSL-style fract: result is always in [0, 1).
        self - self.floor()
    }
812
813    /// Returns a vector containing `e^self` (the exponential function) for each element of
814    /// `self`.
815    #[inline]
816    #[must_use]
817    pub fn exp(self) -> Self {
818        Self::new(
819            math::exp(self.x),
820            math::exp(self.y),
821            math::exp(self.z),
822            math::exp(self.w),
823        )
824    }
825
826    /// Returns a vector containing each element of `self` raised to the power of `n`.
827    #[inline]
828    #[must_use]
829    pub fn powf(self, n: f32) -> Self {
830        Self::new(
831            math::powf(self.x, n),
832            math::powf(self.y, n),
833            math::powf(self.z, n),
834            math::powf(self.w, n),
835        )
836    }
837
    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Full-precision divide, not the approximate `_mm_rcp_ps` estimate.
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }
844
    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        // Two-product form is exact at both endpoints (s == 0.0 and s == 1.0).
        self * (1.0 - s) + rhs * s
    }
856
857    /// Moves towards `rhs` based on the value `d`.
858    ///
859    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
860    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
861    #[inline]
862    #[must_use]
863    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
864        let a = rhs - *self;
865        let len = a.length();
866        if len <= d || len <= 1e-4 {
867            return rhs;
868        }
869        *self + a / len * d
870    }
871
872    /// Calculates the midpoint between `self` and `rhs`.
873    ///
874    /// The midpoint is the average of, or halfway point between, two vectors.
875    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
876    /// while being slightly cheaper to compute.
877    #[inline]
878    pub fn midpoint(self, rhs: Self) -> Self {
879        (self + rhs) * 0.5
880    }
881
    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }
896
897    /// Returns a vector with a length no less than `min` and no more than `max`.
898    ///
899    /// # Panics
900    ///
901    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
902    #[inline]
903    #[must_use]
904    pub fn clamp_length(self, min: f32, max: f32) -> Self {
905        glam_assert!(0.0 <= min);
906        glam_assert!(min <= max);
907        let length_sq = self.length_squared();
908        if length_sq < min * min {
909            min * (self / math::sqrt(length_sq))
910        } else if length_sq > max * max {
911            max * (self / math::sqrt(length_sq))
912        } else {
913            self
914        }
915    }
916
917    /// Returns a vector with a length no more than `max`.
918    ///
919    /// # Panics
920    ///
921    /// Will panic if `max` is negative when `glam_assert` is enabled.
922    #[inline]
923    #[must_use]
924    pub fn clamp_length_max(self, max: f32) -> Self {
925        glam_assert!(0.0 <= max);
926        let length_sq = self.length_squared();
927        if length_sq > max * max {
928            max * (self / math::sqrt(length_sq))
929        } else {
930            self
931        }
932    }
933
934    /// Returns a vector with a length no less than `min`.
935    ///
936    /// # Panics
937    ///
938    /// Will panic if `min` is negative when `glam_assert` is enabled.
939    #[inline]
940    #[must_use]
941    pub fn clamp_length_min(self, min: f32) -> Self {
942        glam_assert!(0.0 <= min);
943        let length_sq = self.length_squared();
944        if length_sq < min * min {
945            min * (self / math::sqrt(length_sq))
946        } else {
947            self
948        }
949    }
950
    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependant on designing algorithms with specific target hardware in
    /// mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // Exactly one of the two cfg branches below is compiled, chosen at build time.
        // With FMA available, a single hardware fused instruction handles all four lanes.
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        // Fallback: scalar fused multiply-adds per component via the crate's math shim.
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
            math::mul_add(self.w, a.w, b.w),
        )
    }
973
974    /// Returns the reflection vector for a given incident vector `self` and surface normal
975    /// `normal`.
976    ///
977    /// `normal` must be normalized.
978    ///
979    /// # Panics
980    ///
981    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
982    #[inline]
983    #[must_use]
984    pub fn reflect(self, normal: Self) -> Self {
985        glam_assert!(normal.is_normalized());
986        self - 2.0 * self.dot(normal) * normal
987    }
988
989    /// Returns the refraction direction for a given incident vector `self`, surface normal
990    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
991    /// a zero vector will be returned.
992    ///
993    /// `self` and `normal` must be normalized.
994    ///
995    /// # Panics
996    ///
997    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
998    #[inline]
999    #[must_use]
1000    pub fn refract(self, normal: Self, eta: f32) -> Self {
1001        glam_assert!(self.is_normalized());
1002        glam_assert!(normal.is_normalized());
1003        let n_dot_i = normal.dot(self);
1004        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
1005        if k >= 0.0 {
1006            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
1007        } else {
1008            Self::ZERO
1009        }
1010    }
1011
1012    /// Casts all elements of `self` to `f64`.
1013    #[inline]
1014    #[must_use]
1015    pub fn as_dvec4(&self) -> crate::DVec4 {
1016        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
1017    }
1018
1019    /// Casts all elements of `self` to `i8`.
1020    #[inline]
1021    #[must_use]
1022    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
1023        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
1024    }
1025
1026    /// Casts all elements of `self` to `u8`.
1027    #[inline]
1028    #[must_use]
1029    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
1030        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
1031    }
1032
1033    /// Casts all elements of `self` to `i16`.
1034    #[inline]
1035    #[must_use]
1036    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
1037        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
1038    }
1039
1040    /// Casts all elements of `self` to `u16`.
1041    #[inline]
1042    #[must_use]
1043    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
1044        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
1045    }
1046
1047    /// Casts all elements of `self` to `i32`.
1048    #[inline]
1049    #[must_use]
1050    pub fn as_ivec4(&self) -> crate::IVec4 {
1051        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
1052    }
1053
1054    /// Casts all elements of `self` to `u32`.
1055    #[inline]
1056    #[must_use]
1057    pub fn as_uvec4(&self) -> crate::UVec4 {
1058        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
1059    }
1060
1061    /// Casts all elements of `self` to `i64`.
1062    #[inline]
1063    #[must_use]
1064    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
1065        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
1066    }
1067
1068    /// Casts all elements of `self` to `u64`.
1069    #[inline]
1070    #[must_use]
1071    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
1072        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
1073    }
1074
1075    /// Casts all elements of `self` to `usize`.
1076    #[inline]
1077    #[must_use]
1078    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
1079        crate::USizeVec4::new(
1080            self.x as usize,
1081            self.y as usize,
1082            self.z as usize,
1083            self.w as usize,
1084        )
1085    }
1086}
1087
1088impl Default for Vec4 {
1089    #[inline(always)]
1090    fn default() -> Self {
1091        Self::ZERO
1092    }
1093}
1094
1095impl PartialEq for Vec4 {
1096    #[inline]
1097    fn eq(&self, rhs: &Self) -> bool {
1098        self.cmpeq(*rhs).all()
1099    }
1100}
1101
1102impl Div for Vec4 {
1103    type Output = Self;
1104    #[inline]
1105    fn div(self, rhs: Self) -> Self {
1106        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
1107    }
1108}
1109
1110impl Div<&Self> for Vec4 {
1111    type Output = Self;
1112    #[inline]
1113    fn div(self, rhs: &Self) -> Self {
1114        self.div(*rhs)
1115    }
1116}
1117
1118impl Div<&Vec4> for &Vec4 {
1119    type Output = Vec4;
1120    #[inline]
1121    fn div(self, rhs: &Vec4) -> Vec4 {
1122        (*self).div(*rhs)
1123    }
1124}
1125
1126impl Div<Vec4> for &Vec4 {
1127    type Output = Vec4;
1128    #[inline]
1129    fn div(self, rhs: Vec4) -> Vec4 {
1130        (*self).div(rhs)
1131    }
1132}
1133
1134impl DivAssign for Vec4 {
1135    #[inline]
1136    fn div_assign(&mut self, rhs: Self) {
1137        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
1138    }
1139}
1140
1141impl DivAssign<&Self> for Vec4 {
1142    #[inline]
1143    fn div_assign(&mut self, rhs: &Self) {
1144        self.div_assign(*rhs);
1145    }
1146}
1147
1148impl Div<f32> for Vec4 {
1149    type Output = Self;
1150    #[inline]
1151    fn div(self, rhs: f32) -> Self {
1152        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
1153    }
1154}
1155
1156impl Div<&f32> for Vec4 {
1157    type Output = Self;
1158    #[inline]
1159    fn div(self, rhs: &f32) -> Self {
1160        self.div(*rhs)
1161    }
1162}
1163
1164impl Div<&f32> for &Vec4 {
1165    type Output = Vec4;
1166    #[inline]
1167    fn div(self, rhs: &f32) -> Vec4 {
1168        (*self).div(*rhs)
1169    }
1170}
1171
1172impl Div<f32> for &Vec4 {
1173    type Output = Vec4;
1174    #[inline]
1175    fn div(self, rhs: f32) -> Vec4 {
1176        (*self).div(rhs)
1177    }
1178}
1179
1180impl DivAssign<f32> for Vec4 {
1181    #[inline]
1182    fn div_assign(&mut self, rhs: f32) {
1183        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
1184    }
1185}
1186
1187impl DivAssign<&f32> for Vec4 {
1188    #[inline]
1189    fn div_assign(&mut self, rhs: &f32) {
1190        self.div_assign(*rhs);
1191    }
1192}
1193
1194impl Div<Vec4> for f32 {
1195    type Output = Vec4;
1196    #[inline]
1197    fn div(self, rhs: Vec4) -> Vec4 {
1198        Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
1199    }
1200}
1201
1202impl Div<&Vec4> for f32 {
1203    type Output = Vec4;
1204    #[inline]
1205    fn div(self, rhs: &Vec4) -> Vec4 {
1206        self.div(*rhs)
1207    }
1208}
1209
1210impl Div<&Vec4> for &f32 {
1211    type Output = Vec4;
1212    #[inline]
1213    fn div(self, rhs: &Vec4) -> Vec4 {
1214        (*self).div(*rhs)
1215    }
1216}
1217
1218impl Div<Vec4> for &f32 {
1219    type Output = Vec4;
1220    #[inline]
1221    fn div(self, rhs: Vec4) -> Vec4 {
1222        (*self).div(rhs)
1223    }
1224}
1225
1226impl Mul for Vec4 {
1227    type Output = Self;
1228    #[inline]
1229    fn mul(self, rhs: Self) -> Self {
1230        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
1231    }
1232}
1233
1234impl Mul<&Self> for Vec4 {
1235    type Output = Self;
1236    #[inline]
1237    fn mul(self, rhs: &Self) -> Self {
1238        self.mul(*rhs)
1239    }
1240}
1241
1242impl Mul<&Vec4> for &Vec4 {
1243    type Output = Vec4;
1244    #[inline]
1245    fn mul(self, rhs: &Vec4) -> Vec4 {
1246        (*self).mul(*rhs)
1247    }
1248}
1249
1250impl Mul<Vec4> for &Vec4 {
1251    type Output = Vec4;
1252    #[inline]
1253    fn mul(self, rhs: Vec4) -> Vec4 {
1254        (*self).mul(rhs)
1255    }
1256}
1257
1258impl MulAssign for Vec4 {
1259    #[inline]
1260    fn mul_assign(&mut self, rhs: Self) {
1261        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
1262    }
1263}
1264
1265impl MulAssign<&Self> for Vec4 {
1266    #[inline]
1267    fn mul_assign(&mut self, rhs: &Self) {
1268        self.mul_assign(*rhs);
1269    }
1270}
1271
1272impl Mul<f32> for Vec4 {
1273    type Output = Self;
1274    #[inline]
1275    fn mul(self, rhs: f32) -> Self {
1276        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
1277    }
1278}
1279
1280impl Mul<&f32> for Vec4 {
1281    type Output = Self;
1282    #[inline]
1283    fn mul(self, rhs: &f32) -> Self {
1284        self.mul(*rhs)
1285    }
1286}
1287
1288impl Mul<&f32> for &Vec4 {
1289    type Output = Vec4;
1290    #[inline]
1291    fn mul(self, rhs: &f32) -> Vec4 {
1292        (*self).mul(*rhs)
1293    }
1294}
1295
1296impl Mul<f32> for &Vec4 {
1297    type Output = Vec4;
1298    #[inline]
1299    fn mul(self, rhs: f32) -> Vec4 {
1300        (*self).mul(rhs)
1301    }
1302}
1303
1304impl MulAssign<f32> for Vec4 {
1305    #[inline]
1306    fn mul_assign(&mut self, rhs: f32) {
1307        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
1308    }
1309}
1310
1311impl MulAssign<&f32> for Vec4 {
1312    #[inline]
1313    fn mul_assign(&mut self, rhs: &f32) {
1314        self.mul_assign(*rhs);
1315    }
1316}
1317
1318impl Mul<Vec4> for f32 {
1319    type Output = Vec4;
1320    #[inline]
1321    fn mul(self, rhs: Vec4) -> Vec4 {
1322        Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
1323    }
1324}
1325
1326impl Mul<&Vec4> for f32 {
1327    type Output = Vec4;
1328    #[inline]
1329    fn mul(self, rhs: &Vec4) -> Vec4 {
1330        self.mul(*rhs)
1331    }
1332}
1333
1334impl Mul<&Vec4> for &f32 {
1335    type Output = Vec4;
1336    #[inline]
1337    fn mul(self, rhs: &Vec4) -> Vec4 {
1338        (*self).mul(*rhs)
1339    }
1340}
1341
1342impl Mul<Vec4> for &f32 {
1343    type Output = Vec4;
1344    #[inline]
1345    fn mul(self, rhs: Vec4) -> Vec4 {
1346        (*self).mul(rhs)
1347    }
1348}
1349
1350impl Add for Vec4 {
1351    type Output = Self;
1352    #[inline]
1353    fn add(self, rhs: Self) -> Self {
1354        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
1355    }
1356}
1357
1358impl Add<&Self> for Vec4 {
1359    type Output = Self;
1360    #[inline]
1361    fn add(self, rhs: &Self) -> Self {
1362        self.add(*rhs)
1363    }
1364}
1365
1366impl Add<&Vec4> for &Vec4 {
1367    type Output = Vec4;
1368    #[inline]
1369    fn add(self, rhs: &Vec4) -> Vec4 {
1370        (*self).add(*rhs)
1371    }
1372}
1373
1374impl Add<Vec4> for &Vec4 {
1375    type Output = Vec4;
1376    #[inline]
1377    fn add(self, rhs: Vec4) -> Vec4 {
1378        (*self).add(rhs)
1379    }
1380}
1381
1382impl AddAssign for Vec4 {
1383    #[inline]
1384    fn add_assign(&mut self, rhs: Self) {
1385        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
1386    }
1387}
1388
1389impl AddAssign<&Self> for Vec4 {
1390    #[inline]
1391    fn add_assign(&mut self, rhs: &Self) {
1392        self.add_assign(*rhs);
1393    }
1394}
1395
1396impl Add<f32> for Vec4 {
1397    type Output = Self;
1398    #[inline]
1399    fn add(self, rhs: f32) -> Self {
1400        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
1401    }
1402}
1403
1404impl Add<&f32> for Vec4 {
1405    type Output = Self;
1406    #[inline]
1407    fn add(self, rhs: &f32) -> Self {
1408        self.add(*rhs)
1409    }
1410}
1411
1412impl Add<&f32> for &Vec4 {
1413    type Output = Vec4;
1414    #[inline]
1415    fn add(self, rhs: &f32) -> Vec4 {
1416        (*self).add(*rhs)
1417    }
1418}
1419
1420impl Add<f32> for &Vec4 {
1421    type Output = Vec4;
1422    #[inline]
1423    fn add(self, rhs: f32) -> Vec4 {
1424        (*self).add(rhs)
1425    }
1426}
1427
1428impl AddAssign<f32> for Vec4 {
1429    #[inline]
1430    fn add_assign(&mut self, rhs: f32) {
1431        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
1432    }
1433}
1434
1435impl AddAssign<&f32> for Vec4 {
1436    #[inline]
1437    fn add_assign(&mut self, rhs: &f32) {
1438        self.add_assign(*rhs);
1439    }
1440}
1441
1442impl Add<Vec4> for f32 {
1443    type Output = Vec4;
1444    #[inline]
1445    fn add(self, rhs: Vec4) -> Vec4 {
1446        Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
1447    }
1448}
1449
1450impl Add<&Vec4> for f32 {
1451    type Output = Vec4;
1452    #[inline]
1453    fn add(self, rhs: &Vec4) -> Vec4 {
1454        self.add(*rhs)
1455    }
1456}
1457
1458impl Add<&Vec4> for &f32 {
1459    type Output = Vec4;
1460    #[inline]
1461    fn add(self, rhs: &Vec4) -> Vec4 {
1462        (*self).add(*rhs)
1463    }
1464}
1465
1466impl Add<Vec4> for &f32 {
1467    type Output = Vec4;
1468    #[inline]
1469    fn add(self, rhs: Vec4) -> Vec4 {
1470        (*self).add(rhs)
1471    }
1472}
1473
1474impl Sub for Vec4 {
1475    type Output = Self;
1476    #[inline]
1477    fn sub(self, rhs: Self) -> Self {
1478        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
1479    }
1480}
1481
1482impl Sub<&Self> for Vec4 {
1483    type Output = Self;
1484    #[inline]
1485    fn sub(self, rhs: &Self) -> Self {
1486        self.sub(*rhs)
1487    }
1488}
1489
1490impl Sub<&Vec4> for &Vec4 {
1491    type Output = Vec4;
1492    #[inline]
1493    fn sub(self, rhs: &Vec4) -> Vec4 {
1494        (*self).sub(*rhs)
1495    }
1496}
1497
1498impl Sub<Vec4> for &Vec4 {
1499    type Output = Vec4;
1500    #[inline]
1501    fn sub(self, rhs: Vec4) -> Vec4 {
1502        (*self).sub(rhs)
1503    }
1504}
1505
1506impl SubAssign for Vec4 {
1507    #[inline]
1508    fn sub_assign(&mut self, rhs: Self) {
1509        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
1510    }
1511}
1512
1513impl SubAssign<&Self> for Vec4 {
1514    #[inline]
1515    fn sub_assign(&mut self, rhs: &Self) {
1516        self.sub_assign(*rhs);
1517    }
1518}
1519
1520impl Sub<f32> for Vec4 {
1521    type Output = Self;
1522    #[inline]
1523    fn sub(self, rhs: f32) -> Self {
1524        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
1525    }
1526}
1527
1528impl Sub<&f32> for Vec4 {
1529    type Output = Self;
1530    #[inline]
1531    fn sub(self, rhs: &f32) -> Self {
1532        self.sub(*rhs)
1533    }
1534}
1535
1536impl Sub<&f32> for &Vec4 {
1537    type Output = Vec4;
1538    #[inline]
1539    fn sub(self, rhs: &f32) -> Vec4 {
1540        (*self).sub(*rhs)
1541    }
1542}
1543
1544impl Sub<f32> for &Vec4 {
1545    type Output = Vec4;
1546    #[inline]
1547    fn sub(self, rhs: f32) -> Vec4 {
1548        (*self).sub(rhs)
1549    }
1550}
1551
1552impl SubAssign<f32> for Vec4 {
1553    #[inline]
1554    fn sub_assign(&mut self, rhs: f32) {
1555        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
1556    }
1557}
1558
1559impl SubAssign<&f32> for Vec4 {
1560    #[inline]
1561    fn sub_assign(&mut self, rhs: &f32) {
1562        self.sub_assign(*rhs);
1563    }
1564}
1565
1566impl Sub<Vec4> for f32 {
1567    type Output = Vec4;
1568    #[inline]
1569    fn sub(self, rhs: Vec4) -> Vec4 {
1570        Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
1571    }
1572}
1573
1574impl Sub<&Vec4> for f32 {
1575    type Output = Vec4;
1576    #[inline]
1577    fn sub(self, rhs: &Vec4) -> Vec4 {
1578        self.sub(*rhs)
1579    }
1580}
1581
1582impl Sub<&Vec4> for &f32 {
1583    type Output = Vec4;
1584    #[inline]
1585    fn sub(self, rhs: &Vec4) -> Vec4 {
1586        (*self).sub(*rhs)
1587    }
1588}
1589
1590impl Sub<Vec4> for &f32 {
1591    type Output = Vec4;
1592    #[inline]
1593    fn sub(self, rhs: Vec4) -> Vec4 {
1594        (*self).sub(rhs)
1595    }
1596}
1597
1598impl Rem for Vec4 {
1599    type Output = Self;
1600    #[inline]
1601    fn rem(self, rhs: Self) -> Self {
1602        unsafe {
1603            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
1604            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
1605        }
1606    }
1607}
1608
1609impl Rem<&Self> for Vec4 {
1610    type Output = Self;
1611    #[inline]
1612    fn rem(self, rhs: &Self) -> Self {
1613        self.rem(*rhs)
1614    }
1615}
1616
1617impl Rem<&Vec4> for &Vec4 {
1618    type Output = Vec4;
1619    #[inline]
1620    fn rem(self, rhs: &Vec4) -> Vec4 {
1621        (*self).rem(*rhs)
1622    }
1623}
1624
1625impl Rem<Vec4> for &Vec4 {
1626    type Output = Vec4;
1627    #[inline]
1628    fn rem(self, rhs: Vec4) -> Vec4 {
1629        (*self).rem(rhs)
1630    }
1631}
1632
1633impl RemAssign for Vec4 {
1634    #[inline]
1635    fn rem_assign(&mut self, rhs: Self) {
1636        *self = self.rem(rhs);
1637    }
1638}
1639
1640impl RemAssign<&Self> for Vec4 {
1641    #[inline]
1642    fn rem_assign(&mut self, rhs: &Self) {
1643        self.rem_assign(*rhs);
1644    }
1645}
1646
1647impl Rem<f32> for Vec4 {
1648    type Output = Self;
1649    #[inline]
1650    fn rem(self, rhs: f32) -> Self {
1651        self.rem(Self::splat(rhs))
1652    }
1653}
1654
1655impl Rem<&f32> for Vec4 {
1656    type Output = Self;
1657    #[inline]
1658    fn rem(self, rhs: &f32) -> Self {
1659        self.rem(*rhs)
1660    }
1661}
1662
1663impl Rem<&f32> for &Vec4 {
1664    type Output = Vec4;
1665    #[inline]
1666    fn rem(self, rhs: &f32) -> Vec4 {
1667        (*self).rem(*rhs)
1668    }
1669}
1670
1671impl Rem<f32> for &Vec4 {
1672    type Output = Vec4;
1673    #[inline]
1674    fn rem(self, rhs: f32) -> Vec4 {
1675        (*self).rem(rhs)
1676    }
1677}
1678
1679impl RemAssign<f32> for Vec4 {
1680    #[inline]
1681    fn rem_assign(&mut self, rhs: f32) {
1682        *self = self.rem(Self::splat(rhs));
1683    }
1684}
1685
1686impl RemAssign<&f32> for Vec4 {
1687    #[inline]
1688    fn rem_assign(&mut self, rhs: &f32) {
1689        self.rem_assign(*rhs);
1690    }
1691}
1692
1693impl Rem<Vec4> for f32 {
1694    type Output = Vec4;
1695    #[inline]
1696    fn rem(self, rhs: Vec4) -> Vec4 {
1697        Vec4::splat(self).rem(rhs)
1698    }
1699}
1700
1701impl Rem<&Vec4> for f32 {
1702    type Output = Vec4;
1703    #[inline]
1704    fn rem(self, rhs: &Vec4) -> Vec4 {
1705        self.rem(*rhs)
1706    }
1707}
1708
1709impl Rem<&Vec4> for &f32 {
1710    type Output = Vec4;
1711    #[inline]
1712    fn rem(self, rhs: &Vec4) -> Vec4 {
1713        (*self).rem(*rhs)
1714    }
1715}
1716
1717impl Rem<Vec4> for &f32 {
1718    type Output = Vec4;
1719    #[inline]
1720    fn rem(self, rhs: Vec4) -> Vec4 {
1721        (*self).rem(rhs)
1722    }
1723}
1724
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128` (see the type
        // definition), which occupies 16 bytes like `[f32; 4]`, so reinterpreting
        // the reference is sound. Lifetime is tied to `&self` by the signature.
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}

impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as `AsRef<[f32; 4]>`; the exclusive `&mut self`
        // borrow guarantees the returned mutable reference is unique.
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}
1738
1739impl Sum for Vec4 {
1740    #[inline]
1741    fn sum<I>(iter: I) -> Self
1742    where
1743        I: Iterator<Item = Self>,
1744    {
1745        iter.fold(Self::ZERO, Self::add)
1746    }
1747}
1748
1749impl<'a> Sum<&'a Self> for Vec4 {
1750    #[inline]
1751    fn sum<I>(iter: I) -> Self
1752    where
1753        I: Iterator<Item = &'a Self>,
1754    {
1755        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1756    }
1757}
1758
1759impl Product for Vec4 {
1760    #[inline]
1761    fn product<I>(iter: I) -> Self
1762    where
1763        I: Iterator<Item = Self>,
1764    {
1765        iter.fold(Self::ONE, Self::mul)
1766    }
1767}
1768
1769impl<'a> Product<&'a Self> for Vec4 {
1770    #[inline]
1771    fn product<I>(iter: I) -> Self
1772    where
1773        I: Iterator<Item = &'a Self>,
1774    {
1775        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1776    }
1777}
1778
1779impl Neg for Vec4 {
1780    type Output = Self;
1781    #[inline]
1782    fn neg(self) -> Self {
1783        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
1784    }
1785}
1786
1787impl Neg for &Vec4 {
1788    type Output = Vec4;
1789    #[inline]
1790    fn neg(self) -> Vec4 {
1791        (*self).neg()
1792    }
1793}
1794
impl Index<usize> for Vec4 {
    type Output = f32;
    /// Returns a reference to the component at `index`: 0 → x, 1 → y, 2 → z, 3 → w.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 3.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    /// Returns a mutable reference to the component at `index`: 0 → x, 1 → y, 2 → z, 3 → w.
    ///
    /// # Panics
    ///
    /// Panics if `index` is greater than 3.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1821
1822impl fmt::Display for Vec4 {
1823    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1824        if let Some(p) = f.precision() {
1825            write!(
1826                f,
1827                "[{:.*}, {:.*}, {:.*}, {:.*}]",
1828                p, self.x, p, self.y, p, self.z, p, self.w
1829            )
1830        } else {
1831            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
1832        }
1833    }
1834}
1835
1836impl fmt::Debug for Vec4 {
1837    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1838        fmt.debug_tuple(stringify!(Vec4))
1839            .field(&self.x)
1840            .field(&self.y)
1841            .field(&self.z)
1842            .field(&self.w)
1843            .finish()
1844    }
1845}
1846
1847impl From<Vec4> for __m128 {
1848    #[inline(always)]
1849    fn from(t: Vec4) -> Self {
1850        t.0
1851    }
1852}
1853
1854impl From<__m128> for Vec4 {
1855    #[inline(always)]
1856    fn from(t: __m128) -> Self {
1857        Self(t)
1858    }
1859}
1860
1861impl From<[f32; 4]> for Vec4 {
1862    #[inline]
1863    fn from(a: [f32; 4]) -> Self {
1864        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
1865    }
1866}
1867
impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Stage the store through a 16-byte-aligned scratch slot so the aligned
        // `_mm_store_ps` can be used instead of an unaligned store.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        // SAFETY: `out` is 16-byte aligned (via `Align16`) and exactly large enough
        // for four f32s; the store fully initializes it before `assume_init`.
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1880
1881impl From<(f32, f32, f32, f32)> for Vec4 {
1882    #[inline]
1883    fn from(t: (f32, f32, f32, f32)) -> Self {
1884        Self::new(t.0, t.1, t.2, t.3)
1885    }
1886}
1887
1888impl From<Vec4> for (f32, f32, f32, f32) {
1889    #[inline]
1890    fn from(v: Vec4) -> Self {
1891        (v.x, v.y, v.z, v.w)
1892    }
1893}
1894
1895impl From<(Vec3A, f32)> for Vec4 {
1896    #[inline]
1897    fn from((v, w): (Vec3A, f32)) -> Self {
1898        v.extend(w)
1899    }
1900}
1901
1902impl From<(f32, Vec3A)> for Vec4 {
1903    #[inline]
1904    fn from((x, v): (f32, Vec3A)) -> Self {
1905        Self::new(x, v.x, v.y, v.z)
1906    }
1907}
1908
1909impl From<(Vec3, f32)> for Vec4 {
1910    #[inline]
1911    fn from((v, w): (Vec3, f32)) -> Self {
1912        Self::new(v.x, v.y, v.z, w)
1913    }
1914}
1915
1916impl From<(f32, Vec3)> for Vec4 {
1917    #[inline]
1918    fn from((x, v): (f32, Vec3)) -> Self {
1919        Self::new(x, v.x, v.y, v.z)
1920    }
1921}
1922
1923impl From<(Vec2, f32, f32)> for Vec4 {
1924    #[inline]
1925    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
1926        Self::new(v.x, v.y, z, w)
1927    }
1928}
1929
1930impl From<(Vec2, Vec2)> for Vec4 {
1931    #[inline]
1932    fn from((v, u): (Vec2, Vec2)) -> Self {
1933        Self::new(v.x, v.y, u.x, u.y)
1934    }
1935}
1936
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // Provides `.x`/`.y`/`.z`/`.w` field access on the SIMD-backed type.
        // SAFETY: relies on `crate::deref::Vec4<f32>` having the same 16-byte,
        // four-f32 layout as this `#[repr(transparent)]` wrapper — an invariant
        // maintained by the crate (NOTE(review): declared elsewhere; not visible here).
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `Deref`; the `&mut self` borrow
        // guarantees the returned mutable reference is unique.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1951
1952impl From<BVec4> for Vec4 {
1953    #[inline]
1954    fn from(v: BVec4) -> Self {
1955        Self::new(
1956            f32::from(v.x),
1957            f32::from(v.y),
1958            f32::from(v.z),
1959            f32::from(v.w),
1960        )
1961    }
1962}
1963
1964#[cfg(not(feature = "scalar-math"))]
1965impl From<BVec4A> for Vec4 {
1966    #[inline]
1967    fn from(v: BVec4A) -> Self {
1968        let bool_array: [bool; 4] = v.into();
1969        Self::new(
1970            f32::from(bool_array[0]),
1971            f32::from(bool_array[1]),
1972            f32::from(bool_array[2]),
1973            f32::from(bool_array[3]),
1974        )
1975    }
1976}