// glam/f32/sse2/vec4.rs

1// Generated from vec.rs.tera template. Edit the template, not the generated file.
2
3use crate::{f32::math, sse2::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(target_arch = "x86")]
10use core::arch::x86::*;
11#[cfg(target_arch = "x86_64")]
12use core::arch::x86_64::*;
13
/// Helper union for bit-casting an `[f32; 4]` into the SIMD-backed [`Vec4`]
/// in `const` contexts, where SSE intrinsics such as `_mm_set_ps` cannot be
/// called. `#[repr(C)]` guarantees both fields start at offset 0 and share
/// the same 16-byte storage.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
19
/// Creates a 4-dimensional vector.
///
/// Free-function shorthand for [`Vec4::new`].
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
26
/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms.
///
/// This type is 16 byte aligned.
//
// `repr(transparent)` makes this a zero-cost newtype over the SSE `__m128`
// register type; the field is `pub(crate)` so sibling modules can reach the
// raw SIMD value.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);
36
37impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes, in `[X, Y, Z, W]` order.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    // Backend-identification flags: for this SSE2 build only `USES_SSE2` is
    // `true`, letting callers query at compile time which math backend is in use.
    /// Vec4 uses Rust Portable SIMD
    pub const USES_CORE_SIMD: bool = false;
    /// Vec4 uses Arm NEON
    pub const USES_NEON: bool = false;
    /// Vec4 uses scalar math
    pub const USES_SCALAR_MATH: bool = false;
    /// Vec4 uses Intel SSE2
    pub const USES_SSE2: bool = true;
    /// Vec4 uses WebAssembly 128-bit SIMD
    pub const USES_WASM32_SIMD: bool = false;
99
    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        // SAFETY: `UnionCast` is `#[repr(C)]` and both fields occupy the same
        // 16 bytes, so reading `v` reinterprets the four floats as a `__m128`.
        // The union is used because SSE intrinsics are not `const fn`.
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        // SAFETY: same layout argument as in `new`; broadcasts `v` to all lanes.
        unsafe { UnionCast { a: [v; 4] }.v }
    }
113
114    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
115    #[inline]
116    #[must_use]
117    pub fn map<F>(self, f: F) -> Self
118    where
119        F: Fn(f32) -> f32,
120    {
121        Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
122    }
123
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and false
    /// uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        // Branchless blend: (mask & if_true) | (!mask & if_false). The mask
        // lanes are expected to be all-ones (true) or all-zeros (false).
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }
139
    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// Converts `self` to `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128`, which has
        // the same size (16 bytes) and a compatible layout to `[f32; 4]`, and
        // `&self` guarantees the pointer is valid and sufficiently aligned.
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }

    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }
165
    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        // SAFETY: the assert above guarantees at least 4 writable floats;
        // `_mm_storeu_ps` performs an unaligned 16-byte store so no alignment
        // requirement is placed on `slice`.
        unsafe {
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }
178
    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// Truncation to [`Vec3`] may also be performed by using [`self.xyz()`][crate::swizzles::Vec4Swizzles::xyz()].
    ///
    /// To truncate to [`Vec3A`] use [`Vec3A::from_vec4()`].
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        // Local import keeps the swizzle trait out of the module namespace.
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }
190
    /// Creates a 4D vector from `self` with the given value of `x`.
    //
    // NOTE(review): the `self.x = ...` writes below go through `Deref`/`DerefMut`
    // impls defined elsewhere in the crate — confirm against those impls.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `y`.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `z`.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    /// Creates a 4D vector from `self` with the given value of `w`.
    #[inline]
    #[must_use]
    pub fn with_w(mut self, w: f32) -> Self {
        self.w = w;
        self
    }
222
    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        // `dot4` is an SSE2 helper (from `crate::sse2`) returning the scalar sum.
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        // `dot4_into_m128` broadcasts the dot product into all four lanes.
        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
    }
236
    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[min(x, rhs.x), min(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for minNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y), ..]`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics for maxNum and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }
260
    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        // max-then-min clamps from below first, then from above.
        self.max(min).min(max)
    }
277
    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Shuffle [z, w, _, _] onto [x, y, ..] -> lanes hold min(x,z), min(y,w).
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            // Bring lane 1 down to lane 0 and reduce the final pair.
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, ..)`.
    ///
    /// NaN propagation does not follow IEEE 754-2008 semantics and may differ on
    /// different SIMD architectures.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Same two-step pairwise reduction as `min_element`, using max.
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }
311
312    /// Returns the index of the first minimum element of `self`.
313    #[doc(alias = "argmin")]
314    #[inline]
315    #[must_use]
316    pub fn min_position(self) -> usize {
317        let mut min = self.x;
318        let mut index = 0;
319        if self.y < min {
320            min = self.y;
321            index = 1;
322        }
323        if self.z < min {
324            min = self.z;
325            index = 2;
326        }
327        if self.w < min {
328            index = 3;
329        }
330        index
331    }
332
333    /// Returns the index of the first maximum element of `self`.
334    #[doc(alias = "argmax")]
335    #[inline]
336    #[must_use]
337    pub fn max_position(self) -> usize {
338        let mut max = self.x;
339        let mut index = 0;
340        if self.y > max {
341            max = self.y;
342            index = 1;
343        }
344        if self.z > max {
345            max = self.z;
346            index = 2;
347        }
348        if self.w > max {
349            index = 3;
350        }
351        index
352    }
353
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + ..`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
            // Shuffle [y, x, w, x] onto v -> lane 0 = x+y, lane 2 = z+w.
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            // Bring lane 2 (z+w) down to lane 0 and add -> total in lane 0.
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * ..`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            // Same pairwise reduction as `element_sum`, using multiplies.
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }
381
    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        // Ordered compare: a lane containing NaN compares false.
        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x != rhs.x, self.y != rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        // Unordered compare: a lane containing NaN compares true, matching
        // Rust's `!=` on f32.
        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x >= rhs.x, self.y >= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x > rhs.x, self.y > rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x <= rhs.x, self.y <= rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words this computes `[self.x < rhs.x, self.y < rhs.y, ..]` for all
    /// elements.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }
447
    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        // Delegates to the shared SSE2 helper, which clears each lane's sign bit.
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }
454
    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Bit trick: (self & -1.0) | 1.0. Masking with -1.0 (0xBF80_0000)
        // keeps each lane's sign bit (the mantissa bits of the mask are zero),
        // then ORing the bit pattern of 1.0 yields exactly +/-1.0 per lane.
        let result = Self(unsafe { _mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0) });
        // The trick maps NaN lanes to +/-1.0, so patch them back to NaN.
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // -0.0 is the sign-bit mask: take the sign bits from `rhs` and
        // everything else (exponent + mantissa) from `self`.
        let mask = Self::splat(-0.0);
        Self(unsafe { _mm_or_ps(_mm_and_ps(rhs.0, mask.0), _mm_andnot_ps(mask.0, self.0)) })
    }
475
    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit.  Element `x` goes
    /// into the first lowest bit, element `y` into the second, etc.
    ///
    /// An element is negative if it has a negative sign, including -0.0, NaNs with negative sign
    /// bit and negative infinity.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        // MOVMSKPS copies the sign bit of each lane into bits 0..=3.
        unsafe { _mm_movemask_ps(self.0) as u32 }
    }
488
    /// Returns `true` if, and only if, all elements are finite.  If any element is either
    /// `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs `is_finite` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_finite(), y.is_finite(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        // |x| < INFINITY is false for +/-inf and (being an ordered compare)
        // also false for NaN, so only finite lanes pass.
        BVec4A(unsafe { _mm_cmplt_ps(crate::sse2::m128_abs(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of self, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), ...]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        // CMPUNORDPS: a lane is "unordered" with itself iff it is NaN.
        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }
521
    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            // `dot4_in_x` leaves the dot product in lane 0; sqrt then extract.
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            // Full-precision divide rather than the approximate RCPPS.
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }
554
    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Compute the squared euclidean distance between two points in space.
    ///
    /// Avoids the square root of `distance`.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }
568
    /// Returns the element-wise quotient of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::div_euclid
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    /// Returns the element-wise remainder of [Euclidean division] of `self` by `rhs`.
    ///
    /// [Euclidean division]: f32::rem_euclid
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }
594
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and _not_ of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the resulting normalized vector is not finite when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            // `dot4_into_m128` broadcasts the dot product to every lane, so a
            // single vector divide normalizes all four components.
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }
615
616    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
617    ///
618    /// In particular, if the input is zero (or very close to zero), or non-finite,
619    /// the result of this operation will be `None`.
620    ///
621    /// See also [`Self::normalize_or_zero()`].
622    #[inline]
623    #[must_use]
624    pub fn try_normalize(self) -> Option<Self> {
625        let rcp = self.length_recip();
626        if rcp.is_finite() && rcp > 0.0 {
627            Some(self * rcp)
628        } else {
629            None
630        }
631    }
632
633    /// Returns `self` normalized to length 1.0 if possible, else returns a
634    /// fallback value.
635    ///
636    /// In particular, if the input is zero (or very close to zero), or non-finite,
637    /// the result of this operation will be the fallback value.
638    ///
639    /// See also [`Self::try_normalize()`].
640    #[inline]
641    #[must_use]
642    pub fn normalize_or(self, fallback: Self) -> Self {
643        let rcp = self.length_recip();
644        if rcp.is_finite() && rcp > 0.0 {
645            self * rcp
646        } else {
647            fallback
648        }
649    }
650
651    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
652    ///
653    /// In particular, if the input is zero (or very close to zero), or non-finite,
654    /// the result of this operation will be zero.
655    ///
656    /// See also [`Self::try_normalize()`].
657    #[inline]
658    #[must_use]
659    pub fn normalize_or_zero(self) -> Self {
660        self.normalize_or(Self::ZERO)
661    }
662
    /// Returns `self` normalized to length 1.0 and the length of `self`.
    ///
    /// If `self` is zero length then `(Self::X, 0.0)` is returned.
    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        // Non-finite or non-positive rcp covers zero-length, NaN and infinite
        // inputs; report a unit X axis with zero length in those cases.
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }
677
    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of approximately `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        // 2e-4 on the squared length corresponds to roughly 1e-4 on the
        // length itself, and avoids the sqrt.
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }
686
    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        // proj_rhs(self) = rhs * (self . rhs) / (rhs . rhs)
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        // With |rhs| == 1 the division by rhs . rhs can be skipped.
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }
749
    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Round half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        // `m128_round` and friends below are shared SSE2 helpers from `crate::sse2`.
        Self(unsafe { m128_round(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for each
    /// element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    /// Returns a vector containing the integer part each element of `self`. This means numbers are
    /// always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }
781
    /// Returns a vector containing the fractional part of the vector as `self - self.trunc()`.
    ///
    /// Note that this differs from the GLSL implementation of `fract` which returns
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns a vector containing the fractional part of the vector as `self - self.floor()`.
    ///
    /// Note that this differs from the Rust implementation of `fract` which returns
    /// `self - self.trunc()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }
805
806    /// Returns a vector containing `e^self` (the exponential function) for each element of
807    /// `self`.
808    #[inline]
809    #[must_use]
810    pub fn exp(self) -> Self {
811        Self::new(
812            math::exp(self.x),
813            math::exp(self.y),
814            math::exp(self.z),
815            math::exp(self.w),
816        )
817    }
818
819    /// Returns a vector containing each element of `self` raised to the power of `n`.
820    #[inline]
821    #[must_use]
822    pub fn powf(self, n: f32) -> Self {
823        Self::new(
824            math::powf(self.x, n),
825            math::powf(self.y, n),
826            math::powf(self.z, n),
827            math::powf(self.w, n),
828        )
829    }
830
    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        // Exact divide rather than the low-precision RCPPS approximation.
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }
837
    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`.  When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        // Two-product form: exact at s == 0 and s == 1.
        self * (1.0 - s) + rhs * s
    }
849
    /// Moves towards `rhs` based on the value `d`.
    ///
    /// When `d` is `0.0`, the result will be equal to `self`. When `d` is equal to
    /// `self.distance(rhs)`, the result will be equal to `rhs`. Will not go past `rhs`.
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        // Snap to the target when within `d`, or when the remaining distance
        // is tiny (<= 1e-4) to avoid dividing by a near-zero length below.
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }
864
865    /// Calculates the midpoint between `self` and `rhs`.
866    ///
867    /// The midpoint is the average of, or halfway point between, two vectors.
868    /// `a.midpoint(b)` should yield the same result as `a.lerp(b, 0.5)`
869    /// while being slightly cheaper to compute.
870    #[inline]
871    pub fn midpoint(self, rhs: Self) -> Self {
872        (self + rhs) * 0.5
873    }
874
    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best when
    /// comparing with a known value. The `max_abs_diff` that should be used depends on
    /// the values being compared against.
    ///
    /// For more see
    /// [comparing floating point numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/).
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }
889
    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either `min` or `max` is negative, when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        // Compare squared lengths to defer the sqrt until a rescale is needed.
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }
943
    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependant on designing algorithms with specific target hardware in
    /// mind.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        // With the `fma` target feature, a single FMA instruction handles all
        // four lanes at once.
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        // Otherwise fall back to the scalar fused multiply-add per component.
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
            math::mul_add(self.w, a.w, b.w),
        )
    }
966
967    /// Returns the reflection vector for a given incident vector `self` and surface normal
968    /// `normal`.
969    ///
970    /// `normal` must be normalized.
971    ///
972    /// # Panics
973    ///
974    /// Will panic if `normal` is not normalized when `glam_assert` is enabled.
975    #[inline]
976    #[must_use]
977    pub fn reflect(self, normal: Self) -> Self {
978        glam_assert!(normal.is_normalized());
979        self - 2.0 * self.dot(normal) * normal
980    }
981
982    /// Returns the refraction direction for a given incident vector `self`, surface normal
983    /// `normal` and ratio of indices of refraction, `eta`. When total internal reflection occurs,
984    /// a zero vector will be returned.
985    ///
986    /// `self` and `normal` must be normalized.
987    ///
988    /// # Panics
989    ///
990    /// Will panic if `self` or `normal` is not normalized when `glam_assert` is enabled.
991    #[inline]
992    #[must_use]
993    pub fn refract(self, normal: Self, eta: f32) -> Self {
994        glam_assert!(self.is_normalized());
995        glam_assert!(normal.is_normalized());
996        let n_dot_i = normal.dot(self);
997        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
998        if k >= 0.0 {
999            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
1000        } else {
1001            Self::ZERO
1002        }
1003    }
1004
1005    /// Casts all elements of `self` to `f64`.
1006    #[inline]
1007    #[must_use]
1008    pub fn as_dvec4(&self) -> crate::DVec4 {
1009        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
1010    }
1011
1012    /// Casts all elements of `self` to `i8`.
1013    #[inline]
1014    #[must_use]
1015    pub fn as_i8vec4(&self) -> crate::I8Vec4 {
1016        crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
1017    }
1018
1019    /// Casts all elements of `self` to `u8`.
1020    #[inline]
1021    #[must_use]
1022    pub fn as_u8vec4(&self) -> crate::U8Vec4 {
1023        crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
1024    }
1025
1026    /// Casts all elements of `self` to `i16`.
1027    #[inline]
1028    #[must_use]
1029    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
1030        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
1031    }
1032
1033    /// Casts all elements of `self` to `u16`.
1034    #[inline]
1035    #[must_use]
1036    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
1037        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
1038    }
1039
1040    /// Casts all elements of `self` to `i32`.
1041    #[inline]
1042    #[must_use]
1043    pub fn as_ivec4(&self) -> crate::IVec4 {
1044        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
1045    }
1046
1047    /// Casts all elements of `self` to `u32`.
1048    #[inline]
1049    #[must_use]
1050    pub fn as_uvec4(&self) -> crate::UVec4 {
1051        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
1052    }
1053
1054    /// Casts all elements of `self` to `i64`.
1055    #[inline]
1056    #[must_use]
1057    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
1058        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
1059    }
1060
1061    /// Casts all elements of `self` to `u64`.
1062    #[inline]
1063    #[must_use]
1064    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
1065        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
1066    }
1067
1068    /// Casts all elements of `self` to `usize`.
1069    #[inline]
1070    #[must_use]
1071    pub fn as_usizevec4(&self) -> crate::USizeVec4 {
1072        crate::USizeVec4::new(
1073            self.x as usize,
1074            self.y as usize,
1075            self.z as usize,
1076            self.w as usize,
1077        )
1078    }
1079}
1080
1081impl Default for Vec4 {
1082    #[inline(always)]
1083    fn default() -> Self {
1084        Self::ZERO
1085    }
1086}
1087
impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        // Equal only when all four lanes compare equal.
        // NOTE(review): assumes `cmpeq` is an IEEE-754 float compare, so any NaN
        // lane makes `eq` false (a vector containing NaN is never equal to itself).
        self.cmpeq(*rhs).all()
    }
}
1094
1095impl Div for Vec4 {
1096    type Output = Self;
1097    #[inline]
1098    fn div(self, rhs: Self) -> Self {
1099        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
1100    }
1101}
1102
1103impl Div<&Self> for Vec4 {
1104    type Output = Self;
1105    #[inline]
1106    fn div(self, rhs: &Self) -> Self {
1107        self.div(*rhs)
1108    }
1109}
1110
1111impl Div<&Vec4> for &Vec4 {
1112    type Output = Vec4;
1113    #[inline]
1114    fn div(self, rhs: &Vec4) -> Vec4 {
1115        (*self).div(*rhs)
1116    }
1117}
1118
1119impl Div<Vec4> for &Vec4 {
1120    type Output = Vec4;
1121    #[inline]
1122    fn div(self, rhs: Vec4) -> Vec4 {
1123        (*self).div(rhs)
1124    }
1125}
1126
1127impl DivAssign for Vec4 {
1128    #[inline]
1129    fn div_assign(&mut self, rhs: Self) {
1130        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
1131    }
1132}
1133
1134impl DivAssign<&Self> for Vec4 {
1135    #[inline]
1136    fn div_assign(&mut self, rhs: &Self) {
1137        self.div_assign(*rhs);
1138    }
1139}
1140
1141impl Div<f32> for Vec4 {
1142    type Output = Self;
1143    #[inline]
1144    fn div(self, rhs: f32) -> Self {
1145        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
1146    }
1147}
1148
1149impl Div<&f32> for Vec4 {
1150    type Output = Self;
1151    #[inline]
1152    fn div(self, rhs: &f32) -> Self {
1153        self.div(*rhs)
1154    }
1155}
1156
1157impl Div<&f32> for &Vec4 {
1158    type Output = Vec4;
1159    #[inline]
1160    fn div(self, rhs: &f32) -> Vec4 {
1161        (*self).div(*rhs)
1162    }
1163}
1164
1165impl Div<f32> for &Vec4 {
1166    type Output = Vec4;
1167    #[inline]
1168    fn div(self, rhs: f32) -> Vec4 {
1169        (*self).div(rhs)
1170    }
1171}
1172
1173impl DivAssign<f32> for Vec4 {
1174    #[inline]
1175    fn div_assign(&mut self, rhs: f32) {
1176        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
1177    }
1178}
1179
1180impl DivAssign<&f32> for Vec4 {
1181    #[inline]
1182    fn div_assign(&mut self, rhs: &f32) {
1183        self.div_assign(*rhs);
1184    }
1185}
1186
1187impl Div<Vec4> for f32 {
1188    type Output = Vec4;
1189    #[inline]
1190    fn div(self, rhs: Vec4) -> Vec4 {
1191        Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
1192    }
1193}
1194
1195impl Div<&Vec4> for f32 {
1196    type Output = Vec4;
1197    #[inline]
1198    fn div(self, rhs: &Vec4) -> Vec4 {
1199        self.div(*rhs)
1200    }
1201}
1202
1203impl Div<&Vec4> for &f32 {
1204    type Output = Vec4;
1205    #[inline]
1206    fn div(self, rhs: &Vec4) -> Vec4 {
1207        (*self).div(*rhs)
1208    }
1209}
1210
1211impl Div<Vec4> for &f32 {
1212    type Output = Vec4;
1213    #[inline]
1214    fn div(self, rhs: Vec4) -> Vec4 {
1215        (*self).div(rhs)
1216    }
1217}
1218
1219impl Mul for Vec4 {
1220    type Output = Self;
1221    #[inline]
1222    fn mul(self, rhs: Self) -> Self {
1223        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
1224    }
1225}
1226
1227impl Mul<&Self> for Vec4 {
1228    type Output = Self;
1229    #[inline]
1230    fn mul(self, rhs: &Self) -> Self {
1231        self.mul(*rhs)
1232    }
1233}
1234
1235impl Mul<&Vec4> for &Vec4 {
1236    type Output = Vec4;
1237    #[inline]
1238    fn mul(self, rhs: &Vec4) -> Vec4 {
1239        (*self).mul(*rhs)
1240    }
1241}
1242
1243impl Mul<Vec4> for &Vec4 {
1244    type Output = Vec4;
1245    #[inline]
1246    fn mul(self, rhs: Vec4) -> Vec4 {
1247        (*self).mul(rhs)
1248    }
1249}
1250
1251impl MulAssign for Vec4 {
1252    #[inline]
1253    fn mul_assign(&mut self, rhs: Self) {
1254        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
1255    }
1256}
1257
1258impl MulAssign<&Self> for Vec4 {
1259    #[inline]
1260    fn mul_assign(&mut self, rhs: &Self) {
1261        self.mul_assign(*rhs);
1262    }
1263}
1264
1265impl Mul<f32> for Vec4 {
1266    type Output = Self;
1267    #[inline]
1268    fn mul(self, rhs: f32) -> Self {
1269        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
1270    }
1271}
1272
1273impl Mul<&f32> for Vec4 {
1274    type Output = Self;
1275    #[inline]
1276    fn mul(self, rhs: &f32) -> Self {
1277        self.mul(*rhs)
1278    }
1279}
1280
1281impl Mul<&f32> for &Vec4 {
1282    type Output = Vec4;
1283    #[inline]
1284    fn mul(self, rhs: &f32) -> Vec4 {
1285        (*self).mul(*rhs)
1286    }
1287}
1288
1289impl Mul<f32> for &Vec4 {
1290    type Output = Vec4;
1291    #[inline]
1292    fn mul(self, rhs: f32) -> Vec4 {
1293        (*self).mul(rhs)
1294    }
1295}
1296
1297impl MulAssign<f32> for Vec4 {
1298    #[inline]
1299    fn mul_assign(&mut self, rhs: f32) {
1300        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
1301    }
1302}
1303
1304impl MulAssign<&f32> for Vec4 {
1305    #[inline]
1306    fn mul_assign(&mut self, rhs: &f32) {
1307        self.mul_assign(*rhs);
1308    }
1309}
1310
1311impl Mul<Vec4> for f32 {
1312    type Output = Vec4;
1313    #[inline]
1314    fn mul(self, rhs: Vec4) -> Vec4 {
1315        Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
1316    }
1317}
1318
1319impl Mul<&Vec4> for f32 {
1320    type Output = Vec4;
1321    #[inline]
1322    fn mul(self, rhs: &Vec4) -> Vec4 {
1323        self.mul(*rhs)
1324    }
1325}
1326
1327impl Mul<&Vec4> for &f32 {
1328    type Output = Vec4;
1329    #[inline]
1330    fn mul(self, rhs: &Vec4) -> Vec4 {
1331        (*self).mul(*rhs)
1332    }
1333}
1334
1335impl Mul<Vec4> for &f32 {
1336    type Output = Vec4;
1337    #[inline]
1338    fn mul(self, rhs: Vec4) -> Vec4 {
1339        (*self).mul(rhs)
1340    }
1341}
1342
1343impl Add for Vec4 {
1344    type Output = Self;
1345    #[inline]
1346    fn add(self, rhs: Self) -> Self {
1347        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
1348    }
1349}
1350
1351impl Add<&Self> for Vec4 {
1352    type Output = Self;
1353    #[inline]
1354    fn add(self, rhs: &Self) -> Self {
1355        self.add(*rhs)
1356    }
1357}
1358
1359impl Add<&Vec4> for &Vec4 {
1360    type Output = Vec4;
1361    #[inline]
1362    fn add(self, rhs: &Vec4) -> Vec4 {
1363        (*self).add(*rhs)
1364    }
1365}
1366
1367impl Add<Vec4> for &Vec4 {
1368    type Output = Vec4;
1369    #[inline]
1370    fn add(self, rhs: Vec4) -> Vec4 {
1371        (*self).add(rhs)
1372    }
1373}
1374
1375impl AddAssign for Vec4 {
1376    #[inline]
1377    fn add_assign(&mut self, rhs: Self) {
1378        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
1379    }
1380}
1381
1382impl AddAssign<&Self> for Vec4 {
1383    #[inline]
1384    fn add_assign(&mut self, rhs: &Self) {
1385        self.add_assign(*rhs);
1386    }
1387}
1388
1389impl Add<f32> for Vec4 {
1390    type Output = Self;
1391    #[inline]
1392    fn add(self, rhs: f32) -> Self {
1393        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
1394    }
1395}
1396
1397impl Add<&f32> for Vec4 {
1398    type Output = Self;
1399    #[inline]
1400    fn add(self, rhs: &f32) -> Self {
1401        self.add(*rhs)
1402    }
1403}
1404
1405impl Add<&f32> for &Vec4 {
1406    type Output = Vec4;
1407    #[inline]
1408    fn add(self, rhs: &f32) -> Vec4 {
1409        (*self).add(*rhs)
1410    }
1411}
1412
1413impl Add<f32> for &Vec4 {
1414    type Output = Vec4;
1415    #[inline]
1416    fn add(self, rhs: f32) -> Vec4 {
1417        (*self).add(rhs)
1418    }
1419}
1420
1421impl AddAssign<f32> for Vec4 {
1422    #[inline]
1423    fn add_assign(&mut self, rhs: f32) {
1424        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
1425    }
1426}
1427
1428impl AddAssign<&f32> for Vec4 {
1429    #[inline]
1430    fn add_assign(&mut self, rhs: &f32) {
1431        self.add_assign(*rhs);
1432    }
1433}
1434
1435impl Add<Vec4> for f32 {
1436    type Output = Vec4;
1437    #[inline]
1438    fn add(self, rhs: Vec4) -> Vec4 {
1439        Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
1440    }
1441}
1442
1443impl Add<&Vec4> for f32 {
1444    type Output = Vec4;
1445    #[inline]
1446    fn add(self, rhs: &Vec4) -> Vec4 {
1447        self.add(*rhs)
1448    }
1449}
1450
1451impl Add<&Vec4> for &f32 {
1452    type Output = Vec4;
1453    #[inline]
1454    fn add(self, rhs: &Vec4) -> Vec4 {
1455        (*self).add(*rhs)
1456    }
1457}
1458
1459impl Add<Vec4> for &f32 {
1460    type Output = Vec4;
1461    #[inline]
1462    fn add(self, rhs: Vec4) -> Vec4 {
1463        (*self).add(rhs)
1464    }
1465}
1466
1467impl Sub for Vec4 {
1468    type Output = Self;
1469    #[inline]
1470    fn sub(self, rhs: Self) -> Self {
1471        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
1472    }
1473}
1474
1475impl Sub<&Self> for Vec4 {
1476    type Output = Self;
1477    #[inline]
1478    fn sub(self, rhs: &Self) -> Self {
1479        self.sub(*rhs)
1480    }
1481}
1482
1483impl Sub<&Vec4> for &Vec4 {
1484    type Output = Vec4;
1485    #[inline]
1486    fn sub(self, rhs: &Vec4) -> Vec4 {
1487        (*self).sub(*rhs)
1488    }
1489}
1490
1491impl Sub<Vec4> for &Vec4 {
1492    type Output = Vec4;
1493    #[inline]
1494    fn sub(self, rhs: Vec4) -> Vec4 {
1495        (*self).sub(rhs)
1496    }
1497}
1498
1499impl SubAssign for Vec4 {
1500    #[inline]
1501    fn sub_assign(&mut self, rhs: Self) {
1502        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
1503    }
1504}
1505
1506impl SubAssign<&Self> for Vec4 {
1507    #[inline]
1508    fn sub_assign(&mut self, rhs: &Self) {
1509        self.sub_assign(*rhs);
1510    }
1511}
1512
1513impl Sub<f32> for Vec4 {
1514    type Output = Self;
1515    #[inline]
1516    fn sub(self, rhs: f32) -> Self {
1517        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
1518    }
1519}
1520
1521impl Sub<&f32> for Vec4 {
1522    type Output = Self;
1523    #[inline]
1524    fn sub(self, rhs: &f32) -> Self {
1525        self.sub(*rhs)
1526    }
1527}
1528
1529impl Sub<&f32> for &Vec4 {
1530    type Output = Vec4;
1531    #[inline]
1532    fn sub(self, rhs: &f32) -> Vec4 {
1533        (*self).sub(*rhs)
1534    }
1535}
1536
1537impl Sub<f32> for &Vec4 {
1538    type Output = Vec4;
1539    #[inline]
1540    fn sub(self, rhs: f32) -> Vec4 {
1541        (*self).sub(rhs)
1542    }
1543}
1544
1545impl SubAssign<f32> for Vec4 {
1546    #[inline]
1547    fn sub_assign(&mut self, rhs: f32) {
1548        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
1549    }
1550}
1551
1552impl SubAssign<&f32> for Vec4 {
1553    #[inline]
1554    fn sub_assign(&mut self, rhs: &f32) {
1555        self.sub_assign(*rhs);
1556    }
1557}
1558
1559impl Sub<Vec4> for f32 {
1560    type Output = Vec4;
1561    #[inline]
1562    fn sub(self, rhs: Vec4) -> Vec4 {
1563        Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
1564    }
1565}
1566
1567impl Sub<&Vec4> for f32 {
1568    type Output = Vec4;
1569    #[inline]
1570    fn sub(self, rhs: &Vec4) -> Vec4 {
1571        self.sub(*rhs)
1572    }
1573}
1574
1575impl Sub<&Vec4> for &f32 {
1576    type Output = Vec4;
1577    #[inline]
1578    fn sub(self, rhs: &Vec4) -> Vec4 {
1579        (*self).sub(*rhs)
1580    }
1581}
1582
1583impl Sub<Vec4> for &f32 {
1584    type Output = Vec4;
1585    #[inline]
1586    fn sub(self, rhs: Vec4) -> Vec4 {
1587        (*self).sub(rhs)
1588    }
1589}
1590
1591impl Rem for Vec4 {
1592    type Output = Self;
1593    #[inline]
1594    fn rem(self, rhs: Self) -> Self {
1595        unsafe {
1596            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
1597            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
1598        }
1599    }
1600}
1601
1602impl Rem<&Self> for Vec4 {
1603    type Output = Self;
1604    #[inline]
1605    fn rem(self, rhs: &Self) -> Self {
1606        self.rem(*rhs)
1607    }
1608}
1609
1610impl Rem<&Vec4> for &Vec4 {
1611    type Output = Vec4;
1612    #[inline]
1613    fn rem(self, rhs: &Vec4) -> Vec4 {
1614        (*self).rem(*rhs)
1615    }
1616}
1617
1618impl Rem<Vec4> for &Vec4 {
1619    type Output = Vec4;
1620    #[inline]
1621    fn rem(self, rhs: Vec4) -> Vec4 {
1622        (*self).rem(rhs)
1623    }
1624}
1625
1626impl RemAssign for Vec4 {
1627    #[inline]
1628    fn rem_assign(&mut self, rhs: Self) {
1629        *self = self.rem(rhs);
1630    }
1631}
1632
1633impl RemAssign<&Self> for Vec4 {
1634    #[inline]
1635    fn rem_assign(&mut self, rhs: &Self) {
1636        self.rem_assign(*rhs);
1637    }
1638}
1639
1640impl Rem<f32> for Vec4 {
1641    type Output = Self;
1642    #[inline]
1643    fn rem(self, rhs: f32) -> Self {
1644        self.rem(Self::splat(rhs))
1645    }
1646}
1647
1648impl Rem<&f32> for Vec4 {
1649    type Output = Self;
1650    #[inline]
1651    fn rem(self, rhs: &f32) -> Self {
1652        self.rem(*rhs)
1653    }
1654}
1655
1656impl Rem<&f32> for &Vec4 {
1657    type Output = Vec4;
1658    #[inline]
1659    fn rem(self, rhs: &f32) -> Vec4 {
1660        (*self).rem(*rhs)
1661    }
1662}
1663
1664impl Rem<f32> for &Vec4 {
1665    type Output = Vec4;
1666    #[inline]
1667    fn rem(self, rhs: f32) -> Vec4 {
1668        (*self).rem(rhs)
1669    }
1670}
1671
1672impl RemAssign<f32> for Vec4 {
1673    #[inline]
1674    fn rem_assign(&mut self, rhs: f32) {
1675        *self = self.rem(Self::splat(rhs));
1676    }
1677}
1678
1679impl RemAssign<&f32> for Vec4 {
1680    #[inline]
1681    fn rem_assign(&mut self, rhs: &f32) {
1682        self.rem_assign(*rhs);
1683    }
1684}
1685
1686impl Rem<Vec4> for f32 {
1687    type Output = Vec4;
1688    #[inline]
1689    fn rem(self, rhs: Vec4) -> Vec4 {
1690        Vec4::splat(self).rem(rhs)
1691    }
1692}
1693
1694impl Rem<&Vec4> for f32 {
1695    type Output = Vec4;
1696    #[inline]
1697    fn rem(self, rhs: &Vec4) -> Vec4 {
1698        self.rem(*rhs)
1699    }
1700}
1701
1702impl Rem<&Vec4> for &f32 {
1703    type Output = Vec4;
1704    #[inline]
1705    fn rem(self, rhs: &Vec4) -> Vec4 {
1706        (*self).rem(*rhs)
1707    }
1708}
1709
1710impl Rem<Vec4> for &f32 {
1711    type Output = Vec4;
1712    #[inline]
1713    fn rem(self, rhs: Vec4) -> Vec4 {
1714        (*self).rem(rhs)
1715    }
1716}
1717
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128`, which is 16
        // bytes of four packed `f32`s, so reinterpreting it as `[f32; 4]` is valid.
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}
1724
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128`, which is 16
        // bytes of four packed `f32`s; `&mut self` guarantees exclusive access.
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}
1731
1732impl Sum for Vec4 {
1733    #[inline]
1734    fn sum<I>(iter: I) -> Self
1735    where
1736        I: Iterator<Item = Self>,
1737    {
1738        iter.fold(Self::ZERO, Self::add)
1739    }
1740}
1741
1742impl<'a> Sum<&'a Self> for Vec4 {
1743    #[inline]
1744    fn sum<I>(iter: I) -> Self
1745    where
1746        I: Iterator<Item = &'a Self>,
1747    {
1748        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1749    }
1750}
1751
1752impl Product for Vec4 {
1753    #[inline]
1754    fn product<I>(iter: I) -> Self
1755    where
1756        I: Iterator<Item = Self>,
1757    {
1758        iter.fold(Self::ONE, Self::mul)
1759    }
1760}
1761
1762impl<'a> Product<&'a Self> for Vec4 {
1763    #[inline]
1764    fn product<I>(iter: I) -> Self
1765    where
1766        I: Iterator<Item = &'a Self>,
1767    {
1768        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1769    }
1770}
1771
impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // Negate all four lanes at once by XOR-ing with -0.0 (a splat of the
        // sign bit), which flips the sign of every component in one instruction.
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Neg for &Vec4 {
    type Output = Vec4;
    #[inline]
    fn neg(self) -> Vec4 {
        // Forward to the by-value impl (Vec4 is Copy).
        (*self).neg()
    }
}
1787
impl Index<usize> for Vec4 {
    type Output = f32;
    /// Returns a reference to the component at `index` (0 = x, 1 = y, 2 = z, 3 = w).
    ///
    /// Panics with "index out of bounds" if `index > 3`.
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1801
impl IndexMut<usize> for Vec4 {
    /// Returns a mutable reference to the component at `index` (0 = x … 3 = w).
    ///
    /// Panics with "index out of bounds" if `index > 3`.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}
1814
impl fmt::Display for Vec4 {
    /// Formats as `[x, y, z, w]`; an explicit precision (e.g. `{:.3}`) is
    /// applied to all four components.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(p) = f.precision() {
            write!(
                f,
                "[{:.*}, {:.*}, {:.*}, {:.*}]",
                p, self.x, p, self.y, p, self.z, p, self.w
            )
        } else {
            write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
        }
    }
}
1828
impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Tuple-struct style output, e.g. `Vec4(1.0, 2.0, 3.0, 4.0)`.
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}
1839
impl From<Vec4> for __m128 {
    /// Unwraps the underlying SIMD register (zero-cost).
    #[inline(always)]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<__m128> for Vec4 {
    /// Wraps a raw SIMD register in a `Vec4` (zero-cost).
    #[inline(always)]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}
1853
impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        // `_mm_loadu_ps` is an unaligned load, so the array can live anywhere.
        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
    }
}
1860
impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // `_mm_store_ps` requires a 16-byte-aligned destination; `Align16`
        // guarantees that for the stack temporary.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        // SAFETY: `out` is 16-byte aligned and exactly 16 bytes, and the store
        // fully initializes it before `assume_init` is called.
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
1873
// Conversions between `Vec4` and tuples of smaller vectors/scalars; all are
// straightforward component re-packs.

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        (v.x, v.y, v.z, v.w)
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    // Appends `w` to the xyz of `v`.
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    // Prepends `x` to the xyz of `v`.
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    // `v` supplies xy, `u` supplies zw.
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}
1929
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: assumes `crate::deref::Vec4<f32>` (which exposes the `.x`/`.y`/
        // `.z`/`.w` fields) has the same size and layout as this 16-byte
        // SIMD-backed type — an invariant upheld by the crate's deref module.
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `deref`; `&mut self` gives exclusive
        // access.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
1944
impl From<BVec4> for Vec4 {
    /// Converts a bool vector to floats: `false` -> 0.0 and `true` -> 1.0 per lane.
    #[inline]
    fn from(v: BVec4) -> Self {
        Self::new(
            f32::from(v.x),
            f32::from(v.y),
            f32::from(v.z),
            f32::from(v.w),
        )
    }
}
1956
// Only available when the SIMD mask type `BVec4A` exists (i.e. not scalar-math).
#[cfg(not(feature = "scalar-math"))]
impl From<BVec4A> for Vec4 {
    /// Converts a SIMD bool mask to floats: `false` -> 0.0 and `true` -> 1.0 per lane.
    #[inline]
    fn from(v: BVec4A) -> Self {
        // Extract the mask lanes through its `[bool; 4]` conversion first.
        let bool_array: [bool; 4] = v.into();
        Self::new(
            f32::from(bool_array[0]),
            f32::from(bool_array[1]),
            f32::from(bool_array[2]),
            f32::from(bool_array[3]),
        )
    }
}