1use crate::{f32::math, sse2::*, BVec4, BVec4A, Vec2, Vec3, Vec3A};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(target_arch = "x86")]
10use core::arch::x86::*;
11#[cfg(target_arch = "x86_64")]
12use core::arch::x86_64::*;
13
14#[cfg(feature = "zerocopy")]
15use zerocopy_derive::*;
16
/// Helper union used to reinterpret four `f32` lanes as a `Vec4` (and its
/// underlying `__m128`) in `const` contexts, where SIMD intrinsics cannot be
/// called. Both variants occupy the same 16 bytes.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}
22
/// Creates a 4-dimensional vector.
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}
29
/// A 4-dimensional vector backed by a single SSE2 `__m128` register.
///
/// `#[repr(transparent)]` guarantees this struct has exactly the layout of
/// `__m128` (16 bytes, 16-byte aligned); the raw pointer casts in
/// `to_array`, `AsRef` and `AsMut` rely on that guarantee.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);
43
44impl Vec4 {
45 pub const ZERO: Self = Self::splat(0.0);
47
48 pub const ONE: Self = Self::splat(1.0);
50
51 pub const NEG_ONE: Self = Self::splat(-1.0);
53
54 pub const MIN: Self = Self::splat(f32::MIN);
56
57 pub const MAX: Self = Self::splat(f32::MAX);
59
60 pub const NAN: Self = Self::splat(f32::NAN);
62
63 pub const INFINITY: Self = Self::splat(f32::INFINITY);
65
66 pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);
68
69 pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);
71
72 pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);
74
75 pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);
77
78 pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);
80
81 pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);
83
84 pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);
86
87 pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);
89
90 pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);
92
93 pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];
95
96 pub const USES_CORE_SIMD: bool = false;
98 pub const USES_NEON: bool = false;
100 pub const USES_SCALAR_MATH: bool = false;
102 pub const USES_SSE2: bool = true;
104 pub const USES_WASM_SIMD: bool = false;
106 #[deprecated(since = "0.31.0", note = "Renamed to USES_WASM_SIMD")]
107 pub const USES_WASM32_SIMD: bool = false;
108
109 #[inline(always)]
111 #[must_use]
112 pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
113 unsafe { UnionCast { a: [x, y, z, w] }.v }
114 }
115
116 #[inline]
118 #[must_use]
119 pub const fn splat(v: f32) -> Self {
120 unsafe { UnionCast { a: [v; 4] }.v }
121 }
122
123 #[inline]
125 #[must_use]
126 pub fn map<F>(self, f: F) -> Self
127 where
128 F: Fn(f32) -> f32,
129 {
130 Self::new(f(self.x), f(self.y), f(self.z), f(self.w))
131 }
132
    /// Creates a vector from the elements in `if_true` and `if_false`, selecting
    /// per lane: a true mask lane takes the lane from `if_true`, a false lane
    /// takes the lane from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            // Branchless bitwise select: (mask & if_true) | (!mask & if_false).
            // Assumes BVec4A lanes are all-ones / all-zeros bit patterns.
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }
148
149 #[inline]
151 #[must_use]
152 pub const fn from_array(a: [f32; 4]) -> Self {
153 Self::new(a[0], a[1], a[2], a[3])
154 }
155
    /// Returns `self` as `[x, y, z, w]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128`, which has the
        // same size as and at least the alignment of `[f32; 4]`.
        unsafe { *(self as *const Self as *const [f32; 4]) }
    }
162
    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` has fewer than 4 elements.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 4);
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }
174
    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` has fewer than 4 elements.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        // SAFETY: length checked above; `_mm_storeu_ps` is an unaligned
        // 16-byte store, so no alignment requirement on `slice`.
        unsafe {
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }
187
188 #[inline]
194 #[must_use]
195 pub fn truncate(self) -> Vec3 {
196 use crate::swizzles::Vec4Swizzles;
197 self.xyz()
198 }
199
200 #[inline]
208 #[must_use]
209 pub fn project(self) -> Vec3 {
210 Vec3::from_homogeneous(self)
211 }
212
213 #[inline]
215 #[must_use]
216 pub fn with_x(mut self, x: f32) -> Self {
217 self.x = x;
218 self
219 }
220
221 #[inline]
223 #[must_use]
224 pub fn with_y(mut self, y: f32) -> Self {
225 self.y = y;
226 self
227 }
228
229 #[inline]
231 #[must_use]
232 pub fn with_z(mut self, z: f32) -> Self {
233 self.z = z;
234 self
235 }
236
237 #[inline]
239 #[must_use]
240 pub fn with_w(mut self, w: f32) -> Self {
241 self.w = w;
242 self
243 }
244
245 #[inline]
247 #[must_use]
248 pub fn dot(self, rhs: Self) -> f32 {
249 unsafe { dot4(self.0, rhs.0) }
250 }
251
252 #[inline]
254 #[must_use]
255 pub fn dot_into_vec(self, rhs: Self) -> Self {
256 Self(unsafe { dot4_into_m128(self.0, rhs.0) })
257 }
258
259 #[inline]
266 #[must_use]
267 pub fn min(self, rhs: Self) -> Self {
268 Self(unsafe { _mm_min_ps(self.0, rhs.0) })
269 }
270
271 #[inline]
278 #[must_use]
279 pub fn max(self, rhs: Self) -> Self {
280 Self(unsafe { _mm_max_ps(self.0, rhs.0) })
281 }
282
283 #[inline]
294 #[must_use]
295 pub fn clamp(self, min: Self, max: Self) -> Self {
296 glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
297 self.max(min).min(max)
298 }
299
    /// Returns the horizontal minimum of `self`.
    ///
    /// NaN handling follows `_mm_min_ps` semantics rather than `f32::min`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Shuffle-and-min reduction: fold the high pair onto the low pair,
            // then fold lane 1 onto lane 0, and read the lowest lane.
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }
316
    /// Returns the horizontal maximum of `self`.
    ///
    /// NaN handling follows `_mm_max_ps` semantics rather than `f32::max`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Shuffle-and-max reduction mirroring `min_element`.
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }
333
334 #[doc(alias = "argmin")]
336 #[inline]
337 #[must_use]
338 pub fn min_position(self) -> usize {
339 let mut min = self.x;
340 let mut index = 0;
341 if self.y < min {
342 min = self.y;
343 index = 1;
344 }
345 if self.z < min {
346 min = self.z;
347 index = 2;
348 }
349 if self.w < min {
350 index = 3;
351 }
352 index
353 }
354
355 #[doc(alias = "argmax")]
357 #[inline]
358 #[must_use]
359 pub fn max_position(self) -> usize {
360 let mut max = self.x;
361 let mut index = 0;
362 if self.y > max {
363 max = self.y;
364 index = 1;
365 }
366 if self.z > max {
367 max = self.z;
368 index = 2;
369 }
370 if self.w > max {
371 index = 3;
372 }
373 index
374 }
375
    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y + self.z + self.w`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
            // Shuffle-and-add reduction; the total ends up in the lowest lane.
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }
389
    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y * self.z * self.w`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            // Shuffle-and-multiply reduction; the product ends up in the lowest lane.
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_11_00_01));
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }
403
404 #[inline]
410 #[must_use]
411 pub fn cmpeq(self, rhs: Self) -> BVec4A {
412 BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
413 }
414
415 #[inline]
421 #[must_use]
422 pub fn cmpne(self, rhs: Self) -> BVec4A {
423 BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
424 }
425
426 #[inline]
432 #[must_use]
433 pub fn cmpge(self, rhs: Self) -> BVec4A {
434 BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
435 }
436
437 #[inline]
443 #[must_use]
444 pub fn cmpgt(self, rhs: Self) -> BVec4A {
445 BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
446 }
447
448 #[inline]
454 #[must_use]
455 pub fn cmple(self, rhs: Self) -> BVec4A {
456 BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
457 }
458
459 #[inline]
465 #[must_use]
466 pub fn cmplt(self, rhs: Self) -> BVec4A {
467 BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
468 }
469
470 #[inline]
472 #[must_use]
473 pub fn abs(self) -> Self {
474 Self(unsafe { crate::sse2::m128_abs(self.0) })
475 }
476
    /// Returns a vector with elements representing the sign of `self`:
    /// `1.0` for positive lanes (including `+0.0` and `INFINITY`), `-1.0` for
    /// negative lanes (including `-0.0` and `NEG_INFINITY`), and `NAN`
    /// preserved for NaN lanes.
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Build ±1.0 per lane from self's sign bit, then restore NaN lanes
        // unchanged via the select below.
        let result = Self(unsafe { _mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0) });
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }
489
    /// Returns a vector with the signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // -0.0 has only the sign bit set: take the sign bit from `rhs` and
        // everything else (exponent + mantissa) from `self`.
        let mask = Self::splat(-0.0);
        Self(unsafe { _mm_or_ps(_mm_and_ps(rhs.0, mask.0), _mm_andnot_ps(mask.0, self.0)) })
    }
497
498 #[inline]
506 #[must_use]
507 pub fn is_negative_bitmask(self) -> u32 {
508 unsafe { _mm_movemask_ps(self.0) as u32 }
509 }
510
511 #[inline]
514 #[must_use]
515 pub fn is_finite(self) -> bool {
516 self.is_finite_mask().all()
517 }
518
    /// Performs `is_finite` on each element of `self`, returning a vector mask
    /// of the results.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec4A {
        // |x| < INFINITY is false for both infinities and (unordered) NaN.
        BVec4A(unsafe { _mm_cmplt_ps(crate::sse2::m128_abs(self.0), Self::INFINITY.0) })
    }
527
528 #[inline]
530 #[must_use]
531 pub fn is_nan(self) -> bool {
532 self.is_nan_mask().any()
533 }
534
535 #[inline]
539 #[must_use]
540 pub fn is_nan_mask(self) -> BVec4A {
541 BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
542 }
543
    /// Computes the length (magnitude) of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            // `dot4_in_x` produces the dot product in the lowest lane; only
            // that lane is read out after the square root.
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }
554
555 #[doc(alias = "magnitude2")]
559 #[inline]
560 #[must_use]
561 pub fn length_squared(self) -> f32 {
562 self.dot(self)
563 }
564
565 #[inline]
569 #[must_use]
570 pub fn length_recip(self) -> f32 {
571 unsafe {
572 let dot = dot4_in_x(self.0, self.0);
573 _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
574 }
575 }
576
577 #[inline]
579 #[must_use]
580 pub fn distance(self, rhs: Self) -> f32 {
581 (self - rhs).length()
582 }
583
584 #[inline]
586 #[must_use]
587 pub fn distance_squared(self, rhs: Self) -> f32 {
588 (self - rhs).length_squared()
589 }
590
591 #[inline]
593 #[must_use]
594 pub fn div_euclid(self, rhs: Self) -> Self {
595 Self::new(
596 math::div_euclid(self.x, rhs.x),
597 math::div_euclid(self.y, rhs.y),
598 math::div_euclid(self.z, rhs.z),
599 math::div_euclid(self.w, rhs.w),
600 )
601 }
602
603 #[inline]
607 #[must_use]
608 pub fn rem_euclid(self, rhs: Self) -> Self {
609 Self::new(
610 math::rem_euclid(self.x, rhs.x),
611 math::rem_euclid(self.y, rhs.y),
612 math::rem_euclid(self.z, rhs.z),
613 math::rem_euclid(self.w, rhs.w),
614 )
615 }
616
    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and non-zero-length; the
    /// `glam_assert` enforces this in builds where it is enabled, otherwise a
    /// zero or non-finite input yields a non-finite result.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            // Broadcast the length to all lanes, then divide lane-wise.
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }
637
638 #[inline]
645 #[must_use]
646 pub fn try_normalize(self) -> Option<Self> {
647 let rcp = self.length_recip();
648 if rcp.is_finite() && rcp > 0.0 {
649 Some(self * rcp)
650 } else {
651 None
652 }
653 }
654
655 #[inline]
663 #[must_use]
664 pub fn normalize_or(self, fallback: Self) -> Self {
665 let rcp = self.length_recip();
666 if rcp.is_finite() && rcp > 0.0 {
667 self * rcp
668 } else {
669 fallback
670 }
671 }
672
673 #[inline]
680 #[must_use]
681 pub fn normalize_or_zero(self) -> Self {
682 self.normalize_or(Self::ZERO)
683 }
684
685 #[inline]
689 #[must_use]
690 pub fn normalize_and_length(self) -> (Self, f32) {
691 let length = self.length();
692 let rcp = 1.0 / length;
693 if rcp.is_finite() && rcp > 0.0 {
694 (self * rcp, length)
695 } else {
696 (Self::X, 0.0)
697 }
698 }
699
700 #[inline]
704 #[must_use]
705 pub fn is_normalized(self) -> bool {
706 math::abs(self.length_squared() - 1.0) <= 2e-4
707 }
708
709 #[inline]
717 #[must_use]
718 pub fn project_onto(self, rhs: Self) -> Self {
719 let other_len_sq_rcp = rhs.dot(rhs).recip();
720 glam_assert!(other_len_sq_rcp.is_finite());
721 rhs * self.dot(rhs) * other_len_sq_rcp
722 }
723
724 #[doc(alias("plane"))]
735 #[inline]
736 #[must_use]
737 pub fn reject_from(self, rhs: Self) -> Self {
738 self - self.project_onto(rhs)
739 }
740
741 #[inline]
749 #[must_use]
750 pub fn project_onto_normalized(self, rhs: Self) -> Self {
751 glam_assert!(rhs.is_normalized());
752 rhs * self.dot(rhs)
753 }
754
755 #[doc(alias("plane"))]
766 #[inline]
767 #[must_use]
768 pub fn reject_from_normalized(self, rhs: Self) -> Self {
769 self - self.project_onto_normalized(rhs)
770 }
771
772 #[inline]
775 #[must_use]
776 pub fn round(self) -> Self {
777 Self(unsafe { m128_round(self.0) })
778 }
779
780 #[inline]
783 #[must_use]
784 pub fn floor(self) -> Self {
785 Self(unsafe { m128_floor(self.0) })
786 }
787
788 #[inline]
791 #[must_use]
792 pub fn ceil(self) -> Self {
793 Self(unsafe { m128_ceil(self.0) })
794 }
795
796 #[inline]
799 #[must_use]
800 pub fn trunc(self) -> Self {
801 Self(unsafe { m128_trunc(self.0) })
802 }
803
804 #[inline]
808 #[must_use]
809 pub fn step(self, rhs: Self) -> Self {
810 Self::select(rhs.cmplt(self), Self::ZERO, Self::ONE)
811 }
812
813 #[inline]
815 #[must_use]
816 pub fn saturate(self) -> Self {
817 self.clamp(Self::ZERO, Self::ONE)
818 }
819
820 #[inline]
827 #[must_use]
828 pub fn fract(self) -> Self {
829 self - self.trunc()
830 }
831
832 #[inline]
839 #[must_use]
840 pub fn fract_gl(self) -> Self {
841 self - self.floor()
842 }
843
844 #[inline]
847 #[must_use]
848 pub fn exp(self) -> Self {
849 Self::new(
850 math::exp(self.x),
851 math::exp(self.y),
852 math::exp(self.z),
853 math::exp(self.w),
854 )
855 }
856
857 #[inline]
859 #[must_use]
860 pub fn exp2(self) -> Self {
861 Self::new(
862 math::exp2(self.x),
863 math::exp2(self.y),
864 math::exp2(self.z),
865 math::exp2(self.w),
866 )
867 }
868
869 #[inline]
872 #[must_use]
873 pub fn ln(self) -> Self {
874 Self::new(
875 math::ln(self.x),
876 math::ln(self.y),
877 math::ln(self.z),
878 math::ln(self.w),
879 )
880 }
881
882 #[inline]
885 #[must_use]
886 pub fn log2(self) -> Self {
887 Self::new(
888 math::log2(self.x),
889 math::log2(self.y),
890 math::log2(self.z),
891 math::log2(self.w),
892 )
893 }
894
895 #[inline]
897 #[must_use]
898 pub fn powf(self, n: f32) -> Self {
899 Self::new(
900 math::powf(self.x, n),
901 math::powf(self.y, n),
902 math::powf(self.z, n),
903 math::powf(self.w, n),
904 )
905 }
906
907 #[inline]
910 #[must_use]
911 pub fn sqrt(self) -> Self {
912 Self::new(
913 math::sqrt(self.x),
914 math::sqrt(self.y),
915 math::sqrt(self.z),
916 math::sqrt(self.w),
917 )
918 }
919
920 #[inline]
922 #[must_use]
923 pub fn cos(self) -> Self {
924 Self::new(
925 math::cos(self.x),
926 math::cos(self.y),
927 math::cos(self.z),
928 math::cos(self.w),
929 )
930 }
931
932 #[inline]
934 #[must_use]
935 pub fn sin(self) -> Self {
936 Self::new(
937 math::sin(self.x),
938 math::sin(self.y),
939 math::sin(self.z),
940 math::sin(self.w),
941 )
942 }
943
944 #[inline]
946 #[must_use]
947 pub fn sin_cos(self) -> (Self, Self) {
948 let (sin_x, cos_x) = math::sin_cos(self.x);
949 let (sin_y, cos_y) = math::sin_cos(self.y);
950 let (sin_z, cos_z) = math::sin_cos(self.z);
951 let (sin_w, cos_w) = math::sin_cos(self.w);
952
953 (
954 Self::new(sin_x, sin_y, sin_z, sin_w),
955 Self::new(cos_x, cos_y, cos_z, cos_w),
956 )
957 }
958
959 #[inline]
961 #[must_use]
962 pub fn recip(self) -> Self {
963 Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
964 }
965
    /// Performs a linear interpolation between `self` and `rhs` based on the
    /// value `s`: `s == 0.0` yields `self`, `s == 1.0` yields `rhs`.
    /// `s` is not clamped, so values outside `[0, 1]` extrapolate.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }
977
978 #[inline]
983 #[must_use]
984 pub fn move_towards(self, rhs: Self, d: f32) -> Self {
985 let a = rhs - self;
986 let len = a.length();
987 if len <= d || len <= 1e-4 {
988 return rhs;
989 }
990 self + a / len * d
991 }
992
993 #[inline]
999 pub fn midpoint(self, rhs: Self) -> Self {
1000 (self + rhs) * 0.5
1001 }
1002
1003 #[inline]
1013 #[must_use]
1014 pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
1015 self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
1016 }
1017
1018 #[inline]
1024 #[must_use]
1025 pub fn clamp_length(self, min: f32, max: f32) -> Self {
1026 glam_assert!(0.0 <= min);
1027 glam_assert!(min <= max);
1028 let length_sq = self.length_squared();
1029 if length_sq < min * min {
1030 min * (self / math::sqrt(length_sq))
1031 } else if length_sq > max * max {
1032 max * (self / math::sqrt(length_sq))
1033 } else {
1034 self
1035 }
1036 }
1037
1038 #[inline]
1044 #[must_use]
1045 pub fn clamp_length_max(self, max: f32) -> Self {
1046 glam_assert!(0.0 <= max);
1047 let length_sq = self.length_squared();
1048 if length_sq > max * max {
1049 max * (self / math::sqrt(length_sq))
1050 } else {
1051 self
1052 }
1053 }
1054
1055 #[inline]
1061 #[must_use]
1062 pub fn clamp_length_min(self, min: f32) -> Self {
1063 glam_assert!(0.0 <= min);
1064 let length_sq = self.length_squared();
1065 if length_sq < min * min {
1066 min * (self / math::sqrt(length_sq))
1067 } else {
1068 self
1069 }
1070 }
1071
1072 #[inline]
1080 #[must_use]
1081 pub fn mul_add(self, a: Self, b: Self) -> Self {
1082 #[cfg(target_feature = "fma")]
1083 unsafe {
1084 Self(_mm_fmadd_ps(self.0, a.0, b.0))
1085 }
1086 #[cfg(not(target_feature = "fma"))]
1087 Self::new(
1088 math::mul_add(self.x, a.x, b.x),
1089 math::mul_add(self.y, a.y, b.y),
1090 math::mul_add(self.z, a.z, b.z),
1091 math::mul_add(self.w, a.w, b.w),
1092 )
1093 }
1094
1095 #[inline]
1104 #[must_use]
1105 pub fn reflect(self, normal: Self) -> Self {
1106 glam_assert!(normal.is_normalized());
1107 self - 2.0 * self.dot(normal) * normal
1108 }
1109
1110 #[inline]
1120 #[must_use]
1121 pub fn refract(self, normal: Self, eta: f32) -> Self {
1122 glam_assert!(self.is_normalized());
1123 glam_assert!(normal.is_normalized());
1124 let n_dot_i = normal.dot(self);
1125 let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
1126 if k >= 0.0 {
1127 eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
1128 } else {
1129 Self::ZERO
1130 }
1131 }
1132
1133 #[inline]
1135 #[must_use]
1136 pub fn as_dvec4(self) -> crate::DVec4 {
1137 crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
1138 }
1139
1140 #[inline]
1142 #[must_use]
1143 pub fn as_i8vec4(self) -> crate::I8Vec4 {
1144 crate::I8Vec4::new(self.x as i8, self.y as i8, self.z as i8, self.w as i8)
1145 }
1146
1147 #[inline]
1149 #[must_use]
1150 pub fn as_u8vec4(self) -> crate::U8Vec4 {
1151 crate::U8Vec4::new(self.x as u8, self.y as u8, self.z as u8, self.w as u8)
1152 }
1153
1154 #[inline]
1156 #[must_use]
1157 pub fn as_i16vec4(self) -> crate::I16Vec4 {
1158 crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
1159 }
1160
1161 #[inline]
1163 #[must_use]
1164 pub fn as_u16vec4(self) -> crate::U16Vec4 {
1165 crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
1166 }
1167
1168 #[inline]
1170 #[must_use]
1171 pub fn as_ivec4(self) -> crate::IVec4 {
1172 crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
1173 }
1174
1175 #[inline]
1177 #[must_use]
1178 pub fn as_uvec4(self) -> crate::UVec4 {
1179 crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
1180 }
1181
1182 #[inline]
1184 #[must_use]
1185 pub fn as_i64vec4(self) -> crate::I64Vec4 {
1186 crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
1187 }
1188
1189 #[inline]
1191 #[must_use]
1192 pub fn as_u64vec4(self) -> crate::U64Vec4 {
1193 crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
1194 }
1195
1196 #[inline]
1198 #[must_use]
1199 pub fn as_isizevec4(self) -> crate::ISizeVec4 {
1200 crate::ISizeVec4::new(
1201 self.x as isize,
1202 self.y as isize,
1203 self.z as isize,
1204 self.w as isize,
1205 )
1206 }
1207
1208 #[inline]
1210 #[must_use]
1211 pub fn as_usizevec4(self) -> crate::USizeVec4 {
1212 crate::USizeVec4::new(
1213 self.x as usize,
1214 self.y as usize,
1215 self.z as usize,
1216 self.w as usize,
1217 )
1218 }
1219}
1220
impl Default for Vec4 {
    /// Returns the zero vector.
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
1227
impl PartialEq for Vec4 {
    /// Lane-wise IEEE equality: any NaN lane makes the whole comparison
    /// false, consistent with `f32`'s `PartialEq`.
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1234
1235impl Div for Vec4 {
1236 type Output = Self;
1237 #[inline]
1238 fn div(self, rhs: Self) -> Self {
1239 Self(unsafe { _mm_div_ps(self.0, rhs.0) })
1240 }
1241}
1242
1243impl Div<&Self> for Vec4 {
1244 type Output = Self;
1245 #[inline]
1246 fn div(self, rhs: &Self) -> Self {
1247 self.div(*rhs)
1248 }
1249}
1250
1251impl Div<&Vec4> for &Vec4 {
1252 type Output = Vec4;
1253 #[inline]
1254 fn div(self, rhs: &Vec4) -> Vec4 {
1255 (*self).div(*rhs)
1256 }
1257}
1258
1259impl Div<Vec4> for &Vec4 {
1260 type Output = Vec4;
1261 #[inline]
1262 fn div(self, rhs: Vec4) -> Vec4 {
1263 (*self).div(rhs)
1264 }
1265}
1266
1267impl DivAssign for Vec4 {
1268 #[inline]
1269 fn div_assign(&mut self, rhs: Self) {
1270 self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
1271 }
1272}
1273
1274impl DivAssign<&Self> for Vec4 {
1275 #[inline]
1276 fn div_assign(&mut self, rhs: &Self) {
1277 self.div_assign(*rhs);
1278 }
1279}
1280
1281impl Div<f32> for Vec4 {
1282 type Output = Self;
1283 #[inline]
1284 fn div(self, rhs: f32) -> Self {
1285 Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
1286 }
1287}
1288
1289impl Div<&f32> for Vec4 {
1290 type Output = Self;
1291 #[inline]
1292 fn div(self, rhs: &f32) -> Self {
1293 self.div(*rhs)
1294 }
1295}
1296
1297impl Div<&f32> for &Vec4 {
1298 type Output = Vec4;
1299 #[inline]
1300 fn div(self, rhs: &f32) -> Vec4 {
1301 (*self).div(*rhs)
1302 }
1303}
1304
1305impl Div<f32> for &Vec4 {
1306 type Output = Vec4;
1307 #[inline]
1308 fn div(self, rhs: f32) -> Vec4 {
1309 (*self).div(rhs)
1310 }
1311}
1312
1313impl DivAssign<f32> for Vec4 {
1314 #[inline]
1315 fn div_assign(&mut self, rhs: f32) {
1316 self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
1317 }
1318}
1319
1320impl DivAssign<&f32> for Vec4 {
1321 #[inline]
1322 fn div_assign(&mut self, rhs: &f32) {
1323 self.div_assign(*rhs);
1324 }
1325}
1326
1327impl Div<Vec4> for f32 {
1328 type Output = Vec4;
1329 #[inline]
1330 fn div(self, rhs: Vec4) -> Vec4 {
1331 Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
1332 }
1333}
1334
1335impl Div<&Vec4> for f32 {
1336 type Output = Vec4;
1337 #[inline]
1338 fn div(self, rhs: &Vec4) -> Vec4 {
1339 self.div(*rhs)
1340 }
1341}
1342
1343impl Div<&Vec4> for &f32 {
1344 type Output = Vec4;
1345 #[inline]
1346 fn div(self, rhs: &Vec4) -> Vec4 {
1347 (*self).div(*rhs)
1348 }
1349}
1350
1351impl Div<Vec4> for &f32 {
1352 type Output = Vec4;
1353 #[inline]
1354 fn div(self, rhs: Vec4) -> Vec4 {
1355 (*self).div(rhs)
1356 }
1357}
1358
1359impl Mul for Vec4 {
1360 type Output = Self;
1361 #[inline]
1362 fn mul(self, rhs: Self) -> Self {
1363 Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
1364 }
1365}
1366
1367impl Mul<&Self> for Vec4 {
1368 type Output = Self;
1369 #[inline]
1370 fn mul(self, rhs: &Self) -> Self {
1371 self.mul(*rhs)
1372 }
1373}
1374
1375impl Mul<&Vec4> for &Vec4 {
1376 type Output = Vec4;
1377 #[inline]
1378 fn mul(self, rhs: &Vec4) -> Vec4 {
1379 (*self).mul(*rhs)
1380 }
1381}
1382
1383impl Mul<Vec4> for &Vec4 {
1384 type Output = Vec4;
1385 #[inline]
1386 fn mul(self, rhs: Vec4) -> Vec4 {
1387 (*self).mul(rhs)
1388 }
1389}
1390
1391impl MulAssign for Vec4 {
1392 #[inline]
1393 fn mul_assign(&mut self, rhs: Self) {
1394 self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
1395 }
1396}
1397
1398impl MulAssign<&Self> for Vec4 {
1399 #[inline]
1400 fn mul_assign(&mut self, rhs: &Self) {
1401 self.mul_assign(*rhs);
1402 }
1403}
1404
1405impl Mul<f32> for Vec4 {
1406 type Output = Self;
1407 #[inline]
1408 fn mul(self, rhs: f32) -> Self {
1409 Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
1410 }
1411}
1412
1413impl Mul<&f32> for Vec4 {
1414 type Output = Self;
1415 #[inline]
1416 fn mul(self, rhs: &f32) -> Self {
1417 self.mul(*rhs)
1418 }
1419}
1420
1421impl Mul<&f32> for &Vec4 {
1422 type Output = Vec4;
1423 #[inline]
1424 fn mul(self, rhs: &f32) -> Vec4 {
1425 (*self).mul(*rhs)
1426 }
1427}
1428
1429impl Mul<f32> for &Vec4 {
1430 type Output = Vec4;
1431 #[inline]
1432 fn mul(self, rhs: f32) -> Vec4 {
1433 (*self).mul(rhs)
1434 }
1435}
1436
1437impl MulAssign<f32> for Vec4 {
1438 #[inline]
1439 fn mul_assign(&mut self, rhs: f32) {
1440 self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
1441 }
1442}
1443
1444impl MulAssign<&f32> for Vec4 {
1445 #[inline]
1446 fn mul_assign(&mut self, rhs: &f32) {
1447 self.mul_assign(*rhs);
1448 }
1449}
1450
1451impl Mul<Vec4> for f32 {
1452 type Output = Vec4;
1453 #[inline]
1454 fn mul(self, rhs: Vec4) -> Vec4 {
1455 Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
1456 }
1457}
1458
1459impl Mul<&Vec4> for f32 {
1460 type Output = Vec4;
1461 #[inline]
1462 fn mul(self, rhs: &Vec4) -> Vec4 {
1463 self.mul(*rhs)
1464 }
1465}
1466
1467impl Mul<&Vec4> for &f32 {
1468 type Output = Vec4;
1469 #[inline]
1470 fn mul(self, rhs: &Vec4) -> Vec4 {
1471 (*self).mul(*rhs)
1472 }
1473}
1474
1475impl Mul<Vec4> for &f32 {
1476 type Output = Vec4;
1477 #[inline]
1478 fn mul(self, rhs: Vec4) -> Vec4 {
1479 (*self).mul(rhs)
1480 }
1481}
1482
1483impl Add for Vec4 {
1484 type Output = Self;
1485 #[inline]
1486 fn add(self, rhs: Self) -> Self {
1487 Self(unsafe { _mm_add_ps(self.0, rhs.0) })
1488 }
1489}
1490
1491impl Add<&Self> for Vec4 {
1492 type Output = Self;
1493 #[inline]
1494 fn add(self, rhs: &Self) -> Self {
1495 self.add(*rhs)
1496 }
1497}
1498
1499impl Add<&Vec4> for &Vec4 {
1500 type Output = Vec4;
1501 #[inline]
1502 fn add(self, rhs: &Vec4) -> Vec4 {
1503 (*self).add(*rhs)
1504 }
1505}
1506
1507impl Add<Vec4> for &Vec4 {
1508 type Output = Vec4;
1509 #[inline]
1510 fn add(self, rhs: Vec4) -> Vec4 {
1511 (*self).add(rhs)
1512 }
1513}
1514
1515impl AddAssign for Vec4 {
1516 #[inline]
1517 fn add_assign(&mut self, rhs: Self) {
1518 self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
1519 }
1520}
1521
1522impl AddAssign<&Self> for Vec4 {
1523 #[inline]
1524 fn add_assign(&mut self, rhs: &Self) {
1525 self.add_assign(*rhs);
1526 }
1527}
1528
1529impl Add<f32> for Vec4 {
1530 type Output = Self;
1531 #[inline]
1532 fn add(self, rhs: f32) -> Self {
1533 Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
1534 }
1535}
1536
1537impl Add<&f32> for Vec4 {
1538 type Output = Self;
1539 #[inline]
1540 fn add(self, rhs: &f32) -> Self {
1541 self.add(*rhs)
1542 }
1543}
1544
1545impl Add<&f32> for &Vec4 {
1546 type Output = Vec4;
1547 #[inline]
1548 fn add(self, rhs: &f32) -> Vec4 {
1549 (*self).add(*rhs)
1550 }
1551}
1552
1553impl Add<f32> for &Vec4 {
1554 type Output = Vec4;
1555 #[inline]
1556 fn add(self, rhs: f32) -> Vec4 {
1557 (*self).add(rhs)
1558 }
1559}
1560
1561impl AddAssign<f32> for Vec4 {
1562 #[inline]
1563 fn add_assign(&mut self, rhs: f32) {
1564 self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
1565 }
1566}
1567
1568impl AddAssign<&f32> for Vec4 {
1569 #[inline]
1570 fn add_assign(&mut self, rhs: &f32) {
1571 self.add_assign(*rhs);
1572 }
1573}
1574
1575impl Add<Vec4> for f32 {
1576 type Output = Vec4;
1577 #[inline]
1578 fn add(self, rhs: Vec4) -> Vec4 {
1579 Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
1580 }
1581}
1582
1583impl Add<&Vec4> for f32 {
1584 type Output = Vec4;
1585 #[inline]
1586 fn add(self, rhs: &Vec4) -> Vec4 {
1587 self.add(*rhs)
1588 }
1589}
1590
1591impl Add<&Vec4> for &f32 {
1592 type Output = Vec4;
1593 #[inline]
1594 fn add(self, rhs: &Vec4) -> Vec4 {
1595 (*self).add(*rhs)
1596 }
1597}
1598
1599impl Add<Vec4> for &f32 {
1600 type Output = Vec4;
1601 #[inline]
1602 fn add(self, rhs: Vec4) -> Vec4 {
1603 (*self).add(rhs)
1604 }
1605}
1606
1607impl Sub for Vec4 {
1608 type Output = Self;
1609 #[inline]
1610 fn sub(self, rhs: Self) -> Self {
1611 Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
1612 }
1613}
1614
1615impl Sub<&Self> for Vec4 {
1616 type Output = Self;
1617 #[inline]
1618 fn sub(self, rhs: &Self) -> Self {
1619 self.sub(*rhs)
1620 }
1621}
1622
1623impl Sub<&Vec4> for &Vec4 {
1624 type Output = Vec4;
1625 #[inline]
1626 fn sub(self, rhs: &Vec4) -> Vec4 {
1627 (*self).sub(*rhs)
1628 }
1629}
1630
1631impl Sub<Vec4> for &Vec4 {
1632 type Output = Vec4;
1633 #[inline]
1634 fn sub(self, rhs: Vec4) -> Vec4 {
1635 (*self).sub(rhs)
1636 }
1637}
1638
1639impl SubAssign for Vec4 {
1640 #[inline]
1641 fn sub_assign(&mut self, rhs: Self) {
1642 self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
1643 }
1644}
1645
1646impl SubAssign<&Self> for Vec4 {
1647 #[inline]
1648 fn sub_assign(&mut self, rhs: &Self) {
1649 self.sub_assign(*rhs);
1650 }
1651}
1652
1653impl Sub<f32> for Vec4 {
1654 type Output = Self;
1655 #[inline]
1656 fn sub(self, rhs: f32) -> Self {
1657 Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
1658 }
1659}
1660
1661impl Sub<&f32> for Vec4 {
1662 type Output = Self;
1663 #[inline]
1664 fn sub(self, rhs: &f32) -> Self {
1665 self.sub(*rhs)
1666 }
1667}
1668
1669impl Sub<&f32> for &Vec4 {
1670 type Output = Vec4;
1671 #[inline]
1672 fn sub(self, rhs: &f32) -> Vec4 {
1673 (*self).sub(*rhs)
1674 }
1675}
1676
1677impl Sub<f32> for &Vec4 {
1678 type Output = Vec4;
1679 #[inline]
1680 fn sub(self, rhs: f32) -> Vec4 {
1681 (*self).sub(rhs)
1682 }
1683}
1684
1685impl SubAssign<f32> for Vec4 {
1686 #[inline]
1687 fn sub_assign(&mut self, rhs: f32) {
1688 self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
1689 }
1690}
1691
1692impl SubAssign<&f32> for Vec4 {
1693 #[inline]
1694 fn sub_assign(&mut self, rhs: &f32) {
1695 self.sub_assign(*rhs);
1696 }
1697}
1698
1699impl Sub<Vec4> for f32 {
1700 type Output = Vec4;
1701 #[inline]
1702 fn sub(self, rhs: Vec4) -> Vec4 {
1703 Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
1704 }
1705}
1706
1707impl Sub<&Vec4> for f32 {
1708 type Output = Vec4;
1709 #[inline]
1710 fn sub(self, rhs: &Vec4) -> Vec4 {
1711 self.sub(*rhs)
1712 }
1713}
1714
1715impl Sub<&Vec4> for &f32 {
1716 type Output = Vec4;
1717 #[inline]
1718 fn sub(self, rhs: &Vec4) -> Vec4 {
1719 (*self).sub(*rhs)
1720 }
1721}
1722
1723impl Sub<Vec4> for &f32 {
1724 type Output = Vec4;
1725 #[inline]
1726 fn sub(self, rhs: Vec4) -> Vec4 {
1727 (*self).sub(rhs)
1728 }
1729}
1730
impl Rem for Vec4 {
    type Output = Self;
    // Element-wise remainder computed as `self - floor(self / rhs) * rhs`
    // (floored / "GLSL mod" semantics). NOTE(review): this differs from
    // scalar `f32 % f32`, which truncates toward zero — for mixed signs the
    // result here takes the sign of `rhs`, not `self`. Confirm this matches
    // the scalar backend's convention.
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}
1741
1742impl Rem<&Self> for Vec4 {
1743 type Output = Self;
1744 #[inline]
1745 fn rem(self, rhs: &Self) -> Self {
1746 self.rem(*rhs)
1747 }
1748}
1749
1750impl Rem<&Vec4> for &Vec4 {
1751 type Output = Vec4;
1752 #[inline]
1753 fn rem(self, rhs: &Vec4) -> Vec4 {
1754 (*self).rem(*rhs)
1755 }
1756}
1757
1758impl Rem<Vec4> for &Vec4 {
1759 type Output = Vec4;
1760 #[inline]
1761 fn rem(self, rhs: Vec4) -> Vec4 {
1762 (*self).rem(rhs)
1763 }
1764}
1765
1766impl RemAssign for Vec4 {
1767 #[inline]
1768 fn rem_assign(&mut self, rhs: Self) {
1769 *self = self.rem(rhs);
1770 }
1771}
1772
1773impl RemAssign<&Self> for Vec4 {
1774 #[inline]
1775 fn rem_assign(&mut self, rhs: &Self) {
1776 self.rem_assign(*rhs);
1777 }
1778}
1779
1780impl Rem<f32> for Vec4 {
1781 type Output = Self;
1782 #[inline]
1783 fn rem(self, rhs: f32) -> Self {
1784 self.rem(Self::splat(rhs))
1785 }
1786}
1787
1788impl Rem<&f32> for Vec4 {
1789 type Output = Self;
1790 #[inline]
1791 fn rem(self, rhs: &f32) -> Self {
1792 self.rem(*rhs)
1793 }
1794}
1795
1796impl Rem<&f32> for &Vec4 {
1797 type Output = Vec4;
1798 #[inline]
1799 fn rem(self, rhs: &f32) -> Vec4 {
1800 (*self).rem(*rhs)
1801 }
1802}
1803
1804impl Rem<f32> for &Vec4 {
1805 type Output = Vec4;
1806 #[inline]
1807 fn rem(self, rhs: f32) -> Vec4 {
1808 (*self).rem(rhs)
1809 }
1810}
1811
1812impl RemAssign<f32> for Vec4 {
1813 #[inline]
1814 fn rem_assign(&mut self, rhs: f32) {
1815 *self = self.rem(Self::splat(rhs));
1816 }
1817}
1818
1819impl RemAssign<&f32> for Vec4 {
1820 #[inline]
1821 fn rem_assign(&mut self, rhs: &f32) {
1822 self.rem_assign(*rhs);
1823 }
1824}
1825
1826impl Rem<Vec4> for f32 {
1827 type Output = Vec4;
1828 #[inline]
1829 fn rem(self, rhs: Vec4) -> Vec4 {
1830 Vec4::splat(self).rem(rhs)
1831 }
1832}
1833
1834impl Rem<&Vec4> for f32 {
1835 type Output = Vec4;
1836 #[inline]
1837 fn rem(self, rhs: &Vec4) -> Vec4 {
1838 self.rem(*rhs)
1839 }
1840}
1841
1842impl Rem<&Vec4> for &f32 {
1843 type Output = Vec4;
1844 #[inline]
1845 fn rem(self, rhs: &Vec4) -> Vec4 {
1846 (*self).rem(*rhs)
1847 }
1848}
1849
1850impl Rem<Vec4> for &f32 {
1851 type Output = Vec4;
1852 #[inline]
1853 fn rem(self, rhs: Vec4) -> Vec4 {
1854 (*self).rem(rhs)
1855 }
1856}
1857
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        // SAFETY: `Vec4` is `#[repr(transparent)]` over `__m128`, a 16-byte
        // SIMD register holding four `f32` lanes, so reinterpreting it as
        // `[f32; 4]` reads the same bytes; alignment (16) exceeds the array's.
        unsafe { &*(self as *const Self as *const [f32; 4]) }
    }
}
1864
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        // SAFETY: same layout argument as `AsRef`: `#[repr(transparent)]`
        // over `__m128` gives the size and alignment of four packed `f32`s,
        // and `&mut self` guarantees unique access for the mutable view.
        unsafe { &mut *(self as *mut Self as *mut [f32; 4]) }
    }
}
1871
1872impl Sum for Vec4 {
1873 #[inline]
1874 fn sum<I>(iter: I) -> Self
1875 where
1876 I: Iterator<Item = Self>,
1877 {
1878 iter.fold(Self::ZERO, Self::add)
1879 }
1880}
1881
1882impl<'a> Sum<&'a Self> for Vec4 {
1883 #[inline]
1884 fn sum<I>(iter: I) -> Self
1885 where
1886 I: Iterator<Item = &'a Self>,
1887 {
1888 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1889 }
1890}
1891
1892impl Product for Vec4 {
1893 #[inline]
1894 fn product<I>(iter: I) -> Self
1895 where
1896 I: Iterator<Item = Self>,
1897 {
1898 iter.fold(Self::ONE, Self::mul)
1899 }
1900}
1901
1902impl<'a> Product<&'a Self> for Vec4 {
1903 #[inline]
1904 fn product<I>(iter: I) -> Self
1905 where
1906 I: Iterator<Item = &'a Self>,
1907 {
1908 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1909 }
1910}
1911
1912impl Neg for Vec4 {
1913 type Output = Self;
1914 #[inline]
1915 fn neg(self) -> Self {
1916 Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
1917 }
1918}
1919
1920impl Neg for &Vec4 {
1921 type Output = Vec4;
1922 #[inline]
1923 fn neg(self) -> Vec4 {
1924 (*self).neg()
1925 }
1926}
1927
1928impl Index<usize> for Vec4 {
1929 type Output = f32;
1930 #[inline]
1931 fn index(&self, index: usize) -> &Self::Output {
1932 match index {
1933 0 => &self.x,
1934 1 => &self.y,
1935 2 => &self.z,
1936 3 => &self.w,
1937 _ => panic!("index out of bounds"),
1938 }
1939 }
1940}
1941
1942impl IndexMut<usize> for Vec4 {
1943 #[inline]
1944 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1945 match index {
1946 0 => &mut self.x,
1947 1 => &mut self.y,
1948 2 => &mut self.z,
1949 3 => &mut self.w,
1950 _ => panic!("index out of bounds"),
1951 }
1952 }
1953}
1954
1955impl fmt::Display for Vec4 {
1956 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1957 if let Some(p) = f.precision() {
1958 write!(
1959 f,
1960 "[{:.*}, {:.*}, {:.*}, {:.*}]",
1961 p, self.x, p, self.y, p, self.z, p, self.w
1962 )
1963 } else {
1964 write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
1965 }
1966 }
1967}
1968
1969impl fmt::Debug for Vec4 {
1970 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1971 fmt.debug_tuple(stringify!(Vec4))
1972 .field(&self.x)
1973 .field(&self.y)
1974 .field(&self.z)
1975 .field(&self.w)
1976 .finish()
1977 }
1978}
1979
1980impl From<Vec4> for __m128 {
1981 #[inline(always)]
1982 fn from(t: Vec4) -> Self {
1983 t.0
1984 }
1985}
1986
1987impl From<__m128> for Vec4 {
1988 #[inline(always)]
1989 fn from(t: __m128) -> Self {
1990 Self(t)
1991 }
1992}
1993
impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        // SAFETY: `_mm_loadu_ps` is an unaligned 16-byte load, so the array's
        // natural 4-byte alignment suffices and all 16 bytes read from
        // `a.as_ptr()` are initialized.
        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
    }
}
2000
impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // `_mm_store_ps` requires a 16-byte-aligned destination, so the output
        // array is wrapped in `Align16` rather than using an unaligned store.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `out` is 16-byte aligned and exactly 16 bytes, and the
            // store writes all of them, so `assume_init` below is sound.
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2013
2014impl From<(f32, f32, f32, f32)> for Vec4 {
2015 #[inline]
2016 fn from(t: (f32, f32, f32, f32)) -> Self {
2017 Self::new(t.0, t.1, t.2, t.3)
2018 }
2019}
2020
2021impl From<Vec4> for (f32, f32, f32, f32) {
2022 #[inline]
2023 fn from(v: Vec4) -> Self {
2024 (v.x, v.y, v.z, v.w)
2025 }
2026}
2027
2028impl From<(Vec3A, f32)> for Vec4 {
2029 #[inline]
2030 fn from((v, w): (Vec3A, f32)) -> Self {
2031 v.extend(w)
2032 }
2033}
2034
2035impl From<(f32, Vec3A)> for Vec4 {
2036 #[inline]
2037 fn from((x, v): (f32, Vec3A)) -> Self {
2038 Self::new(x, v.x, v.y, v.z)
2039 }
2040}
2041
2042impl From<(Vec3, f32)> for Vec4 {
2043 #[inline]
2044 fn from((v, w): (Vec3, f32)) -> Self {
2045 Self::new(v.x, v.y, v.z, w)
2046 }
2047}
2048
2049impl From<(f32, Vec3)> for Vec4 {
2050 #[inline]
2051 fn from((x, v): (f32, Vec3)) -> Self {
2052 Self::new(x, v.x, v.y, v.z)
2053 }
2054}
2055
2056impl From<(Vec2, f32, f32)> for Vec4 {
2057 #[inline]
2058 fn from((v, z, w): (Vec2, f32, f32)) -> Self {
2059 Self::new(v.x, v.y, z, w)
2060 }
2061}
2062
2063impl From<(Vec2, Vec2)> for Vec4 {
2064 #[inline]
2065 fn from((v, u): (Vec2, Vec2)) -> Self {
2066 Self::new(v.x, v.y, u.x, u.y)
2067 }
2068}
2069
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: relies on `crate::deref::Vec4<f32>` laying out four
        // consecutive `f32` fields matching this `#[repr(transparent)]`
        // `__m128` wrapper's bytes; this cast is what makes `.x`/`.y`/`.z`/`.w`
        // field access work on the SIMD-backed type.
        unsafe { &*(self as *const Self).cast() }
    }
}
2077
impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `Deref`; `&mut self` guarantees
        // unique access, so handing out a mutable field view is sound.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2084
2085impl From<BVec4> for Vec4 {
2086 #[inline]
2087 fn from(v: BVec4) -> Self {
2088 Self::new(
2089 f32::from(v.x),
2090 f32::from(v.y),
2091 f32::from(v.z),
2092 f32::from(v.w),
2093 )
2094 }
2095}
2096
2097#[cfg(not(feature = "scalar-math"))]
2098impl From<BVec4A> for Vec4 {
2099 #[inline]
2100 fn from(v: BVec4A) -> Self {
2101 let bool_array: [bool; 4] = v.into();
2102 Self::new(
2103 f32::from(bool_array[0]),
2104 f32::from(bool_array[1]),
2105 f32::from(bool_array[2]),
2106 f32::from(bool_array[3]),
2107 )
2108 }
2109}