1use crate::{f32::math, sse2::*, BVec3, BVec3A, FloatExt, Quat, Vec2, Vec3, Vec4};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(target_arch = "x86")]
10use core::arch::x86::*;
11#[cfg(target_arch = "x86_64")]
12use core::arch::x86_64::*;
13
14#[cfg(feature = "zerocopy")]
15use zerocopy_derive::*;
16
/// Helper union for bit-casting a `[f32; 4]` into the `__m128`-backed `Vec3A`
/// inside `const fn`s, where SIMD load intrinsics cannot be called.
/// `#[repr(C)]` guarantees both fields share the same 16 bytes.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}
22
/// Creates a 3-dimensional SIMD-aligned vector.
///
/// Free-function shorthand for [`Vec3A::new`].
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}
29
/// A 3-dimensional `f32` vector backed by a 16-byte-aligned SSE2 `__m128`.
///
/// The fourth SIMD lane is unused padding and is ignored by all operations.
/// `#[repr(transparent)]` makes the type layout-identical to `__m128`, which
/// the `bytemuck`/`zerocopy` derives and the pointer casts below rely on.
#[derive(Clone, Copy)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) __m128);
47
impl Vec3A {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];

    // Backend flags: this file is the SSE2 implementation of `Vec3A`.
    pub const USES_CORE_SIMD: bool = false;
    pub const USES_NEON: bool = false;
    pub const USES_SCALAR_MATH: bool = false;
    pub const USES_SSE2: bool = true;
    pub const USES_WASM32_SIMD: bool = false;

    /// Creates a new vector.
    ///
    /// The unused fourth SIMD lane is filled with `z` (not zero); it never
    /// participates in any 3D operation. `UnionCast` is used because SIMD
    /// load intrinsics are not callable in a `const fn`.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

    /// Returns a vector containing each element of `self` modified by a
    /// mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(f32) -> f32,
    {
        Self::new(f(self.x), f(self.y), f(self.z))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`,
    /// selecting which to use for each element of `self`.
    ///
    /// For each lane, picks `if_true` where the corresponding mask lane is
    /// set and `if_false` otherwise (branchless and/andnot/or blend).
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }

    /// `[x, y, z]`
    ///
    /// Sound because `Vec3A` is `#[repr(transparent)]` over `__m128`, whose
    /// first three lanes are the vector's components.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 3] {
        unsafe { *(self as *const Self as *const [f32; 3]) }
    }

    /// Creates a vector from the first 3 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        assert!(slice.len() >= 3);
        Self::new(slice[0], slice[1], slice[2])
    }

    /// Writes the elements of `self` to the first 3 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[..3].copy_from_slice(&self.to_array());
    }

    /// Creates a [`Vec3A`] from the `x`, `y` and `z` elements of `self`,
    /// discarding `w`.
    ///
    /// Reuses the `Vec4`'s `__m128` directly, so the padding lane keeps
    /// whatever `w` was.
    #[inline]
    #[must_use]
    pub fn from_vec4(v: Vec4) -> Self {
        Self(v.0)
    }

    /// Creates a 4D vector from `self` and the given `w` value.
    #[inline]
    #[must_use]
    pub fn extend(self, w: f32) -> Vec4 {
        Vec4::new(self.x, self.y, self.z, w)
    }

    /// Creates a 2D vector from the `x` and `y` elements of `self`,
    /// discarding `z`.
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec2 {
        use crate::swizzles::Vec3Swizzles;
        self.xy()
    }

    /// Converts `self` to the (non-SIMD, unaligned) [`Vec3`] type.
    #[inline]
    #[must_use]
    pub fn to_vec3(self) -> Vec3 {
        Vec3::from(self)
    }

    /// Creates a new vector with `x` replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: f32) -> Self {
        self.x = x;
        self
    }

    /// Creates a new vector with `y` replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: f32) -> Self {
        self.y = y;
        self
    }

    /// Creates a new vector with `z` replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_z(mut self, z: f32) -> Self {
        self.z = z;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot3(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self`
    /// and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot3_into_m128(self.0, rhs.0) })
    }

    /// Computes the cross product of `self` and `rhs`.
    ///
    /// Uses the two-shuffle trick: with `v_zxy` denoting `v` rotated so the
    /// lanes hold `(z, x, y)`, the cross product is
    /// `(a_zxy * b - b_zxy * a)` rotated once more by the same shuffle.
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        unsafe {
            let lhszxy = _mm_shuffle_ps(self.0, self.0, 0b01_01_00_10);
            let rhszxy = _mm_shuffle_ps(rhs.0, rhs.0, 0b01_01_00_10);
            let lhszxy_rhs = _mm_mul_ps(lhszxy, rhs.0);
            let rhszxy_lhs = _mm_mul_ps(rhszxy, self.0);
            let sub = _mm_sub_ps(lhszxy_rhs, rhszxy_lhs);
            Self(_mm_shuffle_ps(sub, sub, 0b01_01_00_10))
        }
    }

    /// Returns a vector containing the minimum values for each element of
    /// `self` and `rhs`.
    ///
    /// NOTE(review): `_mm_min_ps` NaN/signed-zero semantics are operand-order
    /// dependent; this follows the SSE2 behavior, not `f32::min`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

    /// Component-wise clamping of values.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`'s `x`, `y` and `z` lanes.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Fold z into the low lanes, then y into x; result is in lane 0.
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b01_01_10_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the horizontal maximum of `self`'s `x`, `y` and `z` lanes.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            // Fold z into the low lanes, then y into x; result is in lane 0.
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_10_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the index of the first minimum element of `self`.
    ///
    /// Strict `<` comparisons mean ties resolve to the lowest index.
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        let mut min = self.x;
        let mut index = 0;
        if self.y < min {
            min = self.y;
            index = 1;
        }
        if self.z < min {
            index = 2;
        }
        index
    }

    /// Returns the index of the first maximum element of `self`.
    ///
    /// Strict `>` comparisons mean ties resolve to the lowest index.
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        let mut max = self.x;
        let mut index = 0;
        if self.y > max {
            max = self.y;
            index = 1;
        }
        if self.z > max {
            index = 2;
        }
        index
    }

    /// Returns the sum of all elements of `self`, i.e. `x + y + z`.
    ///
    /// Shuffling against `ZERO` keeps the unused padding lane from leaking
    /// into the sum.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, Self::ZERO.0, 0b00_11_00_01));
            let v = _mm_add_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the product of all elements of `self`, i.e. `x * y * z`.
    ///
    /// Shuffling against `ONE` neutralizes the unused padding lane.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, Self::ONE.0, 0b00_11_00_01));
            let v = _mm_mul_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_10));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for
    /// each element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`:
    /// `1.0` if positive (or `+0.0`), `-1.0` if negative (or `-0.0`),
    /// `NAN` if the element is `NAN` (restored via the NaN-mask select).
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        // Copy each sign bit onto 1.0, then put NaN inputs back unchanged.
        let result = Self(unsafe { _mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0) });
        let mask = self.is_nan_mask();
        Self::select(mask, self, result)
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        // -0.0 is a sign-bit-only mask.
        let mask = Self::splat(-0.0);
        Self(unsafe { _mm_or_ps(_mm_and_ps(rhs.0, mask.0), _mm_andnot_ps(mask.0, self.0)) })
    }

    /// Returns a bitmask with the lowest 3 bits set to the sign bits from
    /// the elements of `self` (bit 0 = `x`, bit 1 = `y`, bit 2 = `z`).
    ///
    /// The `& 0x7` drops the padding lane's sign bit.
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe { (_mm_movemask_ps(self.0) as u32) & 0x7 }
    }

    /// Returns `true` if, and only if, all elements are finite.
    /// Returns `false` if any element is infinite or `NaN`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.is_finite_mask().all()
    }

    /// Performs [`Self::is_finite`] on each element, returning a mask.
    ///
    /// `|v| < INFINITY` is false for both infinities and NaN.
    #[inline]
    #[must_use]
    pub fn is_finite_mask(self) -> BVec3A {
        BVec3A(unsafe { _mm_cmplt_ps(crate::sse2::m128_abs(self.0), Self::INFINITY.0) })
    }

    /// Returns `true` if any element of `self` is `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs [`Self::is_nan`] on each element, returning a mask.
    ///
    /// Unordered self-comparison is true exactly for NaN lanes.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }

    /// Computes the length of `self`.
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            let dot = dot3_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    /// Computes the squared length of `self`.
    ///
    /// Faster than `length()` as it avoids a square root.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must not be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot3_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Computes the squared Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Element-wise Euclidean division.
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
        )
    }

    /// Element-wise least non-negative remainder of `self % rhs`.
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
        )
    }

    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must be finite and not of length zero.
    ///
    /// # Panics
    ///
    /// Will panic if the resulting vector is non-finite (e.g. zero-length
    /// input) when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            let length = _mm_sqrt_ps(dot3_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns
    /// `None` (zero-length or non-finite input).
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns
    /// the `fallback` value.
    #[inline]
    #[must_use]
    pub fn normalize_or(self, fallback: Self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            fallback
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns
    /// zero.
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        self.normalize_or(Self::ZERO)
    }

    /// Returns `self` normalized to length 1.0 and its original length, or
    /// `(Self::X, 0.0)` when normalization is impossible.
    #[inline]
    #[must_use]
    pub fn normalize_and_length(self) -> (Self, f32) {
        let length = self.length();
        let rcp = 1.0 / length;
        if rcp.is_finite() && rcp > 0.0 {
            (self * rcp, length)
        } else {
            (Self::X, 0.0)
        }
    }

    /// Returns whether `self` is length `1.0` or not, using a squared-length
    /// tolerance of `2e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 2e-4
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs` — the component of
    /// `self` perpendicular to `rhs`.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`, where `rhs` must
    /// already be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from a normalized `rhs`.
    #[doc(alias("plane"))]
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector with each element rounded to the nearest integer.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    /// Returns a vector with each element rounded down (toward -infinity).
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    /// Returns a vector with each element rounded up (toward +infinity).
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    /// Returns a vector with each element truncated toward zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

    /// Returns `self - self.trunc()` (Rust-style fract; sign follows
    /// `self`).
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.trunc()
    }

    /// Returns `self - self.floor()` (GLSL-style fract; always in `[0, 1)`).
    #[inline]
    #[must_use]
    pub fn fract_gl(self) -> Self {
        self - self.floor()
    }

    /// Returns `e^self` element-wise.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
    }

    /// Returns each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
        )
    }

    /// Returns `1.0 / self` element-wise.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

    /// Performs a linear interpolation between `self` and `rhs` based on
    /// the value `s`: `self` when `s == 0.0`, `rhs` when `s == 1.0`;
    /// extrapolates outside that range.
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self * (1.0 - s) + rhs * s
    }

    /// Moves towards `rhs` by at most distance `d`, arriving exactly when
    /// within `d` (or when already closer than the `1e-4` epsilon).
    #[inline]
    #[must_use]
    pub fn move_towards(&self, rhs: Self, d: f32) -> Self {
        let a = rhs - *self;
        let len = a.length();
        if len <= d || len <= 1e-4 {
            return rhs;
        }
        *self + a / len * d
    }

    /// Calculates the midpoint between `self` and `rhs`.
    #[inline]
    pub fn midpoint(self, rhs: Self) -> Self {
        (self + rhs) * 0.5
    }

    /// Returns true if the absolute difference of all elements between
    /// `self` and `rhs` is less than or equal to `max_abs_diff`.
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than
    /// `max`, preserving direction.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max`, or if either is negative,
    /// when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(0.0 <= min);
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `max` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        glam_assert!(0.0 <= max);
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is negative when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        glam_assert!(0.0 <= min);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Fused multiply-add: `self * a + b` element-wise.
    ///
    /// Uses the hardware FMA instruction (single rounding) when compiled
    /// with the `fma` target feature, otherwise falls back to scalar
    /// `mul_add` per lane.
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
        )
    }

    /// Returns the reflection of `self` about a surface with the given
    /// `normal`.
    ///
    /// # Panics
    ///
    /// Will panic if `normal` is not normalized when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn reflect(self, normal: Self) -> Self {
        glam_assert!(normal.is_normalized());
        self - 2.0 * self.dot(normal) * normal
    }

    /// Returns the refraction direction for an incident vector `self`,
    /// surface `normal` and index-of-refraction ratio `eta`. Returns zero
    /// on total internal reflection (`k < 0`).
    ///
    /// # Panics
    ///
    /// Will panic if `self` or `normal` is not normalized when
    /// `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn refract(self, normal: Self, eta: f32) -> Self {
        glam_assert!(self.is_normalized());
        glam_assert!(normal.is_normalized());
        let n_dot_i = normal.dot(self);
        let k = 1.0 - eta * eta * (1.0 - n_dot_i * n_dot_i);
        if k >= 0.0 {
            eta * self - (eta * n_dot_i + math::sqrt(k)) * normal
        } else {
            Self::ZERO
        }
    }

    /// Returns the angle (in radians) between `self` and `rhs`, in the
    /// range `[0, +π]`. Inputs need not be normalized.
    #[inline]
    #[must_use]
    pub fn angle_between(self, rhs: Self) -> f32 {
        math::acos_approx(
            self.dot(rhs)
                .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
        )
    }

    /// Rotates `self` around the X axis by `angle` (in radians).
    #[inline]
    #[must_use]
    pub fn rotate_x(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x,
            self.y * cosa - self.z * sina,
            self.y * sina + self.z * cosa,
        )
    }

    /// Rotates `self` around the Y axis by `angle` (in radians).
    #[inline]
    #[must_use]
    pub fn rotate_y(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x * cosa + self.z * sina,
            self.y,
            self.x * -sina + self.z * cosa,
        )
    }

    /// Rotates `self` around the Z axis by `angle` (in radians).
    #[inline]
    #[must_use]
    pub fn rotate_z(self, angle: f32) -> Self {
        let (sina, cosa) = math::sin_cos(angle);
        Self::new(
            self.x * cosa - self.y * sina,
            self.x * sina + self.y * cosa,
            self.z,
        )
    }

    /// Rotates `self` around the given `axis` by `angle` (in radians),
    /// via quaternion rotation.
    #[inline]
    #[must_use]
    pub fn rotate_axis(self, axis: Self, angle: f32) -> Self {
        Quat::from_axis_angle(axis.into(), angle) * self
    }

    /// Rotates `self` towards `rhs` by at most `max_angle` radians.
    ///
    /// The clamp keeps the applied rotation within `[angle_between - π,
    /// angle_between]`, so a negative `max_angle` rotates away but no
    /// further than π. For (anti-)parallel inputs an arbitrary orthogonal
    /// axis is used.
    #[inline]
    #[must_use]
    pub fn rotate_towards(self, rhs: Self, max_angle: f32) -> Self {
        let angle_between = self.angle_between(rhs);
        let angle = max_angle.clamp(angle_between - core::f32::consts::PI, angle_between);
        let axis = self
            .cross(rhs)
            .try_normalize()
            .unwrap_or_else(|| self.any_orthogonal_vector().normalize());
        Quat::from_axis_angle(axis.into(), angle) * self
    }

    /// Returns some vector that is orthogonal to `self` (not necessarily
    /// unit length). `self` must be finite and non-zero.
    ///
    /// Swaps against the larger of |x| and |y| to avoid a degenerate result.
    #[inline]
    #[must_use]
    pub fn any_orthogonal_vector(&self) -> Self {
        if math::abs(self.x) > math::abs(self.y) {
            Self::new(-self.z, 0.0, self.x)
        } else {
            Self::new(0.0, self.z, -self.y)
        }
    }

    /// Returns any unit vector that is orthogonal to `self`, which must be
    /// normalized.
    ///
    /// Uses the branch-free Pixar "orthonormal basis" construction.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(&self) -> Self {
        glam_assert!(self.is_normalized());
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }

    /// Given a normalized `self`, returns two unit vectors that together
    /// with `self` form an orthonormal basis.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is
    /// enabled.
    #[inline]
    #[must_use]
    pub fn any_orthonormal_pair(&self) -> (Self, Self) {
        glam_assert!(self.is_normalized());
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        (
            Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
            Self::new(b, sign + self.y * self.y * a, -self.y),
        )
    }

    /// Performs a spherical linear interpolation between `self` and `rhs`
    /// based on the value `s`, interpolating length linearly.
    ///
    /// Near-parallel inputs fall back to `lerp`; near-antiparallel inputs
    /// rotate through an arbitrary orthogonal axis instead.
    #[inline]
    #[must_use]
    pub fn slerp(self, rhs: Self, s: f32) -> Self {
        let self_length = self.length();
        let rhs_length = rhs.length();
        // Cosine of the angle between the two (normalized) directions.
        let dot = self.dot(rhs) / (self_length * rhs_length);
        if math::abs(dot) < 1.0 - 3e-7 {
            let theta = math::acos_approx(dot);
            let sin_theta = math::sin(theta);
            let t1 = math::sin(theta * (1. - s));
            let t2 = math::sin(theta * s);
            let result_length = self_length.lerp(rhs_length, s);
            return (self * (result_length / self_length) * t1
                + rhs * (result_length / rhs_length) * t2)
                * sin_theta.recip();
        }
        if dot < 0.0 {
            // Antiparallel: rotation axis is ambiguous, pick any orthogonal.
            let axis = self.any_orthogonal_vector().normalize().into();
            let rotation = Quat::from_axis_angle(axis, core::f32::consts::PI * s);
            let result_length = self_length.lerp(rhs_length, s);
            rotation * self * (result_length / self_length)
        } else {
            self.lerp(rhs, s)
        }
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec3(&self) -> crate::DVec3 {
        crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
    }

    /// Casts all elements of `self` to `i8` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_i8vec3(&self) -> crate::I8Vec3 {
        crate::I8Vec3::new(self.x as i8, self.y as i8, self.z as i8)
    }

    /// Casts all elements of `self` to `u8` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_u8vec3(&self) -> crate::U8Vec3 {
        crate::U8Vec3::new(self.x as u8, self.y as u8, self.z as u8)
    }

    /// Casts all elements of `self` to `i16` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_i16vec3(&self) -> crate::I16Vec3 {
        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
    }

    /// Casts all elements of `self` to `u16` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_u16vec3(&self) -> crate::U16Vec3 {
        crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
    }

    /// Casts all elements of `self` to `i32` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_ivec3(&self) -> crate::IVec3 {
        crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
    }

    /// Casts all elements of `self` to `u32` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_uvec3(&self) -> crate::UVec3 {
        crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
    }

    /// Casts all elements of `self` to `i64` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_i64vec3(&self) -> crate::I64Vec3 {
        crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
    }

    /// Casts all elements of `self` to `u64` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_u64vec3(&self) -> crate::U64Vec3 {
        crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
    }

    /// Casts all elements of `self` to `usize` (saturating `as` cast).
    #[inline]
    #[must_use]
    pub fn as_usizevec3(&self) -> crate::USizeVec3 {
        crate::USizeVec3::new(self.x as usize, self.y as usize, self.z as usize)
    }
}
1269
// The default vector is all zeroes.
impl Default for Vec3A {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}
1276
// Equality is element-wise over x/y/z only (the padding lane is excluded by
// `cmpeq`/`all`). IEEE semantics apply: any NaN lane makes vectors unequal.
impl PartialEq for Vec3A {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}
1283
// `Div` implementations: element-wise division for every value/reference
// combination of `Vec3A` and `f32` operands. Only the by-value forms do the
// SIMD work; all reference forms delegate by dereferencing.
impl Div for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

impl Div<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &Self) -> Self {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl DivAssign<&Self> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &Self) {
        self.div_assign(*rhs);
    }
}

// Vector / scalar: the scalar is broadcast to all lanes first.
impl Div<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Div<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: &f32) -> Self {
        self.div(*rhs)
    }
}

impl Div<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &f32) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: f32) -> Vec3A {
        (*self).div(rhs)
    }
}

impl DivAssign<f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl DivAssign<&f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: &f32) {
        self.div_assign(*rhs);
    }
}

// Scalar / vector: the scalar becomes the (broadcast) dividend.
impl Div<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Div<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        self.div(*rhs)
    }
}

impl Div<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: &Vec3A) -> Vec3A {
        (*self).div(*rhs)
    }
}

impl Div<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        (*self).div(rhs)
    }
}
1407
// `Mul` implementations: element-wise multiplication for every
// value/reference combination of `Vec3A` and `f32` operands. Only the
// by-value forms do the SIMD work; reference forms delegate.
impl Mul for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

impl Mul<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &Self) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl MulAssign<&Self> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &Self) {
        self.mul_assign(*rhs);
    }
}

// Vector * scalar: the scalar is broadcast to all lanes first.
impl Mul<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Mul<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: &f32) -> Self {
        self.mul(*rhs)
    }
}

impl Mul<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &f32) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: f32) -> Vec3A {
        (*self).mul(rhs)
    }
}

impl MulAssign<f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl MulAssign<&f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: &f32) {
        self.mul_assign(*rhs);
    }
}

// Scalar * vector (commutes with the vector * scalar form).
impl Mul<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        self.mul(*rhs)
    }
}

impl Mul<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: &Vec3A) -> Vec3A {
        (*self).mul(*rhs)
    }
}

impl Mul<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        (*self).mul(rhs)
    }
}
1531
// `Add` implementations: element-wise addition for every value/reference
// combination of `Vec3A` and `f32` operands. Only the by-value forms do the
// SIMD work; reference forms delegate.
impl Add for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

impl Add<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &Self) -> Self {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl AddAssign<&Self> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &Self) {
        self.add_assign(*rhs);
    }
}

// Vector + scalar: the scalar is broadcast to all lanes first.
impl Add<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Add<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: &f32) -> Self {
        self.add(*rhs)
    }
}

impl Add<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &f32) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: f32) -> Vec3A {
        (*self).add(rhs)
    }
}

impl AddAssign<f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl AddAssign<&f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: &f32) {
        self.add_assign(*rhs);
    }
}

// Scalar + vector (commutes with the vector + scalar form).
impl Add<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        self.add(*rhs)
    }
}

impl Add<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: &Vec3A) -> Vec3A {
        (*self).add(*rhs)
    }
}

impl Add<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        (*self).add(rhs)
    }
}
1655
// `Sub` implementations: element-wise subtraction for every value/reference
// combination of `Vec3A` and `f32` operands. Only the by-value forms do the
// SIMD work; reference forms delegate.
impl Sub for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

impl Sub<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &Self) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl SubAssign<&Self> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &Self) {
        self.sub_assign(*rhs);
    }
}

// Vector - scalar: the scalar is broadcast to all lanes first.
impl Sub<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl Sub<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: &f32) -> Self {
        self.sub(*rhs)
    }
}

impl Sub<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &f32) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: f32) -> Vec3A {
        (*self).sub(rhs)
    }
}

impl SubAssign<f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl SubAssign<&f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: &f32) {
        self.sub_assign(*rhs);
    }
}

// Scalar - vector: the scalar becomes the (broadcast) minuend.
impl Sub<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        self.sub(*rhs)
    }
}

impl Sub<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: &Vec3A) -> Vec3A {
        (*self).sub(*rhs)
    }
}

impl Sub<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        (*self).sub(rhs)
    }
}
1779
// `Rem` implementations for every value/reference combination of `Vec3A`
// and `f32` operands. The core by-value form computes a floored remainder
// in SIMD: `self - floor(self / rhs) * rhs`.
// NOTE(review): flooring the quotient gives a result with the sign of `rhs`,
// unlike scalar `f32::%` which truncates — presumably intentional for this
// backend; confirm against the scalar implementation.
impl Rem for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

impl Rem<&Self> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &Self) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl RemAssign<&Self> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &Self) {
        self.rem_assign(*rhs);
    }
}

// Vector % scalar: the scalar divisor is broadcast to all lanes.
impl Rem<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl Rem<&f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: &f32) -> Self {
        self.rem(*rhs)
    }
}

impl Rem<&f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &f32) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<f32> for &Vec3A {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: f32) -> Vec3A {
        (*self).rem(rhs)
    }
}

impl RemAssign<f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl RemAssign<&f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: &f32) {
        self.rem_assign(*rhs);
    }
}

// Scalar % vector: the scalar dividend is broadcast to all lanes.
impl Rem<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        Vec3A::splat(self).rem(rhs)
    }
}

impl Rem<&Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        self.rem(*rhs)
    }
}

impl Rem<&Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: &Vec3A) -> Vec3A {
        (*self).rem(*rhs)
    }
}

impl Rem<Vec3A> for &f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        (*self).rem(rhs)
    }
}
1906
impl AsRef<[f32; 3]> for Vec3A {
    // Borrows the first three lanes as a `[f32; 3]`.
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        // SAFETY: `Vec3A` is `#[repr(transparent)]` over `__m128` (16 bytes,
        // 16-aligned), so its first 12 bytes are three valid, suitably
        // aligned `f32` lanes.
        unsafe { &*(self as *const Self as *const [f32; 3]) }
    }
}
1913
impl AsMut<[f32; 3]> for Vec3A {
    // Mutably borrows the first three lanes as a `[f32; 3]`; the fourth lane
    // is left untouched by writes through this view.
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        // SAFETY: same layout argument as `AsRef` — `#[repr(transparent)]`
        // over `__m128` guarantees three valid, aligned `f32` lanes.
        unsafe { &mut *(self as *mut Self as *mut [f32; 3]) }
    }
}
1920
1921impl Sum for Vec3A {
1922 #[inline]
1923 fn sum<I>(iter: I) -> Self
1924 where
1925 I: Iterator<Item = Self>,
1926 {
1927 iter.fold(Self::ZERO, Self::add)
1928 }
1929}
1930
1931impl<'a> Sum<&'a Self> for Vec3A {
1932 #[inline]
1933 fn sum<I>(iter: I) -> Self
1934 where
1935 I: Iterator<Item = &'a Self>,
1936 {
1937 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1938 }
1939}
1940
1941impl Product for Vec3A {
1942 #[inline]
1943 fn product<I>(iter: I) -> Self
1944 where
1945 I: Iterator<Item = Self>,
1946 {
1947 iter.fold(Self::ONE, Self::mul)
1948 }
1949}
1950
1951impl<'a> Product<&'a Self> for Vec3A {
1952 #[inline]
1953 fn product<I>(iter: I) -> Self
1954 where
1955 I: Iterator<Item = &'a Self>,
1956 {
1957 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1958 }
1959}
1960
1961impl Neg for Vec3A {
1962 type Output = Self;
1963 #[inline]
1964 fn neg(self) -> Self {
1965 Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
1966 }
1967}
1968
1969impl Neg for &Vec3A {
1970 type Output = Vec3A;
1971 #[inline]
1972 fn neg(self) -> Vec3A {
1973 (*self).neg()
1974 }
1975}
1976
1977impl Index<usize> for Vec3A {
1978 type Output = f32;
1979 #[inline]
1980 fn index(&self, index: usize) -> &Self::Output {
1981 match index {
1982 0 => &self.x,
1983 1 => &self.y,
1984 2 => &self.z,
1985 _ => panic!("index out of bounds"),
1986 }
1987 }
1988}
1989
1990impl IndexMut<usize> for Vec3A {
1991 #[inline]
1992 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
1993 match index {
1994 0 => &mut self.x,
1995 1 => &mut self.y,
1996 2 => &mut self.z,
1997 _ => panic!("index out of bounds"),
1998 }
1999 }
2000}
2001
2002impl fmt::Display for Vec3A {
2003 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2004 if let Some(p) = f.precision() {
2005 write!(f, "[{:.*}, {:.*}, {:.*}]", p, self.x, p, self.y, p, self.z)
2006 } else {
2007 write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
2008 }
2009 }
2010}
2011
2012impl fmt::Debug for Vec3A {
2013 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2014 fmt.debug_tuple(stringify!(Vec3A))
2015 .field(&self.x)
2016 .field(&self.y)
2017 .field(&self.z)
2018 .finish()
2019 }
2020}
2021
2022impl From<Vec3A> for __m128 {
2023 #[inline(always)]
2024 fn from(t: Vec3A) -> Self {
2025 t.0
2026 }
2027}
2028
2029impl From<__m128> for Vec3A {
2030 #[inline(always)]
2031 fn from(t: __m128) -> Self {
2032 Self(t)
2033 }
2034}
2035
2036impl From<[f32; 3]> for Vec3A {
2037 #[inline]
2038 fn from(a: [f32; 3]) -> Self {
2039 Self::new(a[0], a[1], a[2])
2040 }
2041}
2042
impl From<Vec3A> for [f32; 3] {
    // Extracts the three lanes into a plain array via a single aligned store.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Spill all 16 bytes to an aligned scratch slot, then read back only
        // the `[f32; 3]` payload.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `Align16<_>` provides the 16-byte alignment
            // `_mm_store_ps` requires, and the store initialises the value
            // read by `assume_init`. NOTE(review): the store writes 16 bytes;
            // this relies on `Align16<[f32; 3]>` being padded to 16 bytes —
            // confirm at `Align16`'s definition.
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2055
2056impl From<(f32, f32, f32)> for Vec3A {
2057 #[inline]
2058 fn from(t: (f32, f32, f32)) -> Self {
2059 Self::new(t.0, t.1, t.2)
2060 }
2061}
2062
2063impl From<Vec3A> for (f32, f32, f32) {
2064 #[inline]
2065 fn from(v: Vec3A) -> Self {
2066 (v.x, v.y, v.z)
2067 }
2068}
2069
2070impl From<Vec3> for Vec3A {
2071 #[inline]
2072 fn from(v: Vec3) -> Self {
2073 Self::new(v.x, v.y, v.z)
2074 }
2075}
2076
impl From<Vec3A> for Vec3 {
    // Narrows the SIMD-backed `Vec3A` to a plain `Vec3` via a single aligned
    // store.
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Spill all 16 bytes to an aligned scratch slot, then read back only
        // the `Vec3` payload.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `Align16<_>` provides the 16-byte alignment
            // `_mm_store_ps` requires, and the store initialises the value
            // read by `assume_init`. NOTE(review): relies on
            // `Align16<Vec3>` being padded to at least 16 bytes — confirm at
            // `Align16`'s definition.
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}
2089
2090impl From<(Vec2, f32)> for Vec3A {
2091 #[inline]
2092 fn from((v, z): (Vec2, f32)) -> Self {
2093 Self::new(v.x, v.y, z)
2094 }
2095}
2096
impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    // Exposes `.x`/`.y`/`.z` field access on the SIMD-backed storage.
    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: requires `deref::Vec3<f32>` to have the same layout as the
        // first three `f32` lanes of the `__m128`. NOTE(review): assumes
        // `deref::Vec3` is `#[repr(C)]` — confirm at its definition.
        unsafe { &*(self as *const Self).cast() }
    }
}
2104
impl DerefMut for Vec3A {
    // Exposes mutable `.x`/`.y`/`.z` field access on the SIMD-backed storage.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout argument as `Deref`; writes through this view
        // leave the fourth lane untouched.
        unsafe { &mut *(self as *mut Self).cast() }
    }
}
2111
2112impl From<BVec3> for Vec3A {
2113 #[inline]
2114 fn from(v: BVec3) -> Self {
2115 Self::new(f32::from(v.x), f32::from(v.y), f32::from(v.z))
2116 }
2117}
2118
2119impl From<BVec3A> for Vec3A {
2120 #[inline]
2121 fn from(v: BVec3A) -> Self {
2122 let bool_array: [bool; 3] = v.into();
2123 Self::new(
2124 f32::from(bool_array[0]),
2125 f32::from(bool_array[1]),
2126 f32::from(bool_array[2]),
2127 )
2128 }
2129}