1use crate::{BVec2, I16Vec2, I64Vec2, I8Vec2, IVec2, U16Vec2, U64Vec3, U8Vec2, USizeVec2, UVec2};
4
5use core::fmt;
6use core::iter::{Product, Sum};
7use core::{f32, ops::*};
8
9#[cfg(feature = "zerocopy")]
10use zerocopy_derive::*;
11
/// Creates a 2-dimensional `u64` vector.
#[inline(always)]
#[must_use]
pub const fn u64vec2(x: u64, y: u64) -> U64Vec2 {
    U64Vec2::new(x, y)
}
18
/// A 2-dimensional vector of `u64` components.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
#[cfg_attr(
    feature = "zerocopy",
    derive(FromBytes, Immutable, IntoBytes, KnownLayout)
)]
#[cfg_attr(feature = "cuda", repr(align(16)))]
#[repr(C)]
#[cfg_attr(target_arch = "spirv", rust_gpu::vector::v1)]
pub struct U64Vec2 {
    /// The first component of the vector.
    pub x: u64,
    /// The second component of the vector.
    pub y: u64,
}
33
impl U64Vec2 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0);

    /// All ones.
    pub const ONE: Self = Self::splat(1);

    /// All `u64::MIN`.
    pub const MIN: Self = Self::splat(u64::MIN);

    /// All `u64::MAX`.
    pub const MAX: Self = Self::splat(u64::MAX);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1, 0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0, 1);

    /// The unit axes.
    pub const AXES: [Self; 2] = [Self::X, Self::Y];

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: u64, y: u64) -> Self {
        Self { x, y }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: u64) -> Self {
        Self { x: v, y: v }
    }

    /// Returns a vector containing each element of `self` modified by a mapping function `f`.
    #[inline]
    #[must_use]
    pub fn map<F>(self, f: F) -> Self
    where
        F: Fn(u64) -> u64,
    {
        Self::new(f(self.x), f(self.y))
    }

    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element based on `mask`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and a false
    /// element uses the element from `if_false`.
    #[inline]
    #[must_use]
    pub fn select(mask: BVec2, if_true: Self, if_false: Self) -> Self {
        Self {
            x: if mask.test(0) { if_true.x } else { if_false.x },
            y: if mask.test(1) { if_true.y } else { if_false.y },
        }
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [u64; 2]) -> Self {
        Self::new(a[0], a[1])
    }

    /// Returns the elements of `self` as `[x, y]`.
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [u64; 2] {
        [self.x, self.y]
    }

    /// Creates a vector from the first 2 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 2 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[u64]) -> Self {
        assert!(slice.len() >= 2);
        Self::new(slice[0], slice[1])
    }

    /// Writes the elements of `self` to the first 2 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 2 elements long
    /// (`copy_from_slice` requires matching lengths).
    #[inline]
    pub fn write_to_slice(self, slice: &mut [u64]) {
        slice[..2].copy_from_slice(&self.to_array());
    }

    /// Creates a 3D vector from `self` and the given `z` value.
    #[inline]
    #[must_use]
    pub const fn extend(self, z: u64) -> U64Vec3 {
        U64Vec3::new(self.x, self.y, z)
    }

    /// Creates a copy of `self` with the `x` component replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_x(mut self, x: u64) -> Self {
        self.x = x;
        self
    }

    /// Creates a copy of `self` with the `y` component replaced by the given value.
    #[inline]
    #[must_use]
    pub fn with_y(mut self, y: u64) -> Self {
        self.y = y;
        self
    }

    /// Computes the dot product of `self` and `rhs`.
    ///
    /// Note: the intermediate multiplications and the sum may overflow,
    /// which panics in debug builds and wraps in release builds.
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> u64 {
        (self.x * rhs.x) + (self.y * rhs.y)
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self::splat(self.dot(rhs))
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[min(self.x, rhs.x), min(self.y, rhs.y)]`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self {
            x: if self.x < rhs.x { self.x } else { rhs.x },
            y: if self.y < rhs.y { self.y } else { rhs.y },
        }
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[max(self.x, rhs.x), max(self.y, rhs.y)]`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self {
            x: if self.x > rhs.x { self.x } else { rhs.x },
            y: if self.y > rhs.y { self.y } else { rhs.y },
        }
    }

    /// Component-wise clamping of values.
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Panics if `min` is greater than `max` when the project's `glam_assert!`
    /// checks are enabled.
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y)`.
    #[inline]
    #[must_use]
    pub fn min_element(self) -> u64 {
        let min = |a, b| if a < b { a } else { b };
        min(self.x, self.y)
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> u64 {
        let max = |a, b| if a > b { a } else { b };
        max(self.x, self.y)
    }

    /// Returns the index of the first minimum element of `self`.
    /// On a tie, the lower index (0) wins.
    #[doc(alias = "argmin")]
    #[inline]
    #[must_use]
    pub fn min_position(self) -> usize {
        if self.x <= self.y {
            0
        } else {
            1
        }
    }

    /// Returns the index of the first maximum element of `self`.
    /// On a tie, the lower index (0) wins.
    #[doc(alias = "argmax")]
    #[inline]
    #[must_use]
    pub fn max_position(self) -> usize {
        if self.x >= self.y {
            0
        } else {
            1
        }
    }

    /// Returns the sum of all elements of `self`.
    ///
    /// In other words, this computes `self.x + self.y`.
    #[inline]
    #[must_use]
    pub fn element_sum(self) -> u64 {
        self.x + self.y
    }

    /// Returns the product of all elements of `self`.
    ///
    /// In other words, this computes `self.x * self.y`.
    #[inline]
    #[must_use]
    pub fn element_product(self) -> u64 {
        self.x * self.y
    }

    /// Returns a vector mask containing the result of a `==` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.eq(&rhs.x), self.y.eq(&rhs.y))
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.ne(&rhs.x), self.y.ne(&rhs.y))
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.ge(&rhs.x), self.y.ge(&rhs.y))
    }

    /// Returns a vector mask containing the result of a `>` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.gt(&rhs.x), self.y.gt(&rhs.y))
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.le(&rhs.x), self.y.le(&rhs.y))
    }

    /// Returns a vector mask containing the result of a `<` comparison for each
    /// element of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec2 {
        BVec2::new(self.x.lt(&rhs.x), self.y.lt(&rhs.y))
    }

    /// Computes the squared length of `self` (the dot product with itself).
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> u64 {
        self.dot(self)
    }

    /// Computes the [manhattan distance] between two points.
    ///
    /// # Overflow
    /// This method may overflow if the sum of the two axis distances exceeds
    /// [`u64::MAX`]; see [`Self::checked_manhattan_distance`] for a non-panicking
    /// alternative.
    ///
    /// [manhattan distance]: https://en.wikipedia.org/wiki/Taxicab_geometry
    #[inline]
    #[must_use]
    pub fn manhattan_distance(self, rhs: Self) -> u64 {
        self.x.abs_diff(rhs.x) + self.y.abs_diff(rhs.y)
    }

    /// Computes the [manhattan distance] between two points, returning `None` if
    /// the sum overflows.
    ///
    /// [manhattan distance]: https://en.wikipedia.org/wiki/Taxicab_geometry
    #[inline]
    #[must_use]
    pub fn checked_manhattan_distance(self, rhs: Self) -> Option<u64> {
        let d = self.x.abs_diff(rhs.x);
        d.checked_add(self.y.abs_diff(rhs.y))
    }

    /// Computes the [chebyshev distance] between two points: the maximum of the
    /// per-axis absolute differences.
    ///
    /// [chebyshev distance]: https://en.wikipedia.org/wiki/Chebyshev_distance
    #[inline]
    #[must_use]
    pub fn chebyshev_distance(self, rhs: Self) -> u64 {
        // `max()` on a non-empty 2-element array is always `Some`, so the
        // `unwrap` cannot panic.
        [self.x.abs_diff(rhs.x), self.y.abs_diff(rhs.y)]
            .into_iter()
            .max()
            .unwrap()
    }

    /// Casts all elements of `self` to `f32`.
    #[inline]
    #[must_use]
    pub fn as_vec2(&self) -> crate::Vec2 {
        crate::Vec2::new(self.x as f32, self.y as f32)
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec2(&self) -> crate::DVec2 {
        crate::DVec2::new(self.x as f64, self.y as f64)
    }

    /// Casts all elements of `self` to `i8`.
    /// Note: `as` casts truncate, keeping only the low bits of out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_i8vec2(&self) -> crate::I8Vec2 {
        crate::I8Vec2::new(self.x as i8, self.y as i8)
    }

    /// Casts all elements of `self` to `u8`.
    /// Note: `as` casts truncate, keeping only the low bits of out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_u8vec2(&self) -> crate::U8Vec2 {
        crate::U8Vec2::new(self.x as u8, self.y as u8)
    }

    /// Casts all elements of `self` to `i16`.
    /// Note: `as` casts truncate, keeping only the low bits of out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_i16vec2(&self) -> crate::I16Vec2 {
        crate::I16Vec2::new(self.x as i16, self.y as i16)
    }

    /// Casts all elements of `self` to `u16`.
    /// Note: `as` casts truncate, keeping only the low bits of out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_u16vec2(&self) -> crate::U16Vec2 {
        crate::U16Vec2::new(self.x as u16, self.y as u16)
    }

    /// Casts all elements of `self` to `i32`.
    /// Note: `as` casts truncate, keeping only the low bits of out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_ivec2(&self) -> crate::IVec2 {
        crate::IVec2::new(self.x as i32, self.y as i32)
    }

    /// Casts all elements of `self` to `u32`.
    /// Note: `as` casts truncate, keeping only the low bits of out-of-range values.
    #[inline]
    #[must_use]
    pub fn as_uvec2(&self) -> crate::UVec2 {
        crate::UVec2::new(self.x as u32, self.y as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec2(&self) -> crate::I64Vec2 {
        crate::I64Vec2::new(self.x as i64, self.y as i64)
    }

    /// Casts all elements of `self` to `usize`.
    #[inline]
    #[must_use]
    pub fn as_usizevec2(&self) -> crate::USizeVec2 {
        crate::USizeVec2::new(self.x as usize, self.y as usize)
    }

    /// Returns a vector containing the checked addition of `self` and `rhs`.
    ///
    /// Returns `None` if any element addition overflows.
    #[inline]
    #[must_use]
    pub const fn checked_add(self, rhs: Self) -> Option<Self> {
        // Explicit matches instead of `?` keep this function `const`-compatible.
        let x = match self.x.checked_add(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_add(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Returns a vector containing the checked subtraction of `self` and `rhs`.
    ///
    /// Returns `None` if any element subtraction underflows.
    #[inline]
    #[must_use]
    pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
        // Explicit matches instead of `?` keep this function `const`-compatible.
        let x = match self.x.checked_sub(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_sub(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Returns a vector containing the checked multiplication of `self` and `rhs`.
    ///
    /// Returns `None` if any element multiplication overflows.
    #[inline]
    #[must_use]
    pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
        // Explicit matches instead of `?` keep this function `const`-compatible.
        let x = match self.x.checked_mul(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_mul(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Returns a vector containing the checked division of `self` and `rhs`.
    ///
    /// Returns `None` if any element of `rhs` is zero.
    #[inline]
    #[must_use]
    pub const fn checked_div(self, rhs: Self) -> Option<Self> {
        // Explicit matches instead of `?` keep this function `const`-compatible.
        let x = match self.x.checked_div(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_div(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Returns a vector containing the wrapping addition of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn wrapping_add(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_add(rhs.x),
            y: self.y.wrapping_add(rhs.y),
        }
    }

    /// Returns a vector containing the wrapping subtraction of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn wrapping_sub(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_sub(rhs.x),
            y: self.y.wrapping_sub(rhs.y),
        }
    }

    /// Returns a vector containing the wrapping multiplication of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn wrapping_mul(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_mul(rhs.x),
            y: self.y.wrapping_mul(rhs.y),
        }
    }

    /// Returns a vector containing the wrapping division of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn wrapping_div(self, rhs: Self) -> Self {
        Self {
            x: self.x.wrapping_div(rhs.x),
            y: self.y.wrapping_div(rhs.y),
        }
    }

    /// Returns a vector containing the saturating addition of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn saturating_add(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_add(rhs.x),
            y: self.y.saturating_add(rhs.y),
        }
    }

    /// Returns a vector containing the saturating subtraction of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn saturating_sub(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_sub(rhs.x),
            y: self.y.saturating_sub(rhs.y),
        }
    }

    /// Returns a vector containing the saturating multiplication of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn saturating_mul(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_mul(rhs.x),
            y: self.y.saturating_mul(rhs.y),
        }
    }

    /// Returns a vector containing the saturating division of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub const fn saturating_div(self, rhs: Self) -> Self {
        Self {
            x: self.x.saturating_div(rhs.x),
            y: self.y.saturating_div(rhs.y),
        }
    }

    /// Returns a vector containing the checked addition of `self` and the signed
    /// vector `rhs`.
    ///
    /// Returns `None` if any element addition overflows or underflows.
    #[inline]
    #[must_use]
    pub const fn checked_add_signed(self, rhs: I64Vec2) -> Option<Self> {
        // Explicit matches instead of `?` keep this function `const`-compatible.
        let x = match self.x.checked_add_signed(rhs.x) {
            Some(v) => v,
            None => return None,
        };
        let y = match self.y.checked_add_signed(rhs.y) {
            Some(v) => v,
            None => return None,
        };

        Some(Self { x, y })
    }

    /// Returns a vector containing the wrapping addition of `self` and the signed
    /// vector `rhs`.
    #[inline]
    #[must_use]
    pub const fn wrapping_add_signed(self, rhs: I64Vec2) -> Self {
        Self {
            x: self.x.wrapping_add_signed(rhs.x),
            y: self.y.wrapping_add_signed(rhs.y),
        }
    }

    /// Returns a vector containing the saturating addition of `self` and the signed
    /// vector `rhs`.
    #[inline]
    #[must_use]
    pub const fn saturating_add_signed(self, rhs: I64Vec2) -> Self {
        Self {
            x: self.x.saturating_add_signed(rhs.x),
            y: self.y.saturating_add_signed(rhs.y),
        }
    }
}
660
661impl Default for U64Vec2 {
662 #[inline(always)]
663 fn default() -> Self {
664 Self::ZERO
665 }
666}
667
668impl Div for U64Vec2 {
669 type Output = Self;
670 #[inline]
671 fn div(self, rhs: Self) -> Self {
672 Self {
673 x: self.x.div(rhs.x),
674 y: self.y.div(rhs.y),
675 }
676 }
677}
678
679impl Div<&Self> for U64Vec2 {
680 type Output = Self;
681 #[inline]
682 fn div(self, rhs: &Self) -> Self {
683 self.div(*rhs)
684 }
685}
686
687impl Div<&U64Vec2> for &U64Vec2 {
688 type Output = U64Vec2;
689 #[inline]
690 fn div(self, rhs: &U64Vec2) -> U64Vec2 {
691 (*self).div(*rhs)
692 }
693}
694
695impl Div<U64Vec2> for &U64Vec2 {
696 type Output = U64Vec2;
697 #[inline]
698 fn div(self, rhs: U64Vec2) -> U64Vec2 {
699 (*self).div(rhs)
700 }
701}
702
703impl DivAssign for U64Vec2 {
704 #[inline]
705 fn div_assign(&mut self, rhs: Self) {
706 self.x.div_assign(rhs.x);
707 self.y.div_assign(rhs.y);
708 }
709}
710
711impl DivAssign<&Self> for U64Vec2 {
712 #[inline]
713 fn div_assign(&mut self, rhs: &Self) {
714 self.div_assign(*rhs);
715 }
716}
717
718impl Div<u64> for U64Vec2 {
719 type Output = Self;
720 #[inline]
721 fn div(self, rhs: u64) -> Self {
722 Self {
723 x: self.x.div(rhs),
724 y: self.y.div(rhs),
725 }
726 }
727}
728
729impl Div<&u64> for U64Vec2 {
730 type Output = Self;
731 #[inline]
732 fn div(self, rhs: &u64) -> Self {
733 self.div(*rhs)
734 }
735}
736
737impl Div<&u64> for &U64Vec2 {
738 type Output = U64Vec2;
739 #[inline]
740 fn div(self, rhs: &u64) -> U64Vec2 {
741 (*self).div(*rhs)
742 }
743}
744
745impl Div<u64> for &U64Vec2 {
746 type Output = U64Vec2;
747 #[inline]
748 fn div(self, rhs: u64) -> U64Vec2 {
749 (*self).div(rhs)
750 }
751}
752
753impl DivAssign<u64> for U64Vec2 {
754 #[inline]
755 fn div_assign(&mut self, rhs: u64) {
756 self.x.div_assign(rhs);
757 self.y.div_assign(rhs);
758 }
759}
760
761impl DivAssign<&u64> for U64Vec2 {
762 #[inline]
763 fn div_assign(&mut self, rhs: &u64) {
764 self.div_assign(*rhs);
765 }
766}
767
768impl Div<U64Vec2> for u64 {
769 type Output = U64Vec2;
770 #[inline]
771 fn div(self, rhs: U64Vec2) -> U64Vec2 {
772 U64Vec2 {
773 x: self.div(rhs.x),
774 y: self.div(rhs.y),
775 }
776 }
777}
778
779impl Div<&U64Vec2> for u64 {
780 type Output = U64Vec2;
781 #[inline]
782 fn div(self, rhs: &U64Vec2) -> U64Vec2 {
783 self.div(*rhs)
784 }
785}
786
787impl Div<&U64Vec2> for &u64 {
788 type Output = U64Vec2;
789 #[inline]
790 fn div(self, rhs: &U64Vec2) -> U64Vec2 {
791 (*self).div(*rhs)
792 }
793}
794
795impl Div<U64Vec2> for &u64 {
796 type Output = U64Vec2;
797 #[inline]
798 fn div(self, rhs: U64Vec2) -> U64Vec2 {
799 (*self).div(rhs)
800 }
801}
802
803impl Mul for U64Vec2 {
804 type Output = Self;
805 #[inline]
806 fn mul(self, rhs: Self) -> Self {
807 Self {
808 x: self.x.mul(rhs.x),
809 y: self.y.mul(rhs.y),
810 }
811 }
812}
813
814impl Mul<&Self> for U64Vec2 {
815 type Output = Self;
816 #[inline]
817 fn mul(self, rhs: &Self) -> Self {
818 self.mul(*rhs)
819 }
820}
821
822impl Mul<&U64Vec2> for &U64Vec2 {
823 type Output = U64Vec2;
824 #[inline]
825 fn mul(self, rhs: &U64Vec2) -> U64Vec2 {
826 (*self).mul(*rhs)
827 }
828}
829
830impl Mul<U64Vec2> for &U64Vec2 {
831 type Output = U64Vec2;
832 #[inline]
833 fn mul(self, rhs: U64Vec2) -> U64Vec2 {
834 (*self).mul(rhs)
835 }
836}
837
838impl MulAssign for U64Vec2 {
839 #[inline]
840 fn mul_assign(&mut self, rhs: Self) {
841 self.x.mul_assign(rhs.x);
842 self.y.mul_assign(rhs.y);
843 }
844}
845
846impl MulAssign<&Self> for U64Vec2 {
847 #[inline]
848 fn mul_assign(&mut self, rhs: &Self) {
849 self.mul_assign(*rhs);
850 }
851}
852
853impl Mul<u64> for U64Vec2 {
854 type Output = Self;
855 #[inline]
856 fn mul(self, rhs: u64) -> Self {
857 Self {
858 x: self.x.mul(rhs),
859 y: self.y.mul(rhs),
860 }
861 }
862}
863
864impl Mul<&u64> for U64Vec2 {
865 type Output = Self;
866 #[inline]
867 fn mul(self, rhs: &u64) -> Self {
868 self.mul(*rhs)
869 }
870}
871
872impl Mul<&u64> for &U64Vec2 {
873 type Output = U64Vec2;
874 #[inline]
875 fn mul(self, rhs: &u64) -> U64Vec2 {
876 (*self).mul(*rhs)
877 }
878}
879
880impl Mul<u64> for &U64Vec2 {
881 type Output = U64Vec2;
882 #[inline]
883 fn mul(self, rhs: u64) -> U64Vec2 {
884 (*self).mul(rhs)
885 }
886}
887
888impl MulAssign<u64> for U64Vec2 {
889 #[inline]
890 fn mul_assign(&mut self, rhs: u64) {
891 self.x.mul_assign(rhs);
892 self.y.mul_assign(rhs);
893 }
894}
895
896impl MulAssign<&u64> for U64Vec2 {
897 #[inline]
898 fn mul_assign(&mut self, rhs: &u64) {
899 self.mul_assign(*rhs);
900 }
901}
902
903impl Mul<U64Vec2> for u64 {
904 type Output = U64Vec2;
905 #[inline]
906 fn mul(self, rhs: U64Vec2) -> U64Vec2 {
907 U64Vec2 {
908 x: self.mul(rhs.x),
909 y: self.mul(rhs.y),
910 }
911 }
912}
913
914impl Mul<&U64Vec2> for u64 {
915 type Output = U64Vec2;
916 #[inline]
917 fn mul(self, rhs: &U64Vec2) -> U64Vec2 {
918 self.mul(*rhs)
919 }
920}
921
922impl Mul<&U64Vec2> for &u64 {
923 type Output = U64Vec2;
924 #[inline]
925 fn mul(self, rhs: &U64Vec2) -> U64Vec2 {
926 (*self).mul(*rhs)
927 }
928}
929
930impl Mul<U64Vec2> for &u64 {
931 type Output = U64Vec2;
932 #[inline]
933 fn mul(self, rhs: U64Vec2) -> U64Vec2 {
934 (*self).mul(rhs)
935 }
936}
937
938impl Add for U64Vec2 {
939 type Output = Self;
940 #[inline]
941 fn add(self, rhs: Self) -> Self {
942 Self {
943 x: self.x.add(rhs.x),
944 y: self.y.add(rhs.y),
945 }
946 }
947}
948
949impl Add<&Self> for U64Vec2 {
950 type Output = Self;
951 #[inline]
952 fn add(self, rhs: &Self) -> Self {
953 self.add(*rhs)
954 }
955}
956
957impl Add<&U64Vec2> for &U64Vec2 {
958 type Output = U64Vec2;
959 #[inline]
960 fn add(self, rhs: &U64Vec2) -> U64Vec2 {
961 (*self).add(*rhs)
962 }
963}
964
965impl Add<U64Vec2> for &U64Vec2 {
966 type Output = U64Vec2;
967 #[inline]
968 fn add(self, rhs: U64Vec2) -> U64Vec2 {
969 (*self).add(rhs)
970 }
971}
972
973impl AddAssign for U64Vec2 {
974 #[inline]
975 fn add_assign(&mut self, rhs: Self) {
976 self.x.add_assign(rhs.x);
977 self.y.add_assign(rhs.y);
978 }
979}
980
981impl AddAssign<&Self> for U64Vec2 {
982 #[inline]
983 fn add_assign(&mut self, rhs: &Self) {
984 self.add_assign(*rhs);
985 }
986}
987
988impl Add<u64> for U64Vec2 {
989 type Output = Self;
990 #[inline]
991 fn add(self, rhs: u64) -> Self {
992 Self {
993 x: self.x.add(rhs),
994 y: self.y.add(rhs),
995 }
996 }
997}
998
999impl Add<&u64> for U64Vec2 {
1000 type Output = Self;
1001 #[inline]
1002 fn add(self, rhs: &u64) -> Self {
1003 self.add(*rhs)
1004 }
1005}
1006
1007impl Add<&u64> for &U64Vec2 {
1008 type Output = U64Vec2;
1009 #[inline]
1010 fn add(self, rhs: &u64) -> U64Vec2 {
1011 (*self).add(*rhs)
1012 }
1013}
1014
1015impl Add<u64> for &U64Vec2 {
1016 type Output = U64Vec2;
1017 #[inline]
1018 fn add(self, rhs: u64) -> U64Vec2 {
1019 (*self).add(rhs)
1020 }
1021}
1022
1023impl AddAssign<u64> for U64Vec2 {
1024 #[inline]
1025 fn add_assign(&mut self, rhs: u64) {
1026 self.x.add_assign(rhs);
1027 self.y.add_assign(rhs);
1028 }
1029}
1030
1031impl AddAssign<&u64> for U64Vec2 {
1032 #[inline]
1033 fn add_assign(&mut self, rhs: &u64) {
1034 self.add_assign(*rhs);
1035 }
1036}
1037
1038impl Add<U64Vec2> for u64 {
1039 type Output = U64Vec2;
1040 #[inline]
1041 fn add(self, rhs: U64Vec2) -> U64Vec2 {
1042 U64Vec2 {
1043 x: self.add(rhs.x),
1044 y: self.add(rhs.y),
1045 }
1046 }
1047}
1048
1049impl Add<&U64Vec2> for u64 {
1050 type Output = U64Vec2;
1051 #[inline]
1052 fn add(self, rhs: &U64Vec2) -> U64Vec2 {
1053 self.add(*rhs)
1054 }
1055}
1056
1057impl Add<&U64Vec2> for &u64 {
1058 type Output = U64Vec2;
1059 #[inline]
1060 fn add(self, rhs: &U64Vec2) -> U64Vec2 {
1061 (*self).add(*rhs)
1062 }
1063}
1064
1065impl Add<U64Vec2> for &u64 {
1066 type Output = U64Vec2;
1067 #[inline]
1068 fn add(self, rhs: U64Vec2) -> U64Vec2 {
1069 (*self).add(rhs)
1070 }
1071}
1072
1073impl Sub for U64Vec2 {
1074 type Output = Self;
1075 #[inline]
1076 fn sub(self, rhs: Self) -> Self {
1077 Self {
1078 x: self.x.sub(rhs.x),
1079 y: self.y.sub(rhs.y),
1080 }
1081 }
1082}
1083
1084impl Sub<&Self> for U64Vec2 {
1085 type Output = Self;
1086 #[inline]
1087 fn sub(self, rhs: &Self) -> Self {
1088 self.sub(*rhs)
1089 }
1090}
1091
1092impl Sub<&U64Vec2> for &U64Vec2 {
1093 type Output = U64Vec2;
1094 #[inline]
1095 fn sub(self, rhs: &U64Vec2) -> U64Vec2 {
1096 (*self).sub(*rhs)
1097 }
1098}
1099
1100impl Sub<U64Vec2> for &U64Vec2 {
1101 type Output = U64Vec2;
1102 #[inline]
1103 fn sub(self, rhs: U64Vec2) -> U64Vec2 {
1104 (*self).sub(rhs)
1105 }
1106}
1107
1108impl SubAssign for U64Vec2 {
1109 #[inline]
1110 fn sub_assign(&mut self, rhs: Self) {
1111 self.x.sub_assign(rhs.x);
1112 self.y.sub_assign(rhs.y);
1113 }
1114}
1115
1116impl SubAssign<&Self> for U64Vec2 {
1117 #[inline]
1118 fn sub_assign(&mut self, rhs: &Self) {
1119 self.sub_assign(*rhs);
1120 }
1121}
1122
1123impl Sub<u64> for U64Vec2 {
1124 type Output = Self;
1125 #[inline]
1126 fn sub(self, rhs: u64) -> Self {
1127 Self {
1128 x: self.x.sub(rhs),
1129 y: self.y.sub(rhs),
1130 }
1131 }
1132}
1133
1134impl Sub<&u64> for U64Vec2 {
1135 type Output = Self;
1136 #[inline]
1137 fn sub(self, rhs: &u64) -> Self {
1138 self.sub(*rhs)
1139 }
1140}
1141
1142impl Sub<&u64> for &U64Vec2 {
1143 type Output = U64Vec2;
1144 #[inline]
1145 fn sub(self, rhs: &u64) -> U64Vec2 {
1146 (*self).sub(*rhs)
1147 }
1148}
1149
1150impl Sub<u64> for &U64Vec2 {
1151 type Output = U64Vec2;
1152 #[inline]
1153 fn sub(self, rhs: u64) -> U64Vec2 {
1154 (*self).sub(rhs)
1155 }
1156}
1157
1158impl SubAssign<u64> for U64Vec2 {
1159 #[inline]
1160 fn sub_assign(&mut self, rhs: u64) {
1161 self.x.sub_assign(rhs);
1162 self.y.sub_assign(rhs);
1163 }
1164}
1165
1166impl SubAssign<&u64> for U64Vec2 {
1167 #[inline]
1168 fn sub_assign(&mut self, rhs: &u64) {
1169 self.sub_assign(*rhs);
1170 }
1171}
1172
1173impl Sub<U64Vec2> for u64 {
1174 type Output = U64Vec2;
1175 #[inline]
1176 fn sub(self, rhs: U64Vec2) -> U64Vec2 {
1177 U64Vec2 {
1178 x: self.sub(rhs.x),
1179 y: self.sub(rhs.y),
1180 }
1181 }
1182}
1183
1184impl Sub<&U64Vec2> for u64 {
1185 type Output = U64Vec2;
1186 #[inline]
1187 fn sub(self, rhs: &U64Vec2) -> U64Vec2 {
1188 self.sub(*rhs)
1189 }
1190}
1191
1192impl Sub<&U64Vec2> for &u64 {
1193 type Output = U64Vec2;
1194 #[inline]
1195 fn sub(self, rhs: &U64Vec2) -> U64Vec2 {
1196 (*self).sub(*rhs)
1197 }
1198}
1199
1200impl Sub<U64Vec2> for &u64 {
1201 type Output = U64Vec2;
1202 #[inline]
1203 fn sub(self, rhs: U64Vec2) -> U64Vec2 {
1204 (*self).sub(rhs)
1205 }
1206}
1207
1208impl Rem for U64Vec2 {
1209 type Output = Self;
1210 #[inline]
1211 fn rem(self, rhs: Self) -> Self {
1212 Self {
1213 x: self.x.rem(rhs.x),
1214 y: self.y.rem(rhs.y),
1215 }
1216 }
1217}
1218
1219impl Rem<&Self> for U64Vec2 {
1220 type Output = Self;
1221 #[inline]
1222 fn rem(self, rhs: &Self) -> Self {
1223 self.rem(*rhs)
1224 }
1225}
1226
1227impl Rem<&U64Vec2> for &U64Vec2 {
1228 type Output = U64Vec2;
1229 #[inline]
1230 fn rem(self, rhs: &U64Vec2) -> U64Vec2 {
1231 (*self).rem(*rhs)
1232 }
1233}
1234
1235impl Rem<U64Vec2> for &U64Vec2 {
1236 type Output = U64Vec2;
1237 #[inline]
1238 fn rem(self, rhs: U64Vec2) -> U64Vec2 {
1239 (*self).rem(rhs)
1240 }
1241}
1242
1243impl RemAssign for U64Vec2 {
1244 #[inline]
1245 fn rem_assign(&mut self, rhs: Self) {
1246 self.x.rem_assign(rhs.x);
1247 self.y.rem_assign(rhs.y);
1248 }
1249}
1250
1251impl RemAssign<&Self> for U64Vec2 {
1252 #[inline]
1253 fn rem_assign(&mut self, rhs: &Self) {
1254 self.rem_assign(*rhs);
1255 }
1256}
1257
1258impl Rem<u64> for U64Vec2 {
1259 type Output = Self;
1260 #[inline]
1261 fn rem(self, rhs: u64) -> Self {
1262 Self {
1263 x: self.x.rem(rhs),
1264 y: self.y.rem(rhs),
1265 }
1266 }
1267}
1268
1269impl Rem<&u64> for U64Vec2 {
1270 type Output = Self;
1271 #[inline]
1272 fn rem(self, rhs: &u64) -> Self {
1273 self.rem(*rhs)
1274 }
1275}
1276
1277impl Rem<&u64> for &U64Vec2 {
1278 type Output = U64Vec2;
1279 #[inline]
1280 fn rem(self, rhs: &u64) -> U64Vec2 {
1281 (*self).rem(*rhs)
1282 }
1283}
1284
1285impl Rem<u64> for &U64Vec2 {
1286 type Output = U64Vec2;
1287 #[inline]
1288 fn rem(self, rhs: u64) -> U64Vec2 {
1289 (*self).rem(rhs)
1290 }
1291}
1292
1293impl RemAssign<u64> for U64Vec2 {
1294 #[inline]
1295 fn rem_assign(&mut self, rhs: u64) {
1296 self.x.rem_assign(rhs);
1297 self.y.rem_assign(rhs);
1298 }
1299}
1300
1301impl RemAssign<&u64> for U64Vec2 {
1302 #[inline]
1303 fn rem_assign(&mut self, rhs: &u64) {
1304 self.rem_assign(*rhs);
1305 }
1306}
1307
1308impl Rem<U64Vec2> for u64 {
1309 type Output = U64Vec2;
1310 #[inline]
1311 fn rem(self, rhs: U64Vec2) -> U64Vec2 {
1312 U64Vec2 {
1313 x: self.rem(rhs.x),
1314 y: self.rem(rhs.y),
1315 }
1316 }
1317}
1318
1319impl Rem<&U64Vec2> for u64 {
1320 type Output = U64Vec2;
1321 #[inline]
1322 fn rem(self, rhs: &U64Vec2) -> U64Vec2 {
1323 self.rem(*rhs)
1324 }
1325}
1326
1327impl Rem<&U64Vec2> for &u64 {
1328 type Output = U64Vec2;
1329 #[inline]
1330 fn rem(self, rhs: &U64Vec2) -> U64Vec2 {
1331 (*self).rem(*rhs)
1332 }
1333}
1334
1335impl Rem<U64Vec2> for &u64 {
1336 type Output = U64Vec2;
1337 #[inline]
1338 fn rem(self, rhs: U64Vec2) -> U64Vec2 {
1339 (*self).rem(rhs)
1340 }
1341}
1342
impl AsRef<[u64; 2]> for U64Vec2 {
    #[inline]
    fn as_ref(&self) -> &[u64; 2] {
        // SAFETY: `U64Vec2` is `#[repr(C)]` with exactly two `u64` fields, so it
        // has the same size and compatible layout as `[u64; 2]`, and the borrow
        // keeps the source alive for the lifetime of the returned reference.
        unsafe { &*(self as *const Self as *const [u64; 2]) }
    }
}
1349
impl AsMut<[u64; 2]> for U64Vec2 {
    #[inline]
    fn as_mut(&mut self) -> &mut [u64; 2] {
        // SAFETY: `U64Vec2` is `#[repr(C)]` with exactly two `u64` fields, so it
        // has the same size and compatible layout as `[u64; 2]`; the exclusive
        // borrow guarantees no aliasing for the returned mutable reference.
        unsafe { &mut *(self as *mut Self as *mut [u64; 2]) }
    }
}
1356
1357impl Sum for U64Vec2 {
1358 #[inline]
1359 fn sum<I>(iter: I) -> Self
1360 where
1361 I: Iterator<Item = Self>,
1362 {
1363 iter.fold(Self::ZERO, Self::add)
1364 }
1365}
1366
1367impl<'a> Sum<&'a Self> for U64Vec2 {
1368 #[inline]
1369 fn sum<I>(iter: I) -> Self
1370 where
1371 I: Iterator<Item = &'a Self>,
1372 {
1373 iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
1374 }
1375}
1376
1377impl Product for U64Vec2 {
1378 #[inline]
1379 fn product<I>(iter: I) -> Self
1380 where
1381 I: Iterator<Item = Self>,
1382 {
1383 iter.fold(Self::ONE, Self::mul)
1384 }
1385}
1386
1387impl<'a> Product<&'a Self> for U64Vec2 {
1388 #[inline]
1389 fn product<I>(iter: I) -> Self
1390 where
1391 I: Iterator<Item = &'a Self>,
1392 {
1393 iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
1394 }
1395}
1396
1397impl Not for U64Vec2 {
1398 type Output = Self;
1399 #[inline]
1400 fn not(self) -> Self {
1401 Self {
1402 x: self.x.not(),
1403 y: self.y.not(),
1404 }
1405 }
1406}
1407
1408impl Not for &U64Vec2 {
1409 type Output = U64Vec2;
1410 #[inline]
1411 fn not(self) -> U64Vec2 {
1412 (*self).not()
1413 }
1414}
1415
1416impl BitAnd for U64Vec2 {
1417 type Output = Self;
1418 #[inline]
1419 fn bitand(self, rhs: Self) -> Self::Output {
1420 Self {
1421 x: self.x.bitand(rhs.x),
1422 y: self.y.bitand(rhs.y),
1423 }
1424 }
1425}
1426
1427impl BitAnd<&Self> for U64Vec2 {
1428 type Output = Self;
1429 #[inline]
1430 fn bitand(self, rhs: &Self) -> Self {
1431 self.bitand(*rhs)
1432 }
1433}
1434
1435impl BitAnd<&U64Vec2> for &U64Vec2 {
1436 type Output = U64Vec2;
1437 #[inline]
1438 fn bitand(self, rhs: &U64Vec2) -> U64Vec2 {
1439 (*self).bitand(*rhs)
1440 }
1441}
1442
1443impl BitAnd<U64Vec2> for &U64Vec2 {
1444 type Output = U64Vec2;
1445 #[inline]
1446 fn bitand(self, rhs: U64Vec2) -> U64Vec2 {
1447 (*self).bitand(rhs)
1448 }
1449}
1450
1451impl BitAndAssign for U64Vec2 {
1452 #[inline]
1453 fn bitand_assign(&mut self, rhs: Self) {
1454 *self = self.bitand(rhs);
1455 }
1456}
1457
1458impl BitAndAssign<&Self> for U64Vec2 {
1459 #[inline]
1460 fn bitand_assign(&mut self, rhs: &Self) {
1461 self.bitand_assign(*rhs);
1462 }
1463}
1464
1465impl BitOr for U64Vec2 {
1466 type Output = Self;
1467 #[inline]
1468 fn bitor(self, rhs: Self) -> Self::Output {
1469 Self {
1470 x: self.x.bitor(rhs.x),
1471 y: self.y.bitor(rhs.y),
1472 }
1473 }
1474}
1475
1476impl BitOr<&Self> for U64Vec2 {
1477 type Output = Self;
1478 #[inline]
1479 fn bitor(self, rhs: &Self) -> Self {
1480 self.bitor(*rhs)
1481 }
1482}
1483
1484impl BitOr<&U64Vec2> for &U64Vec2 {
1485 type Output = U64Vec2;
1486 #[inline]
1487 fn bitor(self, rhs: &U64Vec2) -> U64Vec2 {
1488 (*self).bitor(*rhs)
1489 }
1490}
1491
1492impl BitOr<U64Vec2> for &U64Vec2 {
1493 type Output = U64Vec2;
1494 #[inline]
1495 fn bitor(self, rhs: U64Vec2) -> U64Vec2 {
1496 (*self).bitor(rhs)
1497 }
1498}
1499
1500impl BitOrAssign for U64Vec2 {
1501 #[inline]
1502 fn bitor_assign(&mut self, rhs: Self) {
1503 *self = self.bitor(rhs);
1504 }
1505}
1506
1507impl BitOrAssign<&Self> for U64Vec2 {
1508 #[inline]
1509 fn bitor_assign(&mut self, rhs: &Self) {
1510 self.bitor_assign(*rhs);
1511 }
1512}
1513
1514impl BitXor for U64Vec2 {
1515 type Output = Self;
1516 #[inline]
1517 fn bitxor(self, rhs: Self) -> Self::Output {
1518 Self {
1519 x: self.x.bitxor(rhs.x),
1520 y: self.y.bitxor(rhs.y),
1521 }
1522 }
1523}
1524
1525impl BitXor<&Self> for U64Vec2 {
1526 type Output = Self;
1527 #[inline]
1528 fn bitxor(self, rhs: &Self) -> Self {
1529 self.bitxor(*rhs)
1530 }
1531}
1532
1533impl BitXor<&U64Vec2> for &U64Vec2 {
1534 type Output = U64Vec2;
1535 #[inline]
1536 fn bitxor(self, rhs: &U64Vec2) -> U64Vec2 {
1537 (*self).bitxor(*rhs)
1538 }
1539}
1540
1541impl BitXor<U64Vec2> for &U64Vec2 {
1542 type Output = U64Vec2;
1543 #[inline]
1544 fn bitxor(self, rhs: U64Vec2) -> U64Vec2 {
1545 (*self).bitxor(rhs)
1546 }
1547}
1548
1549impl BitXorAssign for U64Vec2 {
1550 #[inline]
1551 fn bitxor_assign(&mut self, rhs: Self) {
1552 *self = self.bitxor(rhs);
1553 }
1554}
1555
1556impl BitXorAssign<&Self> for U64Vec2 {
1557 #[inline]
1558 fn bitxor_assign(&mut self, rhs: &Self) {
1559 self.bitxor_assign(*rhs);
1560 }
1561}
1562
1563impl BitAnd<u64> for U64Vec2 {
1564 type Output = Self;
1565 #[inline]
1566 fn bitand(self, rhs: u64) -> Self::Output {
1567 Self {
1568 x: self.x.bitand(rhs),
1569 y: self.y.bitand(rhs),
1570 }
1571 }
1572}
1573
1574impl BitAnd<&u64> for U64Vec2 {
1575 type Output = Self;
1576 #[inline]
1577 fn bitand(self, rhs: &u64) -> Self {
1578 self.bitand(*rhs)
1579 }
1580}
1581
1582impl BitAnd<&u64> for &U64Vec2 {
1583 type Output = U64Vec2;
1584 #[inline]
1585 fn bitand(self, rhs: &u64) -> U64Vec2 {
1586 (*self).bitand(*rhs)
1587 }
1588}
1589
1590impl BitAnd<u64> for &U64Vec2 {
1591 type Output = U64Vec2;
1592 #[inline]
1593 fn bitand(self, rhs: u64) -> U64Vec2 {
1594 (*self).bitand(rhs)
1595 }
1596}
1597
1598impl BitAndAssign<u64> for U64Vec2 {
1599 #[inline]
1600 fn bitand_assign(&mut self, rhs: u64) {
1601 *self = self.bitand(rhs);
1602 }
1603}
1604
1605impl BitAndAssign<&u64> for U64Vec2 {
1606 #[inline]
1607 fn bitand_assign(&mut self, rhs: &u64) {
1608 self.bitand_assign(*rhs);
1609 }
1610}
1611
1612impl BitOr<u64> for U64Vec2 {
1613 type Output = Self;
1614 #[inline]
1615 fn bitor(self, rhs: u64) -> Self::Output {
1616 Self {
1617 x: self.x.bitor(rhs),
1618 y: self.y.bitor(rhs),
1619 }
1620 }
1621}
1622
1623impl BitOr<&u64> for U64Vec2 {
1624 type Output = Self;
1625 #[inline]
1626 fn bitor(self, rhs: &u64) -> Self {
1627 self.bitor(*rhs)
1628 }
1629}
1630
1631impl BitOr<&u64> for &U64Vec2 {
1632 type Output = U64Vec2;
1633 #[inline]
1634 fn bitor(self, rhs: &u64) -> U64Vec2 {
1635 (*self).bitor(*rhs)
1636 }
1637}
1638
1639impl BitOr<u64> for &U64Vec2 {
1640 type Output = U64Vec2;
1641 #[inline]
1642 fn bitor(self, rhs: u64) -> U64Vec2 {
1643 (*self).bitor(rhs)
1644 }
1645}
1646
1647impl BitOrAssign<u64> for U64Vec2 {
1648 #[inline]
1649 fn bitor_assign(&mut self, rhs: u64) {
1650 *self = self.bitor(rhs);
1651 }
1652}
1653
1654impl BitOrAssign<&u64> for U64Vec2 {
1655 #[inline]
1656 fn bitor_assign(&mut self, rhs: &u64) {
1657 self.bitor_assign(*rhs);
1658 }
1659}
1660
1661impl BitXor<u64> for U64Vec2 {
1662 type Output = Self;
1663 #[inline]
1664 fn bitxor(self, rhs: u64) -> Self::Output {
1665 Self {
1666 x: self.x.bitxor(rhs),
1667 y: self.y.bitxor(rhs),
1668 }
1669 }
1670}
1671
1672impl BitXor<&u64> for U64Vec2 {
1673 type Output = Self;
1674 #[inline]
1675 fn bitxor(self, rhs: &u64) -> Self {
1676 self.bitxor(*rhs)
1677 }
1678}
1679
1680impl BitXor<&u64> for &U64Vec2 {
1681 type Output = U64Vec2;
1682 #[inline]
1683 fn bitxor(self, rhs: &u64) -> U64Vec2 {
1684 (*self).bitxor(*rhs)
1685 }
1686}
1687
1688impl BitXor<u64> for &U64Vec2 {
1689 type Output = U64Vec2;
1690 #[inline]
1691 fn bitxor(self, rhs: u64) -> U64Vec2 {
1692 (*self).bitxor(rhs)
1693 }
1694}
1695
1696impl BitXorAssign<u64> for U64Vec2 {
1697 #[inline]
1698 fn bitxor_assign(&mut self, rhs: u64) {
1699 *self = self.bitxor(rhs);
1700 }
1701}
1702
1703impl BitXorAssign<&u64> for U64Vec2 {
1704 #[inline]
1705 fn bitxor_assign(&mut self, rhs: &u64) {
1706 self.bitxor_assign(*rhs);
1707 }
1708}
1709
1710impl Shl<i8> for U64Vec2 {
1711 type Output = Self;
1712 #[inline]
1713 fn shl(self, rhs: i8) -> Self::Output {
1714 Self {
1715 x: self.x.shl(rhs),
1716 y: self.y.shl(rhs),
1717 }
1718 }
1719}
1720
1721impl Shl<&i8> for U64Vec2 {
1722 type Output = Self;
1723 #[inline]
1724 fn shl(self, rhs: &i8) -> Self {
1725 self.shl(*rhs)
1726 }
1727}
1728
1729impl Shl<&i8> for &U64Vec2 {
1730 type Output = U64Vec2;
1731 #[inline]
1732 fn shl(self, rhs: &i8) -> U64Vec2 {
1733 (*self).shl(*rhs)
1734 }
1735}
1736
1737impl Shl<i8> for &U64Vec2 {
1738 type Output = U64Vec2;
1739 #[inline]
1740 fn shl(self, rhs: i8) -> U64Vec2 {
1741 (*self).shl(rhs)
1742 }
1743}
1744
1745impl ShlAssign<i8> for U64Vec2 {
1746 #[inline]
1747 fn shl_assign(&mut self, rhs: i8) {
1748 *self = self.shl(rhs);
1749 }
1750}
1751
1752impl ShlAssign<&i8> for U64Vec2 {
1753 #[inline]
1754 fn shl_assign(&mut self, rhs: &i8) {
1755 self.shl_assign(*rhs);
1756 }
1757}
1758
1759impl Shr<i8> for U64Vec2 {
1760 type Output = Self;
1761 #[inline]
1762 fn shr(self, rhs: i8) -> Self::Output {
1763 Self {
1764 x: self.x.shr(rhs),
1765 y: self.y.shr(rhs),
1766 }
1767 }
1768}
1769
1770impl Shr<&i8> for U64Vec2 {
1771 type Output = Self;
1772 #[inline]
1773 fn shr(self, rhs: &i8) -> Self {
1774 self.shr(*rhs)
1775 }
1776}
1777
1778impl Shr<&i8> for &U64Vec2 {
1779 type Output = U64Vec2;
1780 #[inline]
1781 fn shr(self, rhs: &i8) -> U64Vec2 {
1782 (*self).shr(*rhs)
1783 }
1784}
1785
1786impl Shr<i8> for &U64Vec2 {
1787 type Output = U64Vec2;
1788 #[inline]
1789 fn shr(self, rhs: i8) -> U64Vec2 {
1790 (*self).shr(rhs)
1791 }
1792}
1793
1794impl ShrAssign<i8> for U64Vec2 {
1795 #[inline]
1796 fn shr_assign(&mut self, rhs: i8) {
1797 *self = self.shr(rhs);
1798 }
1799}
1800
1801impl ShrAssign<&i8> for U64Vec2 {
1802 #[inline]
1803 fn shr_assign(&mut self, rhs: &i8) {
1804 self.shr_assign(*rhs);
1805 }
1806}
1807
1808impl Shl<i16> for U64Vec2 {
1809 type Output = Self;
1810 #[inline]
1811 fn shl(self, rhs: i16) -> Self::Output {
1812 Self {
1813 x: self.x.shl(rhs),
1814 y: self.y.shl(rhs),
1815 }
1816 }
1817}
1818
1819impl Shl<&i16> for U64Vec2 {
1820 type Output = Self;
1821 #[inline]
1822 fn shl(self, rhs: &i16) -> Self {
1823 self.shl(*rhs)
1824 }
1825}
1826
1827impl Shl<&i16> for &U64Vec2 {
1828 type Output = U64Vec2;
1829 #[inline]
1830 fn shl(self, rhs: &i16) -> U64Vec2 {
1831 (*self).shl(*rhs)
1832 }
1833}
1834
1835impl Shl<i16> for &U64Vec2 {
1836 type Output = U64Vec2;
1837 #[inline]
1838 fn shl(self, rhs: i16) -> U64Vec2 {
1839 (*self).shl(rhs)
1840 }
1841}
1842
1843impl ShlAssign<i16> for U64Vec2 {
1844 #[inline]
1845 fn shl_assign(&mut self, rhs: i16) {
1846 *self = self.shl(rhs);
1847 }
1848}
1849
1850impl ShlAssign<&i16> for U64Vec2 {
1851 #[inline]
1852 fn shl_assign(&mut self, rhs: &i16) {
1853 self.shl_assign(*rhs);
1854 }
1855}
1856
1857impl Shr<i16> for U64Vec2 {
1858 type Output = Self;
1859 #[inline]
1860 fn shr(self, rhs: i16) -> Self::Output {
1861 Self {
1862 x: self.x.shr(rhs),
1863 y: self.y.shr(rhs),
1864 }
1865 }
1866}
1867
1868impl Shr<&i16> for U64Vec2 {
1869 type Output = Self;
1870 #[inline]
1871 fn shr(self, rhs: &i16) -> Self {
1872 self.shr(*rhs)
1873 }
1874}
1875
1876impl Shr<&i16> for &U64Vec2 {
1877 type Output = U64Vec2;
1878 #[inline]
1879 fn shr(self, rhs: &i16) -> U64Vec2 {
1880 (*self).shr(*rhs)
1881 }
1882}
1883
1884impl Shr<i16> for &U64Vec2 {
1885 type Output = U64Vec2;
1886 #[inline]
1887 fn shr(self, rhs: i16) -> U64Vec2 {
1888 (*self).shr(rhs)
1889 }
1890}
1891
1892impl ShrAssign<i16> for U64Vec2 {
1893 #[inline]
1894 fn shr_assign(&mut self, rhs: i16) {
1895 *self = self.shr(rhs);
1896 }
1897}
1898
1899impl ShrAssign<&i16> for U64Vec2 {
1900 #[inline]
1901 fn shr_assign(&mut self, rhs: &i16) {
1902 self.shr_assign(*rhs);
1903 }
1904}
1905
1906impl Shl<i32> for U64Vec2 {
1907 type Output = Self;
1908 #[inline]
1909 fn shl(self, rhs: i32) -> Self::Output {
1910 Self {
1911 x: self.x.shl(rhs),
1912 y: self.y.shl(rhs),
1913 }
1914 }
1915}
1916
1917impl Shl<&i32> for U64Vec2 {
1918 type Output = Self;
1919 #[inline]
1920 fn shl(self, rhs: &i32) -> Self {
1921 self.shl(*rhs)
1922 }
1923}
1924
1925impl Shl<&i32> for &U64Vec2 {
1926 type Output = U64Vec2;
1927 #[inline]
1928 fn shl(self, rhs: &i32) -> U64Vec2 {
1929 (*self).shl(*rhs)
1930 }
1931}
1932
1933impl Shl<i32> for &U64Vec2 {
1934 type Output = U64Vec2;
1935 #[inline]
1936 fn shl(self, rhs: i32) -> U64Vec2 {
1937 (*self).shl(rhs)
1938 }
1939}
1940
1941impl ShlAssign<i32> for U64Vec2 {
1942 #[inline]
1943 fn shl_assign(&mut self, rhs: i32) {
1944 *self = self.shl(rhs);
1945 }
1946}
1947
1948impl ShlAssign<&i32> for U64Vec2 {
1949 #[inline]
1950 fn shl_assign(&mut self, rhs: &i32) {
1951 self.shl_assign(*rhs);
1952 }
1953}
1954
1955impl Shr<i32> for U64Vec2 {
1956 type Output = Self;
1957 #[inline]
1958 fn shr(self, rhs: i32) -> Self::Output {
1959 Self {
1960 x: self.x.shr(rhs),
1961 y: self.y.shr(rhs),
1962 }
1963 }
1964}
1965
1966impl Shr<&i32> for U64Vec2 {
1967 type Output = Self;
1968 #[inline]
1969 fn shr(self, rhs: &i32) -> Self {
1970 self.shr(*rhs)
1971 }
1972}
1973
1974impl Shr<&i32> for &U64Vec2 {
1975 type Output = U64Vec2;
1976 #[inline]
1977 fn shr(self, rhs: &i32) -> U64Vec2 {
1978 (*self).shr(*rhs)
1979 }
1980}
1981
1982impl Shr<i32> for &U64Vec2 {
1983 type Output = U64Vec2;
1984 #[inline]
1985 fn shr(self, rhs: i32) -> U64Vec2 {
1986 (*self).shr(rhs)
1987 }
1988}
1989
1990impl ShrAssign<i32> for U64Vec2 {
1991 #[inline]
1992 fn shr_assign(&mut self, rhs: i32) {
1993 *self = self.shr(rhs);
1994 }
1995}
1996
1997impl ShrAssign<&i32> for U64Vec2 {
1998 #[inline]
1999 fn shr_assign(&mut self, rhs: &i32) {
2000 self.shr_assign(*rhs);
2001 }
2002}
2003
2004impl Shl<i64> for U64Vec2 {
2005 type Output = Self;
2006 #[inline]
2007 fn shl(self, rhs: i64) -> Self::Output {
2008 Self {
2009 x: self.x.shl(rhs),
2010 y: self.y.shl(rhs),
2011 }
2012 }
2013}
2014
2015impl Shl<&i64> for U64Vec2 {
2016 type Output = Self;
2017 #[inline]
2018 fn shl(self, rhs: &i64) -> Self {
2019 self.shl(*rhs)
2020 }
2021}
2022
2023impl Shl<&i64> for &U64Vec2 {
2024 type Output = U64Vec2;
2025 #[inline]
2026 fn shl(self, rhs: &i64) -> U64Vec2 {
2027 (*self).shl(*rhs)
2028 }
2029}
2030
2031impl Shl<i64> for &U64Vec2 {
2032 type Output = U64Vec2;
2033 #[inline]
2034 fn shl(self, rhs: i64) -> U64Vec2 {
2035 (*self).shl(rhs)
2036 }
2037}
2038
2039impl ShlAssign<i64> for U64Vec2 {
2040 #[inline]
2041 fn shl_assign(&mut self, rhs: i64) {
2042 *self = self.shl(rhs);
2043 }
2044}
2045
2046impl ShlAssign<&i64> for U64Vec2 {
2047 #[inline]
2048 fn shl_assign(&mut self, rhs: &i64) {
2049 self.shl_assign(*rhs);
2050 }
2051}
2052
2053impl Shr<i64> for U64Vec2 {
2054 type Output = Self;
2055 #[inline]
2056 fn shr(self, rhs: i64) -> Self::Output {
2057 Self {
2058 x: self.x.shr(rhs),
2059 y: self.y.shr(rhs),
2060 }
2061 }
2062}
2063
2064impl Shr<&i64> for U64Vec2 {
2065 type Output = Self;
2066 #[inline]
2067 fn shr(self, rhs: &i64) -> Self {
2068 self.shr(*rhs)
2069 }
2070}
2071
2072impl Shr<&i64> for &U64Vec2 {
2073 type Output = U64Vec2;
2074 #[inline]
2075 fn shr(self, rhs: &i64) -> U64Vec2 {
2076 (*self).shr(*rhs)
2077 }
2078}
2079
2080impl Shr<i64> for &U64Vec2 {
2081 type Output = U64Vec2;
2082 #[inline]
2083 fn shr(self, rhs: i64) -> U64Vec2 {
2084 (*self).shr(rhs)
2085 }
2086}
2087
2088impl ShrAssign<i64> for U64Vec2 {
2089 #[inline]
2090 fn shr_assign(&mut self, rhs: i64) {
2091 *self = self.shr(rhs);
2092 }
2093}
2094
2095impl ShrAssign<&i64> for U64Vec2 {
2096 #[inline]
2097 fn shr_assign(&mut self, rhs: &i64) {
2098 self.shr_assign(*rhs);
2099 }
2100}
2101
2102impl Shl<u8> for U64Vec2 {
2103 type Output = Self;
2104 #[inline]
2105 fn shl(self, rhs: u8) -> Self::Output {
2106 Self {
2107 x: self.x.shl(rhs),
2108 y: self.y.shl(rhs),
2109 }
2110 }
2111}
2112
2113impl Shl<&u8> for U64Vec2 {
2114 type Output = Self;
2115 #[inline]
2116 fn shl(self, rhs: &u8) -> Self {
2117 self.shl(*rhs)
2118 }
2119}
2120
2121impl Shl<&u8> for &U64Vec2 {
2122 type Output = U64Vec2;
2123 #[inline]
2124 fn shl(self, rhs: &u8) -> U64Vec2 {
2125 (*self).shl(*rhs)
2126 }
2127}
2128
2129impl Shl<u8> for &U64Vec2 {
2130 type Output = U64Vec2;
2131 #[inline]
2132 fn shl(self, rhs: u8) -> U64Vec2 {
2133 (*self).shl(rhs)
2134 }
2135}
2136
2137impl ShlAssign<u8> for U64Vec2 {
2138 #[inline]
2139 fn shl_assign(&mut self, rhs: u8) {
2140 *self = self.shl(rhs);
2141 }
2142}
2143
2144impl ShlAssign<&u8> for U64Vec2 {
2145 #[inline]
2146 fn shl_assign(&mut self, rhs: &u8) {
2147 self.shl_assign(*rhs);
2148 }
2149}
2150
2151impl Shr<u8> for U64Vec2 {
2152 type Output = Self;
2153 #[inline]
2154 fn shr(self, rhs: u8) -> Self::Output {
2155 Self {
2156 x: self.x.shr(rhs),
2157 y: self.y.shr(rhs),
2158 }
2159 }
2160}
2161
2162impl Shr<&u8> for U64Vec2 {
2163 type Output = Self;
2164 #[inline]
2165 fn shr(self, rhs: &u8) -> Self {
2166 self.shr(*rhs)
2167 }
2168}
2169
2170impl Shr<&u8> for &U64Vec2 {
2171 type Output = U64Vec2;
2172 #[inline]
2173 fn shr(self, rhs: &u8) -> U64Vec2 {
2174 (*self).shr(*rhs)
2175 }
2176}
2177
2178impl Shr<u8> for &U64Vec2 {
2179 type Output = U64Vec2;
2180 #[inline]
2181 fn shr(self, rhs: u8) -> U64Vec2 {
2182 (*self).shr(rhs)
2183 }
2184}
2185
2186impl ShrAssign<u8> for U64Vec2 {
2187 #[inline]
2188 fn shr_assign(&mut self, rhs: u8) {
2189 *self = self.shr(rhs);
2190 }
2191}
2192
2193impl ShrAssign<&u8> for U64Vec2 {
2194 #[inline]
2195 fn shr_assign(&mut self, rhs: &u8) {
2196 self.shr_assign(*rhs);
2197 }
2198}
2199
2200impl Shl<u16> for U64Vec2 {
2201 type Output = Self;
2202 #[inline]
2203 fn shl(self, rhs: u16) -> Self::Output {
2204 Self {
2205 x: self.x.shl(rhs),
2206 y: self.y.shl(rhs),
2207 }
2208 }
2209}
2210
2211impl Shl<&u16> for U64Vec2 {
2212 type Output = Self;
2213 #[inline]
2214 fn shl(self, rhs: &u16) -> Self {
2215 self.shl(*rhs)
2216 }
2217}
2218
2219impl Shl<&u16> for &U64Vec2 {
2220 type Output = U64Vec2;
2221 #[inline]
2222 fn shl(self, rhs: &u16) -> U64Vec2 {
2223 (*self).shl(*rhs)
2224 }
2225}
2226
2227impl Shl<u16> for &U64Vec2 {
2228 type Output = U64Vec2;
2229 #[inline]
2230 fn shl(self, rhs: u16) -> U64Vec2 {
2231 (*self).shl(rhs)
2232 }
2233}
2234
2235impl ShlAssign<u16> for U64Vec2 {
2236 #[inline]
2237 fn shl_assign(&mut self, rhs: u16) {
2238 *self = self.shl(rhs);
2239 }
2240}
2241
2242impl ShlAssign<&u16> for U64Vec2 {
2243 #[inline]
2244 fn shl_assign(&mut self, rhs: &u16) {
2245 self.shl_assign(*rhs);
2246 }
2247}
2248
2249impl Shr<u16> for U64Vec2 {
2250 type Output = Self;
2251 #[inline]
2252 fn shr(self, rhs: u16) -> Self::Output {
2253 Self {
2254 x: self.x.shr(rhs),
2255 y: self.y.shr(rhs),
2256 }
2257 }
2258}
2259
2260impl Shr<&u16> for U64Vec2 {
2261 type Output = Self;
2262 #[inline]
2263 fn shr(self, rhs: &u16) -> Self {
2264 self.shr(*rhs)
2265 }
2266}
2267
2268impl Shr<&u16> for &U64Vec2 {
2269 type Output = U64Vec2;
2270 #[inline]
2271 fn shr(self, rhs: &u16) -> U64Vec2 {
2272 (*self).shr(*rhs)
2273 }
2274}
2275
2276impl Shr<u16> for &U64Vec2 {
2277 type Output = U64Vec2;
2278 #[inline]
2279 fn shr(self, rhs: u16) -> U64Vec2 {
2280 (*self).shr(rhs)
2281 }
2282}
2283
2284impl ShrAssign<u16> for U64Vec2 {
2285 #[inline]
2286 fn shr_assign(&mut self, rhs: u16) {
2287 *self = self.shr(rhs);
2288 }
2289}
2290
2291impl ShrAssign<&u16> for U64Vec2 {
2292 #[inline]
2293 fn shr_assign(&mut self, rhs: &u16) {
2294 self.shr_assign(*rhs);
2295 }
2296}
2297
2298impl Shl<u32> for U64Vec2 {
2299 type Output = Self;
2300 #[inline]
2301 fn shl(self, rhs: u32) -> Self::Output {
2302 Self {
2303 x: self.x.shl(rhs),
2304 y: self.y.shl(rhs),
2305 }
2306 }
2307}
2308
2309impl Shl<&u32> for U64Vec2 {
2310 type Output = Self;
2311 #[inline]
2312 fn shl(self, rhs: &u32) -> Self {
2313 self.shl(*rhs)
2314 }
2315}
2316
2317impl Shl<&u32> for &U64Vec2 {
2318 type Output = U64Vec2;
2319 #[inline]
2320 fn shl(self, rhs: &u32) -> U64Vec2 {
2321 (*self).shl(*rhs)
2322 }
2323}
2324
2325impl Shl<u32> for &U64Vec2 {
2326 type Output = U64Vec2;
2327 #[inline]
2328 fn shl(self, rhs: u32) -> U64Vec2 {
2329 (*self).shl(rhs)
2330 }
2331}
2332
2333impl ShlAssign<u32> for U64Vec2 {
2334 #[inline]
2335 fn shl_assign(&mut self, rhs: u32) {
2336 *self = self.shl(rhs);
2337 }
2338}
2339
2340impl ShlAssign<&u32> for U64Vec2 {
2341 #[inline]
2342 fn shl_assign(&mut self, rhs: &u32) {
2343 self.shl_assign(*rhs);
2344 }
2345}
2346
2347impl Shr<u32> for U64Vec2 {
2348 type Output = Self;
2349 #[inline]
2350 fn shr(self, rhs: u32) -> Self::Output {
2351 Self {
2352 x: self.x.shr(rhs),
2353 y: self.y.shr(rhs),
2354 }
2355 }
2356}
2357
2358impl Shr<&u32> for U64Vec2 {
2359 type Output = Self;
2360 #[inline]
2361 fn shr(self, rhs: &u32) -> Self {
2362 self.shr(*rhs)
2363 }
2364}
2365
2366impl Shr<&u32> for &U64Vec2 {
2367 type Output = U64Vec2;
2368 #[inline]
2369 fn shr(self, rhs: &u32) -> U64Vec2 {
2370 (*self).shr(*rhs)
2371 }
2372}
2373
2374impl Shr<u32> for &U64Vec2 {
2375 type Output = U64Vec2;
2376 #[inline]
2377 fn shr(self, rhs: u32) -> U64Vec2 {
2378 (*self).shr(rhs)
2379 }
2380}
2381
2382impl ShrAssign<u32> for U64Vec2 {
2383 #[inline]
2384 fn shr_assign(&mut self, rhs: u32) {
2385 *self = self.shr(rhs);
2386 }
2387}
2388
2389impl ShrAssign<&u32> for U64Vec2 {
2390 #[inline]
2391 fn shr_assign(&mut self, rhs: &u32) {
2392 self.shr_assign(*rhs);
2393 }
2394}
2395
2396impl Shl<u64> for U64Vec2 {
2397 type Output = Self;
2398 #[inline]
2399 fn shl(self, rhs: u64) -> Self::Output {
2400 Self {
2401 x: self.x.shl(rhs),
2402 y: self.y.shl(rhs),
2403 }
2404 }
2405}
2406
2407impl Shl<&u64> for U64Vec2 {
2408 type Output = Self;
2409 #[inline]
2410 fn shl(self, rhs: &u64) -> Self {
2411 self.shl(*rhs)
2412 }
2413}
2414
2415impl Shl<&u64> for &U64Vec2 {
2416 type Output = U64Vec2;
2417 #[inline]
2418 fn shl(self, rhs: &u64) -> U64Vec2 {
2419 (*self).shl(*rhs)
2420 }
2421}
2422
2423impl Shl<u64> for &U64Vec2 {
2424 type Output = U64Vec2;
2425 #[inline]
2426 fn shl(self, rhs: u64) -> U64Vec2 {
2427 (*self).shl(rhs)
2428 }
2429}
2430
2431impl ShlAssign<u64> for U64Vec2 {
2432 #[inline]
2433 fn shl_assign(&mut self, rhs: u64) {
2434 *self = self.shl(rhs);
2435 }
2436}
2437
2438impl ShlAssign<&u64> for U64Vec2 {
2439 #[inline]
2440 fn shl_assign(&mut self, rhs: &u64) {
2441 self.shl_assign(*rhs);
2442 }
2443}
2444
2445impl Shr<u64> for U64Vec2 {
2446 type Output = Self;
2447 #[inline]
2448 fn shr(self, rhs: u64) -> Self::Output {
2449 Self {
2450 x: self.x.shr(rhs),
2451 y: self.y.shr(rhs),
2452 }
2453 }
2454}
2455
2456impl Shr<&u64> for U64Vec2 {
2457 type Output = Self;
2458 #[inline]
2459 fn shr(self, rhs: &u64) -> Self {
2460 self.shr(*rhs)
2461 }
2462}
2463
2464impl Shr<&u64> for &U64Vec2 {
2465 type Output = U64Vec2;
2466 #[inline]
2467 fn shr(self, rhs: &u64) -> U64Vec2 {
2468 (*self).shr(*rhs)
2469 }
2470}
2471
2472impl Shr<u64> for &U64Vec2 {
2473 type Output = U64Vec2;
2474 #[inline]
2475 fn shr(self, rhs: u64) -> U64Vec2 {
2476 (*self).shr(rhs)
2477 }
2478}
2479
2480impl ShrAssign<u64> for U64Vec2 {
2481 #[inline]
2482 fn shr_assign(&mut self, rhs: u64) {
2483 *self = self.shr(rhs);
2484 }
2485}
2486
2487impl ShrAssign<&u64> for U64Vec2 {
2488 #[inline]
2489 fn shr_assign(&mut self, rhs: &u64) {
2490 self.shr_assign(*rhs);
2491 }
2492}
2493
2494impl Shl<IVec2> for U64Vec2 {
2495 type Output = Self;
2496 #[inline]
2497 fn shl(self, rhs: IVec2) -> Self {
2498 Self {
2499 x: self.x.shl(rhs.x),
2500 y: self.y.shl(rhs.y),
2501 }
2502 }
2503}
2504
2505impl Shl<&IVec2> for U64Vec2 {
2506 type Output = Self;
2507 #[inline]
2508 fn shl(self, rhs: &IVec2) -> Self {
2509 self.shl(*rhs)
2510 }
2511}
2512
2513impl Shl<&IVec2> for &U64Vec2 {
2514 type Output = U64Vec2;
2515 #[inline]
2516 fn shl(self, rhs: &IVec2) -> U64Vec2 {
2517 (*self).shl(*rhs)
2518 }
2519}
2520
2521impl Shl<IVec2> for &U64Vec2 {
2522 type Output = U64Vec2;
2523 #[inline]
2524 fn shl(self, rhs: IVec2) -> U64Vec2 {
2525 (*self).shl(rhs)
2526 }
2527}
2528
2529impl Shr<IVec2> for U64Vec2 {
2530 type Output = Self;
2531 #[inline]
2532 fn shr(self, rhs: IVec2) -> Self {
2533 Self {
2534 x: self.x.shr(rhs.x),
2535 y: self.y.shr(rhs.y),
2536 }
2537 }
2538}
2539
2540impl Shr<&IVec2> for U64Vec2 {
2541 type Output = Self;
2542 #[inline]
2543 fn shr(self, rhs: &IVec2) -> Self {
2544 self.shr(*rhs)
2545 }
2546}
2547
2548impl Shr<&IVec2> for &U64Vec2 {
2549 type Output = U64Vec2;
2550 #[inline]
2551 fn shr(self, rhs: &IVec2) -> U64Vec2 {
2552 (*self).shr(*rhs)
2553 }
2554}
2555
2556impl Shr<IVec2> for &U64Vec2 {
2557 type Output = U64Vec2;
2558 #[inline]
2559 fn shr(self, rhs: IVec2) -> U64Vec2 {
2560 (*self).shr(rhs)
2561 }
2562}
2563
2564impl Shl<UVec2> for U64Vec2 {
2565 type Output = Self;
2566 #[inline]
2567 fn shl(self, rhs: UVec2) -> Self {
2568 Self {
2569 x: self.x.shl(rhs.x),
2570 y: self.y.shl(rhs.y),
2571 }
2572 }
2573}
2574
2575impl Shl<&UVec2> for U64Vec2 {
2576 type Output = Self;
2577 #[inline]
2578 fn shl(self, rhs: &UVec2) -> Self {
2579 self.shl(*rhs)
2580 }
2581}
2582
2583impl Shl<&UVec2> for &U64Vec2 {
2584 type Output = U64Vec2;
2585 #[inline]
2586 fn shl(self, rhs: &UVec2) -> U64Vec2 {
2587 (*self).shl(*rhs)
2588 }
2589}
2590
2591impl Shl<UVec2> for &U64Vec2 {
2592 type Output = U64Vec2;
2593 #[inline]
2594 fn shl(self, rhs: UVec2) -> U64Vec2 {
2595 (*self).shl(rhs)
2596 }
2597}
2598
2599impl Shr<UVec2> for U64Vec2 {
2600 type Output = Self;
2601 #[inline]
2602 fn shr(self, rhs: UVec2) -> Self {
2603 Self {
2604 x: self.x.shr(rhs.x),
2605 y: self.y.shr(rhs.y),
2606 }
2607 }
2608}
2609
2610impl Shr<&UVec2> for U64Vec2 {
2611 type Output = Self;
2612 #[inline]
2613 fn shr(self, rhs: &UVec2) -> Self {
2614 self.shr(*rhs)
2615 }
2616}
2617
2618impl Shr<&UVec2> for &U64Vec2 {
2619 type Output = U64Vec2;
2620 #[inline]
2621 fn shr(self, rhs: &UVec2) -> U64Vec2 {
2622 (*self).shr(*rhs)
2623 }
2624}
2625
2626impl Shr<UVec2> for &U64Vec2 {
2627 type Output = U64Vec2;
2628 #[inline]
2629 fn shr(self, rhs: UVec2) -> U64Vec2 {
2630 (*self).shr(rhs)
2631 }
2632}
2633
2634impl Index<usize> for U64Vec2 {
2635 type Output = u64;
2636 #[inline]
2637 fn index(&self, index: usize) -> &Self::Output {
2638 match index {
2639 0 => &self.x,
2640 1 => &self.y,
2641 _ => panic!("index out of bounds"),
2642 }
2643 }
2644}
2645
2646impl IndexMut<usize> for U64Vec2 {
2647 #[inline]
2648 fn index_mut(&mut self, index: usize) -> &mut Self::Output {
2649 match index {
2650 0 => &mut self.x,
2651 1 => &mut self.y,
2652 _ => panic!("index out of bounds"),
2653 }
2654 }
2655}
2656
2657impl fmt::Display for U64Vec2 {
2658 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2659 write!(f, "[{}, {}]", self.x, self.y)
2660 }
2661}
2662
2663impl fmt::Debug for U64Vec2 {
2664 fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
2665 fmt.debug_tuple(stringify!(U64Vec2))
2666 .field(&self.x)
2667 .field(&self.y)
2668 .finish()
2669 }
2670}
2671
2672impl From<[u64; 2]> for U64Vec2 {
2673 #[inline]
2674 fn from(a: [u64; 2]) -> Self {
2675 Self::new(a[0], a[1])
2676 }
2677}
2678
2679impl From<U64Vec2> for [u64; 2] {
2680 #[inline]
2681 fn from(v: U64Vec2) -> Self {
2682 [v.x, v.y]
2683 }
2684}
2685
2686impl From<(u64, u64)> for U64Vec2 {
2687 #[inline]
2688 fn from(t: (u64, u64)) -> Self {
2689 Self::new(t.0, t.1)
2690 }
2691}
2692
2693impl From<U64Vec2> for (u64, u64) {
2694 #[inline]
2695 fn from(v: U64Vec2) -> Self {
2696 (v.x, v.y)
2697 }
2698}
2699
2700impl From<U8Vec2> for U64Vec2 {
2701 #[inline]
2702 fn from(v: U8Vec2) -> Self {
2703 Self::new(u64::from(v.x), u64::from(v.y))
2704 }
2705}
2706
2707impl From<U16Vec2> for U64Vec2 {
2708 #[inline]
2709 fn from(v: U16Vec2) -> Self {
2710 Self::new(u64::from(v.x), u64::from(v.y))
2711 }
2712}
2713
2714impl From<UVec2> for U64Vec2 {
2715 #[inline]
2716 fn from(v: UVec2) -> Self {
2717 Self::new(u64::from(v.x), u64::from(v.y))
2718 }
2719}
2720
2721impl TryFrom<I8Vec2> for U64Vec2 {
2722 type Error = core::num::TryFromIntError;
2723
2724 #[inline]
2725 fn try_from(v: I8Vec2) -> Result<Self, Self::Error> {
2726 Ok(Self::new(u64::try_from(v.x)?, u64::try_from(v.y)?))
2727 }
2728}
2729
2730impl TryFrom<I16Vec2> for U64Vec2 {
2731 type Error = core::num::TryFromIntError;
2732
2733 #[inline]
2734 fn try_from(v: I16Vec2) -> Result<Self, Self::Error> {
2735 Ok(Self::new(u64::try_from(v.x)?, u64::try_from(v.y)?))
2736 }
2737}
2738
2739impl TryFrom<IVec2> for U64Vec2 {
2740 type Error = core::num::TryFromIntError;
2741
2742 #[inline]
2743 fn try_from(v: IVec2) -> Result<Self, Self::Error> {
2744 Ok(Self::new(u64::try_from(v.x)?, u64::try_from(v.y)?))
2745 }
2746}
2747
2748impl TryFrom<I64Vec2> for U64Vec2 {
2749 type Error = core::num::TryFromIntError;
2750
2751 #[inline]
2752 fn try_from(v: I64Vec2) -> Result<Self, Self::Error> {
2753 Ok(Self::new(u64::try_from(v.x)?, u64::try_from(v.y)?))
2754 }
2755}
2756
2757impl TryFrom<USizeVec2> for U64Vec2 {
2758 type Error = core::num::TryFromIntError;
2759
2760 #[inline]
2761 fn try_from(v: USizeVec2) -> Result<Self, Self::Error> {
2762 Ok(Self::new(u64::try_from(v.x)?, u64::try_from(v.y)?))
2763 }
2764}
2765
2766impl From<BVec2> for U64Vec2 {
2767 #[inline]
2768 fn from(v: BVec2) -> Self {
2769 Self::new(u64::from(v.x), u64::from(v.y))
2770 }
2771}