Jolt Physics
A multi core friendly Game Physics Engine
Loading...
Searching...
No Matches
UVec4.inl
Go to the documentation of this file.
1// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
2// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
3// SPDX-License-Identifier: MIT
4
6
8{
9#if defined(JPH_USE_SSE)
10 mValue = _mm_set_epi32(int(inW), int(inZ), int(inY), int(inX));
11#elif defined(JPH_USE_NEON)
12 uint32x2_t xy = vcreate_u32(static_cast<uint64>(inX) | (static_cast<uint64>(inY) << 32));
13 uint32x2_t zw = vcreate_u32(static_cast<uint64>(inZ) | (static_cast<uint64>(inW) << 32));
15#else
16 mU32[0] = inX;
17 mU32[1] = inY;
18 mU32[2] = inZ;
19 mU32[3] = inW;
20#endif
21}
22
24{
25 return sEquals(*this, inV2).TestAllTrue();
26}
27
28template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW>
30{
31 static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range");
32 static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range");
33 static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range");
34 static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range");
35
36#if defined(JPH_USE_SSE)
38#elif defined(JPH_USE_NEON)
40#else
42#endif
43}
44
46{
47#if defined(JPH_USE_SSE)
48 return _mm_setzero_si128();
49#elif defined(JPH_USE_NEON)
50 return vdupq_n_u32(0);
51#else
52 return UVec4(0, 0, 0, 0);
53#endif
54}
55
57{
58#if defined(JPH_USE_SSE)
59 return _mm_set1_epi32(int(inV));
60#elif defined(JPH_USE_NEON)
61 return vdupq_n_u32(inV);
62#else
63 return UVec4(inV, inV, inV, inV);
64#endif
65}
66
68{
69#if defined(JPH_USE_SSE)
70 return _mm_castps_si128(_mm_load_ss(reinterpret_cast<const float*>(inV)));
71#elif defined(JPH_USE_NEON)
72 return vsetq_lane_u32(*inV, vdupq_n_u32(0), 0);
73#else
74 return UVec4(*inV, 0, 0, 0);
75#endif
76}
77
79{
80#if defined(JPH_USE_SSE)
81 return _mm_loadu_si128(reinterpret_cast<const __m128i *>(inV));
82#elif defined(JPH_USE_NEON)
83 return vld1q_u32(inV);
84#else
85 return UVec4(inV[0], inV[1], inV[2], inV[3]);
86#endif
87}
88
90{
91#if defined(JPH_USE_SSE)
92 return _mm_load_si128(reinterpret_cast<const __m128i *>(inV));
93#elif defined(JPH_USE_NEON)
94 return vld1q_u32(inV); // ARM doesn't make distinction between aligned or not
95#else
96 return UVec4(inV[0], inV[1], inV[2], inV[3]);
97#endif
98}
99
100template <const int Scale>
102{
103#ifdef JPH_USE_AVX2
104 return _mm_i32gather_epi32(reinterpret_cast<const int *>(inBase), inOffsets.mValue, Scale);
105#else
106 return Vec4::sGatherFloat4<Scale>(reinterpret_cast<const float *>(inBase), inOffsets).ReinterpretAsInt();
107#endif
108}
109
111{
112#if defined(JPH_USE_SSE4_1)
113 return _mm_min_epu32(inV1.mValue, inV2.mValue);
114#elif defined(JPH_USE_NEON)
115 return vminq_u32(inV1.mValue, inV2.mValue);
116#else
118 for (int i = 0; i < 4; i++)
119 result.mU32[i] = min(inV1.mU32[i], inV2.mU32[i]);
120 return result;
121#endif
122}
123
125{
126#if defined(JPH_USE_SSE4_1)
127 return _mm_max_epu32(inV1.mValue, inV2.mValue);
128#elif defined(JPH_USE_NEON)
129 return vmaxq_u32(inV1.mValue, inV2.mValue);
130#else
132 for (int i = 0; i < 4; i++)
133 result.mU32[i] = max(inV1.mU32[i], inV2.mU32[i]);
134 return result;
135#endif
136}
137
139{
140#if defined(JPH_USE_SSE)
141 return _mm_cmpeq_epi32(inV1.mValue, inV2.mValue);
142#elif defined(JPH_USE_NEON)
143 return vceqq_u32(inV1.mValue, inV2.mValue);
144#else
145 return UVec4(inV1.mU32[0] == inV2.mU32[0]? 0xffffffffu : 0,
146 inV1.mU32[1] == inV2.mU32[1]? 0xffffffffu : 0,
147 inV1.mU32[2] == inV2.mU32[2]? 0xffffffffu : 0,
148 inV1.mU32[3] == inV2.mU32[3]? 0xffffffffu : 0);
149#endif
150}
151
153{
154#if defined(JPH_USE_SSE4_1)
156#elif defined(JPH_USE_NEON)
157 return vbslq_u32(vshrq_n_s32(inControl.mValue, 31), inV2.mValue, inV1.mValue);
158#else
160 for (int i = 0; i < 4; i++)
161 result.mU32[i] = inControl.mU32[i] ? inV2.mU32[i] : inV1.mU32[i];
162 return result;
163#endif
164}
165
167{
168#if defined(JPH_USE_SSE)
169 return _mm_or_si128(inV1.mValue, inV2.mValue);
170#elif defined(JPH_USE_NEON)
171 return vorrq_u32(inV1.mValue, inV2.mValue);
172#else
173 return UVec4(inV1.mU32[0] | inV2.mU32[0],
174 inV1.mU32[1] | inV2.mU32[1],
175 inV1.mU32[2] | inV2.mU32[2],
176 inV1.mU32[3] | inV2.mU32[3]);
177#endif
178}
179
181{
182#if defined(JPH_USE_SSE)
183 return _mm_xor_si128(inV1.mValue, inV2.mValue);
184#elif defined(JPH_USE_NEON)
185 return veorq_u32(inV1.mValue, inV2.mValue);
186#else
187 return UVec4(inV1.mU32[0] ^ inV2.mU32[0],
188 inV1.mU32[1] ^ inV2.mU32[1],
189 inV1.mU32[2] ^ inV2.mU32[2],
190 inV1.mU32[3] ^ inV2.mU32[3]);
191#endif
192}
193
195{
196#if defined(JPH_USE_SSE)
197 return _mm_and_si128(inV1.mValue, inV2.mValue);
198#elif defined(JPH_USE_NEON)
199 return vandq_u32(inV1.mValue, inV2.mValue);
200#else
201 return UVec4(inV1.mU32[0] & inV2.mU32[0],
202 inV1.mU32[1] & inV2.mU32[1],
203 inV1.mU32[2] & inV2.mU32[2],
204 inV1.mU32[3] & inV2.mU32[3]);
205#endif
206}
207
208
210{
211#if defined(JPH_USE_AVX512)
212 return _mm_ternarylogic_epi32(inV1.mValue, inV1.mValue, inV1.mValue, 0b01010101);
213#elif defined(JPH_USE_SSE)
214 return sXor(inV1, sReplicate(0xffffffff));
215#elif defined(JPH_USE_NEON)
216 return vmvnq_u32(inV1.mValue);
217#else
218 return UVec4(~inV1.mU32[0], ~inV1.mU32[1], ~inV1.mU32[2], ~inV1.mU32[3]);
219#endif
220}
221
223{
224 // If inValue.z is false then shift W to Z
226
227 // If inValue.y is false then shift Z and further to Y and further
228 v = UVec4::sSelect(v.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_W>(), v, inValue.SplatY());
229
230 // If inValue.x is false then shift X and further to Y and further
231 v = UVec4::sSelect(v.Swizzle<SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_W>(), v, inValue.SplatX());
232
233 return v;
234}
235
237{
238#if defined(JPH_USE_SSE4_1)
239 return _mm_mullo_epi32(mValue, inV2.mValue);
240#elif defined(JPH_USE_NEON)
241 return vmulq_u32(mValue, inV2.mValue);
242#else
244 for (int i = 0; i < 4; i++)
245 result.mU32[i] = mU32[i] * inV2.mU32[i];
246 return result;
247#endif
248}
249
251{
252#if defined(JPH_USE_SSE)
253 return _mm_add_epi32(mValue, inV2.mValue);
254#elif defined(JPH_USE_NEON)
255 return vaddq_u32(mValue, inV2.mValue);
256#else
257 return UVec4(mU32[0] + inV2.mU32[0],
258 mU32[1] + inV2.mU32[1],
259 mU32[2] + inV2.mU32[2],
260 mU32[3] + inV2.mU32[3]);
261#endif
262}
263
265{
266#if defined(JPH_USE_SSE)
268#elif defined(JPH_USE_NEON)
269 mValue = vaddq_u32(mValue, inV2.mValue);
270#else
271 for (int i = 0; i < 4; ++i)
272 mU32[i] += inV2.mU32[i];
273#endif
274 return *this;
275}
276
278{
279#if defined(JPH_USE_SSE)
280 return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(0, 0, 0, 0));
281#elif defined(JPH_USE_NEON)
282 return vdupq_laneq_u32(mValue, 0);
283#else
284 return UVec4(mU32[0], mU32[0], mU32[0], mU32[0]);
285#endif
286}
287
289{
290#if defined(JPH_USE_SSE)
291 return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(1, 1, 1, 1));
292#elif defined(JPH_USE_NEON)
293 return vdupq_laneq_u32(mValue, 1);
294#else
295 return UVec4(mU32[1], mU32[1], mU32[1], mU32[1]);
296#endif
297}
298
300{
301#if defined(JPH_USE_SSE)
302 return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(2, 2, 2, 2));
303#elif defined(JPH_USE_NEON)
304 return vdupq_laneq_u32(mValue, 2);
305#else
306 return UVec4(mU32[2], mU32[2], mU32[2], mU32[2]);
307#endif
308}
309
311{
312#if defined(JPH_USE_SSE)
313 return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(3, 3, 3, 3));
314#elif defined(JPH_USE_NEON)
315 return vdupq_laneq_u32(mValue, 3);
316#else
317 return UVec4(mU32[3], mU32[3], mU32[3], mU32[3]);
318#endif
319}
320
322{
323#if defined(JPH_USE_SSE)
324 return _mm_cvtepi32_ps(mValue);
325#elif defined(JPH_USE_NEON)
326 return vcvtq_f32_s32(mValue);
327#else
328 return Vec4((float)mU32[0], (float)mU32[1], (float)mU32[2], (float)mU32[3]);
329#endif
330}
331
333{
334#if defined(JPH_USE_SSE)
336#elif defined(JPH_USE_NEON)
338#else
339 return *reinterpret_cast<const Vec4 *>(this);
340#endif
341}
342
344{
345#if defined(JPH_USE_SSE)
346 _mm_storeu_si128(reinterpret_cast<__m128i *>(outV), mValue);
347#elif defined(JPH_USE_NEON)
349#else
350 for (int i = 0; i < 4; ++i)
351 outV[i] = mU32[i];
352#endif
353}
354
356{
357#if defined(JPH_USE_SSE)
358 _mm_store_si128(reinterpret_cast<__m128i *>(outV), mValue);
359#elif defined(JPH_USE_NEON)
360 vst1q_u32(outV, mValue); // ARM doesn't make distinction between aligned or not
361#else
362 for (int i = 0; i < 4; ++i)
363 outV[i] = mU32[i];
364#endif
365}
366
368{
369#if defined(JPH_USE_SSE)
371#elif defined(JPH_USE_NEON)
372 return vaddvq_u32(vshrq_n_u32(mValue, 31));
373#else
374 return (mU32[0] >> 31) + (mU32[1] >> 31) + (mU32[2] >> 31) + (mU32[3] >> 31);
375#endif
376}
377
379{
380#if defined(JPH_USE_SSE)
382#elif defined(JPH_USE_NEON)
383 int32x4_t shift = JPH_NEON_INT32x4(0, 1, 2, 3);
385#else
386 return (mU32[0] >> 31) | ((mU32[1] >> 31) << 1) | ((mU32[2] >> 31) << 2) | ((mU32[3] >> 31) << 3);
387#endif
388}
389
391{
392 return GetTrues() != 0;
393}
394
396{
397 return (GetTrues() & 0b111) != 0;
398}
399
401{
402 return GetTrues() == 0b1111;
403}
404
406{
407 return (GetTrues() & 0b111) == 0b111;
408}
409
410template <const uint Count>
412{
413 static_assert(Count <= 31, "Invalid shift");
414
415#if defined(JPH_USE_SSE)
416 return _mm_slli_epi32(mValue, Count);
417#elif defined(JPH_USE_NEON)
418 return vshlq_n_u32(mValue, Count);
419#else
420 return UVec4(mU32[0] << Count, mU32[1] << Count, mU32[2] << Count, mU32[3] << Count);
421#endif
422}
423
424template <const uint Count>
426{
427 static_assert(Count <= 31, "Invalid shift");
428
429#if defined(JPH_USE_SSE)
430 return _mm_srli_epi32(mValue, Count);
431#elif defined(JPH_USE_NEON)
432 return vshrq_n_u32(mValue, Count);
433#else
434 return UVec4(mU32[0] >> Count, mU32[1] >> Count, mU32[2] >> Count, mU32[3] >> Count);
435#endif
436}
437
438template <const uint Count>
440{
441 static_assert(Count <= 31, "Invalid shift");
442
443#if defined(JPH_USE_SSE)
444 return _mm_srai_epi32(mValue, Count);
445#elif defined(JPH_USE_NEON)
446 return vshrq_n_s32(mValue, Count);
447#else
448 return UVec4(uint32(int32_t(mU32[0]) >> Count),
449 uint32(int32_t(mU32[1]) >> Count),
450 uint32(int32_t(mU32[2]) >> Count),
451 uint32(int32_t(mU32[3]) >> Count));
452#endif
453}
454
456{
457#if defined(JPH_USE_SSE)
459#elif defined(JPH_USE_NEON)
463#else
464 return UVec4(mU32[0] & 0xffff,
465 (mU32[0] >> 16) & 0xffff,
466 mU32[1] & 0xffff,
467 (mU32[1] >> 16) & 0xffff);
468#endif
469}
470
472{
473#if defined(JPH_USE_SSE)
475#elif defined(JPH_USE_NEON)
479#else
480 return UVec4(mU32[2] & 0xffff,
481 (mU32[2] >> 16) & 0xffff,
482 mU32[3] & 0xffff,
483 (mU32[3] >> 16) & 0xffff);
484#endif
485}
486
488{
489#if defined(JPH_USE_SSE4_1)
490 return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff03), int(0xffffff02), int(0xffffff01), int(0xffffff00)));
491#elif defined(JPH_USE_NEON)
492 int8x16_t idx = JPH_NEON_INT8x16(0x00, 0x7f, 0x7f, 0x7f, 0x01, 0x7f, 0x7f, 0x7f, 0x02, 0x7f, 0x7f, 0x7f, 0x03, 0x7f, 0x7f, 0x7f);
494#else
496 for (int i = 0; i < 4; i++)
497 result.mU32[i] = (mU32[0] >> (i * 8)) & 0xff;
498 return result;
499#endif
500}
501
503{
504#if defined(JPH_USE_SSE4_1)
505 return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff07), int(0xffffff06), int(0xffffff05), int(0xffffff04)));
506#elif defined(JPH_USE_NEON)
507 int8x16_t idx = JPH_NEON_INT8x16(0x04, 0x7f, 0x7f, 0x7f, 0x05, 0x7f, 0x7f, 0x7f, 0x06, 0x7f, 0x7f, 0x7f, 0x07, 0x7f, 0x7f, 0x7f);
509#else
511 for (int i = 0; i < 4; i++)
512 result.mU32[i] = (mU32[1] >> (i * 8)) & 0xff;
513 return result;
514#endif
515}
516
518{
519#if defined(JPH_USE_SSE4_1)
520 return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff0b), int(0xffffff0a), int(0xffffff09), int(0xffffff08)));
521#elif defined(JPH_USE_NEON)
522 int8x16_t idx = JPH_NEON_INT8x16(0x08, 0x7f, 0x7f, 0x7f, 0x09, 0x7f, 0x7f, 0x7f, 0x0a, 0x7f, 0x7f, 0x7f, 0x0b, 0x7f, 0x7f, 0x7f);
524#else
526 for (int i = 0; i < 4; i++)
527 result.mU32[i] = (mU32[2] >> (i * 8)) & 0xff;
528 return result;
529#endif
530}
531
533{
534#if defined(JPH_USE_SSE4_1)
535 return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff0f), int(0xffffff0e), int(0xffffff0d), int(0xffffff0c)));
536#elif defined(JPH_USE_NEON)
537 int8x16_t idx = JPH_NEON_INT8x16(0x0c, 0x7f, 0x7f, 0x7f, 0x0d, 0x7f, 0x7f, 0x7f, 0x0e, 0x7f, 0x7f, 0x7f, 0x0f, 0x7f, 0x7f, 0x7f);
539#else
541 for (int i = 0; i < 4; i++)
542 result.mU32[i] = (mU32[3] >> (i * 8)) & 0xff;
543 return result;
544#endif
545}
546
548{
549#if defined(JPH_USE_SSE4_1) || defined(JPH_USE_NEON)
550 alignas(UVec4) static constexpr uint32 sFourMinusXShuffle[5][4] =
551 {
552 { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff },
553 { 0x0f0e0d0c, 0xffffffff, 0xffffffff, 0xffffffff },
554 { 0x0b0a0908, 0x0f0e0d0c, 0xffffffff, 0xffffffff },
555 { 0x07060504, 0x0b0a0908, 0x0f0e0d0c, 0xffffffff },
556 { 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c }
557 };
558#endif
559
560#if defined(JPH_USE_SSE4_1)
561 return _mm_shuffle_epi8(mValue, *reinterpret_cast<const UVec4::Type *>(sFourMinusXShuffle[inCount]));
562#elif defined(JPH_USE_NEON)
565#else
567 for (int i = 0; i < inCount; i++)
568 result.mU32[i] = mU32[i + 4 - inCount];
569 return result;
570#endif
571}
572
std::uint64_t uint64
Definition Core.h:443
#define JPH_NAMESPACE_END
Definition Core.h:367
std::uint32_t uint32
Definition Core.h:442
#define JPH_NAMESPACE_BEGIN
Definition Core.h:361
uint CountBits(uint32 inValue)
Count the number of 1 bits in a value.
Definition Math.h:159
AllocateFunction Allocate
Definition Memory.cpp:59
@ SWIZZLE_Z
Use the Z component.
Definition Swizzle.h:14
@ SWIZZLE_W
Use the W component.
Definition Swizzle.h:15
@ SWIZZLE_X
Use the X component.
Definition Swizzle.h:12
@ SWIZZLE_Y
Use the Y component.
Definition Swizzle.h:13
Definition UVec4.h:12
JPH_INLINE UVec4 Swizzle() const
Swizzle the elements in inV.
static JPH_INLINE UVec4 sNot(UVec4Arg inV1)
Logical not (component wise)
Definition UVec4.inl:209
static JPH_INLINE UVec4 sMin(UVec4Arg inV1, UVec4Arg inV2)
Return the minimum value of each of the components.
Definition UVec4.inl:110
JPH_INLINE UVec4 LogicalShiftLeft() const
Shift all components by Count bits to the left (filling with zeros from the left)
JPH_INLINE int CountTrues() const
Count the number of components that are true (true is when highest bit of component is set)
Definition UVec4.inl:367
JPH_INLINE UVec4 SplatY() const
Replicate the Y component to all components.
Definition UVec4.inl:288
static JPH_INLINE UVec4 sLoadInt(const uint32 *inV)
Load 1 int from memory and place it in the X component, zeros Y, Z and W.
Definition UVec4.inl:67
JPH_INLINE UVec4 Expand4Uint16Lo() const
Takes the lower 4 16 bits and expands them to X, Y, Z and W.
Definition UVec4.inl:455
static JPH_INLINE UVec4 sSort4True(UVec4Arg inValue, UVec4Arg inIndex)
Definition UVec4.inl:222
JPH_INLINE UVec4 LogicalShiftRight() const
Shift all components by Count bits to the right (filling with zeros from the right)
static JPH_INLINE UVec4 sReplicate(uint32 inV)
Replicate int inV across all components.
Definition UVec4.inl:56
JPH_INLINE UVec4 SplatX() const
Replicate the X component to all components.
Definition UVec4.inl:277
JPH_INLINE UVec4 Expand4Byte4() const
Takes byte 4 .. 7 and expands them to X, Y, Z and W.
Definition UVec4.inl:502
JPH_INLINE bool TestAllTrue() const
Test if all components are true (true is when highest bit of component is set)
Definition UVec4.inl:400
JPH_INLINE UVec4 Expand4Byte0() const
Takes byte 0 .. 3 and expands them to X, Y, Z and W.
Definition UVec4.inl:487
JPH_INLINE int GetTrues() const
Store if X is true in bit 0, Y in bit 1, Z in bit 2 and W in bit 3 (true is when highest bit of component is set)
Definition UVec4.inl:378
JPH_INLINE bool TestAnyXYZTrue() const
Test if any of X, Y or Z components are true (true is when highest bit of component is set)
Definition UVec4.inl:395
JPH_INLINE UVec4 & operator+=(UVec4Arg inV2)
Add two integer vectors (component wise)
Definition UVec4.inl:264
static JPH_INLINE UVec4 sGatherInt4(const uint32 *inBase, UVec4Arg inOffsets)
Gather 4 ints from memory at inBase + inOffsets[i] * Scale.
static JPH_INLINE UVec4 sAnd(UVec4Arg inV1, UVec4Arg inV2)
Logical and (component wise)
Definition UVec4.inl:194
static JPH_INLINE UVec4 sEquals(UVec4Arg inV1, UVec4Arg inV2)
Equals (component wise)
Definition UVec4.inl:138
static JPH_INLINE UVec4 sOr(UVec4Arg inV1, UVec4Arg inV2)
Logical or (component wise)
Definition UVec4.inl:166
struct { uint32 mData[4];} Type
Definition UVec4.h:22
JPH_INLINE bool TestAllXYZTrue() const
Test if X, Y and Z components are true (true is when highest bit of component is set)
Definition UVec4.inl:405
JPH_INLINE UVec4 ShiftComponents4Minus(int inCount) const
Shift vector components by 4 - Count floats to the left, so if Count = 1 the resulting vector is (W,...
Definition UVec4.inl:547
JPH_INLINE bool operator==(UVec4Arg inV2) const
Comparison.
Definition UVec4.inl:23
static JPH_INLINE UVec4 sMax(UVec4Arg inV1, UVec4Arg inV2)
Return the maximum of each of the components.
Definition UVec4.inl:124
JPH_INLINE UVec4 SplatZ() const
Replicate the Z component to all components.
Definition UVec4.inl:299
Type mValue
Definition UVec4.h:211
JPH_INLINE UVec4 SplatW() const
Replicate the W component to all components.
Definition UVec4.inl:310
JPH_INLINE void StoreInt4(uint32 *outV) const
Store 4 ints to memory.
Definition UVec4.inl:343
JPH_INLINE UVec4 Expand4Byte8() const
Takes byte 8 .. 11 and expands them to X, Y, Z and W.
Definition UVec4.inl:517
static JPH_INLINE UVec4 sLoadInt4Aligned(const uint32 *inV)
Load 4 ints from memory, aligned to 16 bytes.
Definition UVec4.inl:89
static JPH_INLINE UVec4 sLoadInt4(const uint32 *inV)
Load 4 ints from memory.
Definition UVec4.inl:78
JPH_INLINE UVec4 Expand4Byte12() const
Takes byte 12 .. 15 and expands them to X, Y, Z and W.
Definition UVec4.inl:532
static JPH_INLINE UVec4 sXor(UVec4Arg inV1, UVec4Arg inV2)
Logical xor (component wise)
Definition UVec4.inl:180
JPH_INLINE UVec4 Expand4Uint16Hi() const
Takes the upper 4 16 bits and expands them to X, Y, Z and W.
Definition UVec4.inl:471
static JPH_INLINE UVec4 sZero()
Vector with all zeros.
Definition UVec4.inl:45
JPH_INLINE UVec4 operator+(UVec4Arg inV2)
Adds an integer value to all integer components (discards any overflow)
Definition UVec4.inl:250
JPH_INLINE UVec4 ArithmeticShiftRight() const
Shift all components by Count bits to the right (shifting in the value of the highest bit)
UVec4()=default
Constructor.
JPH_INLINE UVec4 operator*(UVec4Arg inV2) const
Multiplies each of the 4 integer components with an integer (discards any overflow)
Definition UVec4.inl:236
static JPH_INLINE UVec4 sSelect(UVec4Arg inV1, UVec4Arg inV2, UVec4Arg inControl)
Component wise select, returns inV1 when highest bit of inControl = 0 and inV2 when highest bit of in...
Definition UVec4.inl:152
JPH_INLINE Vec4 ToFloat() const
Convert each component from an int to a float.
Definition UVec4.inl:321
JPH_INLINE Vec4 ReinterpretAsFloat() const
Reinterpret UVec4 as a Vec4 (doesn't change the bits)
Definition UVec4.inl:332
JPH_INLINE void StoreInt4Aligned(uint32 *outV) const
Store 4 ints to memory, aligned to 16 bytes.
Definition UVec4.inl:355
JPH_INLINE bool TestAnyTrue() const
Test if any of the components are true (true is when highest bit of component is set)
Definition UVec4.inl:390
uint32 mU32[4]
Definition UVec4.h:212
Definition Vec4.h:14