Jolt Physics
A multi-core friendly Game Physics Engine
DVec3.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/HashCombine.h>

// Create a std::hash for DVec3
JPH_MAKE_HASHABLE(JPH::DVec3, t.GetX(), t.GetY(), t.GetZ())

JPH_NAMESPACE_BEGIN

// Construct from a single precision vector, widening each component to double
DVec3::DVec3(Vec3Arg inRHS)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_cvtps_pd(inRHS.mValue);
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_cvtps_pd(inRHS.mValue);
    mValue.mHigh = _mm_cvtps_pd(_mm_shuffle_ps(inRHS.mValue, inRHS.mValue, _MM_SHUFFLE(2, 2, 2, 2)));
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vcvt_f64_f32(vget_low_f32(inRHS.mValue));
    mValue.val[1] = vcvt_high_f64_f32(inRHS.mValue);
#else
    mF64[0] = (double)inRHS.GetX();
    mF64[1] = (double)inRHS.GetY();
    mF64[2] = (double)inRHS.GetZ();
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
}
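
// Example (illustrative, editorial; not part of the original source):
// widening a single precision Vec3 to a DVec3 is lossless, since every
// float is exactly representable as a double.
//
//   Vec3 position_f(1.5f, 2.25f, 3.0f);
//   DVec3 position_d(position_f); // (1.5, 2.25, 3.0)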

// Construct from a Vec4 (the W component is ignored)
DVec3::DVec3(Vec4Arg inRHS) :
    DVec3(Vec3(inRHS))
{
}

DVec3::DVec3(double inX, double inY, double inZ)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_set_pd(inZ, inZ, inY, inX); // Ensure Z and W are the same
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_set_pd(inY, inX);
    mValue.mHigh = _mm_set_pd1(inZ);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vcombine_f64(vcreate_f64(*reinterpret_cast<uint64 *>(&inX)), vcreate_f64(*reinterpret_cast<uint64 *>(&inY)));
    mValue.val[1] = vdupq_n_f64(inZ);
#else
    mF64[0] = inX;
    mF64[1] = inY;
    mF64[2] = inZ;
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
}

DVec3::DVec3(const Double3 &inV)
{
#if defined(JPH_USE_AVX)
    Type x = _mm256_castpd128_pd256(_mm_load_sd(&inV.x));
    Type y = _mm256_castpd128_pd256(_mm_load_sd(&inV.y));
    Type z = _mm256_broadcast_sd(&inV.z);
    Type xy = _mm256_unpacklo_pd(x, y);
    mValue = _mm256_blend_pd(xy, z, 0b1100); // Ensure Z and W are the same
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_loadu_pd(&inV.x);
    mValue.mHigh = _mm_set_pd1(inV.z);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vld1q_f64(&inV.x);
    mValue.val[1] = vdupq_n_f64(inV.z);
#else
    mF64[0] = inV.x;
    mF64[1] = inV.y;
    mF64[2] = inV.z;
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
}

// Internal helper that asserts the invariant maintained by sFixW: W equals Z
void DVec3::CheckW() const
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    // Avoid asserts when both components are NaN
    JPH_ASSERT(reinterpret_cast<const uint64 *>(mF64)[2] == reinterpret_cast<const uint64 *>(mF64)[3]);
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
}

// Internal helper function that ensures that the Z component is replicated
// to the W component to prevent spurious floating point exceptions on the
// unused fourth lane
DVec3::Type DVec3::sFixW(TypeArg inValue)
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    #if defined(JPH_USE_AVX)
        return _mm256_shuffle_pd(inValue, inValue, 2);
    #elif defined(JPH_USE_SSE)
        Type value;
        value.mLow = inValue.mLow;
        value.mHigh = _mm_shuffle_pd(inValue.mHigh, inValue.mHigh, 0);
        return value;
    #elif defined(JPH_USE_NEON)
        Type value;
        value.val[0] = inValue.val[0];
        value.val[1] = vdupq_laneq_f64(inValue.val[1], 0);
        return value;
    #else
        Type value;
        value.mData[0] = inValue.mData[0];
        value.mData[1] = inValue.mData[1];
        value.mData[2] = inValue.mData[2];
        value.mData[3] = inValue.mData[2];
        return value;
    #endif
#else
    return inValue;
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
}
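
// Note (editorial): when JPH_FLOATING_POINT_EXCEPTIONS_ENABLED is defined,
// the otherwise unused W lane always mirrors Z. Component wise operations
// such as operator / (DVec3Arg) also run on the W lane, so a stale zero or
// NaN there could raise a spurious floating point exception; replicating Z
// into W avoids that, and CheckW() asserts the invariant.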

// Vector with all zeros
DVec3 DVec3::sZero()
{
#if defined(JPH_USE_AVX)
    return _mm256_setzero_pd();
#elif defined(JPH_USE_SSE)
    __m128d zero = _mm_setzero_pd();
    return DVec3({ zero, zero });
#elif defined(JPH_USE_NEON)
    float64x2_t zero = vdupq_n_f64(0.0);
    return DVec3({ zero, zero });
#else
    return DVec3(0, 0, 0);
#endif
}

// Replicate inV across all components
DVec3 DVec3::sReplicate(double inV)
{
#if defined(JPH_USE_AVX)
    return _mm256_set1_pd(inV);
#elif defined(JPH_USE_SSE)
    __m128d value = _mm_set1_pd(inV);
    return DVec3({ value, value });
#elif defined(JPH_USE_NEON)
    float64x2_t value = vdupq_n_f64(inV);
    return DVec3({ value, value });
#else
    return DVec3(inV, inV, inV);
#endif
}

// Vector with all NaN's
DVec3 DVec3::sNaN()
{
    return sReplicate(numeric_limits<double>::quiet_NaN());
}

// Load 3 doubles from memory (reads 64 bits extra which it doesn't use)
DVec3 DVec3::sLoadDouble3Unsafe(const Double3 &inV)
{
#if defined(JPH_USE_AVX)
    Type v = _mm256_loadu_pd(&inV.x);
#elif defined(JPH_USE_SSE)
    Type v;
    v.mLow = _mm_loadu_pd(&inV.x);
    v.mHigh = _mm_set1_pd(inV.z);
#elif defined(JPH_USE_NEON)
    Type v = vld1q_f64_x2(&inV.x);
#else
    Type v = { inV.x, inV.y, inV.z };
#endif
    return sFixW(v);
}
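
// Caution (editorial note): as the name indicates, the AVX and NEON paths
// load 4 doubles starting at &inV.x, i.e. they read 64 bits past the end of
// the Double3. Only use this where reading 8 extra bytes is known to be
// safe, e.g. not on the last element of a tightly sized allocation.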

// Store 3 doubles to memory
void DVec3::StoreDouble3(Double3 *outV) const
{
    outV->x = mF64[0];
    outV->y = mF64[1];
    outV->z = mF64[2];
}

DVec3::operator Vec3() const
{
#if defined(JPH_USE_AVX)
    return _mm256_cvtpd_ps(mValue);
#elif defined(JPH_USE_SSE)
    __m128 low = _mm_cvtpd_ps(mValue.mLow);
    __m128 high = _mm_cvtpd_ps(mValue.mHigh);
    return _mm_shuffle_ps(low, high, _MM_SHUFFLE(1, 0, 1, 0));
#elif defined(JPH_USE_NEON)
    return vcvt_high_f32_f64(vcvtx_f32_f64(mValue.val[0]), mValue.val[1]);
#else
    return Vec3((float)GetX(), (float)GetY(), (float)GetZ());
#endif
}
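
// Note (editorial): converting back to Vec3 narrows each component to float,
// discarding the low 29 bits of the 52 bit double mantissa (rounded to
// nearest by default). See PrepareRoundToZero / PrepareRoundToInf below for
// controlled rounding.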

// Return the minimum value of each of the components
DVec3 DVec3::sMin(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_min_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_min_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_min_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vminq_f64(inV1.mValue.val[0], inV2.mValue.val[0]), vminq_f64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(min(inV1.mF64[0], inV2.mF64[0]),
                 min(inV1.mF64[1], inV2.mF64[1]),
                 min(inV1.mF64[2], inV2.mF64[2]));
#endif
}

// Return the maximum of each of the components
DVec3 DVec3::sMax(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_max_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_max_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_max_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmaxq_f64(inV1.mValue.val[0], inV2.mValue.val[0]), vmaxq_f64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(max(inV1.mF64[0], inV2.mF64[0]),
                 max(inV1.mF64[1], inV2.mF64[1]),
                 max(inV1.mF64[2], inV2.mF64[2]));
#endif
}

// Clamp a vector between min and max (component wise)
DVec3 DVec3::sClamp(DVec3Arg inV, DVec3Arg inMin, DVec3Arg inMax)
{
    return sMax(sMin(inV, inMax), inMin);
}
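
// Example (illustrative, editorial; not part of the original source):
//
//   DVec3 v(-2.0, 0.5, 9.0);
//   DVec3 c = DVec3::sClamp(v, DVec3::sZero(), DVec3::sReplicate(1.0));
//   // c == (0.0, 0.5, 1.0)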

// Equals (component wise)
DVec3 DVec3::sEquals(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_EQ_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmpeq_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpeq_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vceqq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vceqq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] == inV2.mF64[0]? cTrue : cFalse,
                 inV1.mF64[1] == inV2.mF64[1]? cTrue : cFalse,
                 inV1.mF64[2] == inV2.mF64[2]? cTrue : cFalse);
#endif
}

// Less than (component wise)
DVec3 DVec3::sLess(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_LT_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmplt_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmplt_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcltq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcltq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] < inV2.mF64[0]? cTrue : cFalse,
                 inV1.mF64[1] < inV2.mF64[1]? cTrue : cFalse,
                 inV1.mF64[2] < inV2.mF64[2]? cTrue : cFalse);
#endif
}

// Less than or equal (component wise)
DVec3 DVec3::sLessOrEqual(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_LE_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmple_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmple_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcleq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcleq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] <= inV2.mF64[0]? cTrue : cFalse,
                 inV1.mF64[1] <= inV2.mF64[1]? cTrue : cFalse,
                 inV1.mF64[2] <= inV2.mF64[2]? cTrue : cFalse);
#endif
}

// Greater than (component wise)
DVec3 DVec3::sGreater(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_GT_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmpgt_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpgt_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcgtq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcgtq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] > inV2.mF64[0]? cTrue : cFalse,
                 inV1.mF64[1] > inV2.mF64[1]? cTrue : cFalse,
                 inV1.mF64[2] > inV2.mF64[2]? cTrue : cFalse);
#endif
}

// Greater than or equal (component wise)
DVec3 DVec3::sGreaterOrEqual(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_GE_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmpge_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpge_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcgeq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcgeq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] >= inV2.mF64[0]? cTrue : cFalse,
                 inV1.mF64[1] >= inV2.mF64[1]? cTrue : cFalse,
                 inV1.mF64[2] >= inV2.mF64[2]? cTrue : cFalse);
#endif
}

// Calculates inMul1 * inMul2 + inAdd
DVec3 DVec3::sFusedMultiplyAdd(DVec3Arg inMul1, DVec3Arg inMul2, DVec3Arg inAdd)
{
#if defined(JPH_USE_AVX)
    #ifdef JPH_USE_FMADD
        return _mm256_fmadd_pd(inMul1.mValue, inMul2.mValue, inAdd.mValue);
    #else
        return _mm256_add_pd(_mm256_mul_pd(inMul1.mValue, inMul2.mValue), inAdd.mValue);
    #endif
#elif defined(JPH_USE_NEON)
    return DVec3({ vmlaq_f64(inAdd.mValue.val[0], inMul1.mValue.val[0], inMul2.mValue.val[0]), vmlaq_f64(inAdd.mValue.val[1], inMul1.mValue.val[1], inMul2.mValue.val[1]) });
#else
    return inMul1 * inMul2 + inAdd;
#endif
}

// Component wise select, returns inV1 when the highest bit of inControl = 0
// and inV2 when the highest bit of inControl = 1
DVec3 DVec3::sSelect(DVec3Arg inV1, DVec3Arg inV2, DVec3Arg inControl)
{
#if defined(JPH_USE_AVX)
    return _mm256_blendv_pd(inV1.mValue, inV2.mValue, inControl.mValue);
#elif defined(JPH_USE_SSE4_1)
    Type v = { _mm_blendv_pd(inV1.mValue.mLow, inV2.mValue.mLow, inControl.mValue.mLow), _mm_blendv_pd(inV1.mValue.mHigh, inV2.mValue.mHigh, inControl.mValue.mHigh) };
    return sFixW(v);
#elif defined(JPH_USE_NEON)
    Type v = { vbslq_f64(vshrq_n_s64(inControl.mValue.val[0], 63), inV2.mValue.val[0], inV1.mValue.val[0]), vbslq_f64(vshrq_n_s64(inControl.mValue.val[1], 63), inV2.mValue.val[1], inV1.mValue.val[1]) };
    return sFixW(v);
#else
    DVec3 result;
    for (int i = 0; i < 3; i++)
        result.mF64[i] = BitCast<uint64>(inControl.mF64[i])? inV2.mF64[i] : inV1.mF64[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    result.mF64[3] = result.mF64[2];
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    return result;
#endif
}
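
// The control vector is typically produced by one of the comparison functions
// above, which return all-ones (sign bit set) for true lanes.
// Example (illustrative, editorial):
//
//   DVec3 a(1.0, 5.0, 3.0), b(4.0, 2.0, 6.0);
//   DVec3 m = DVec3::sSelect(a, b, DVec3::sLess(a, b));
//   // m == (4.0, 5.0, 6.0), equivalent to DVec3::sMax(a, b)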

// Logical or (component wise)
DVec3 DVec3::sOr(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_or_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_or_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_or_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vorrq_s64(inV1.mValue.val[0], inV2.mValue.val[0]), vorrq_s64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) | BitCast<uint64>(inV2.mF64[0])),
                 BitCast<double>(BitCast<uint64>(inV1.mF64[1]) | BitCast<uint64>(inV2.mF64[1])),
                 BitCast<double>(BitCast<uint64>(inV1.mF64[2]) | BitCast<uint64>(inV2.mF64[2])));
#endif
}

// Logical xor (component wise)
DVec3 DVec3::sXor(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_xor_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_xor_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_xor_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ veorq_s64(inV1.mValue.val[0], inV2.mValue.val[0]), veorq_s64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) ^ BitCast<uint64>(inV2.mF64[0])),
                 BitCast<double>(BitCast<uint64>(inV1.mF64[1]) ^ BitCast<uint64>(inV2.mF64[1])),
                 BitCast<double>(BitCast<uint64>(inV1.mF64[2]) ^ BitCast<uint64>(inV2.mF64[2])));
#endif
}

// Logical and (component wise)
DVec3 DVec3::sAnd(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_and_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_and_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_and_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vandq_s64(inV1.mValue.val[0], inV2.mValue.val[0]), vandq_s64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) & BitCast<uint64>(inV2.mF64[0])),
                 BitCast<double>(BitCast<uint64>(inV1.mF64[1]) & BitCast<uint64>(inV2.mF64[1])),
                 BitCast<double>(BitCast<uint64>(inV1.mF64[2]) & BitCast<uint64>(inV2.mF64[2])));
#endif
}

// Store if X is true in bit 0, Y in bit 1 and Z in bit 2 (true is when the
// highest bit of a component is set)
int DVec3::GetTrues() const
{
#if defined(JPH_USE_AVX)
    return _mm256_movemask_pd(mValue) & 0x7;
#elif defined(JPH_USE_SSE)
    return (_mm_movemask_pd(mValue.mLow) + (_mm_movemask_pd(mValue.mHigh) << 2)) & 0x7;
#else
    return int((BitCast<uint64>(mF64[0]) >> 63) | ((BitCast<uint64>(mF64[1]) >> 63) << 1) | ((BitCast<uint64>(mF64[2]) >> 63) << 2));
#endif
}
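
// Example (illustrative, editorial): each set bit corresponds to a component
// whose sign bit is set (the "true" encoding used by the comparisons above).
//
//   int mask = DVec3::sLess(DVec3(0.0, 5.0, 0.0), DVec3(1.0, 2.0, 3.0)).GetTrues();
//   // X and Z compare less: mask == 0b101 == 5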

// Test if any of the components are true (true is when the highest bit of a component is set)
bool DVec3::TestAnyTrue() const
{
    return GetTrues() != 0;
}

// Test if all components are true (true is when the highest bit of a component is set)
bool DVec3::TestAllTrue() const
{
    return GetTrues() == 0x7;
}

// Comparison
bool DVec3::operator == (DVec3Arg inV2) const
{
    return sEquals(*this, inV2).TestAllTrue();
}

// Test if two vectors are close
bool DVec3::IsClose(DVec3Arg inV2, double inMaxDistSq) const
{
    return (inV2 - *this).LengthSq() <= inMaxDistSq;
}

// Test if vector is near zero
bool DVec3::IsNearZero(double inMaxDistSq) const
{
    return LengthSq() <= inMaxDistSq;
}

// Multiply two double vectors (component wise)
DVec3 DVec3::operator * (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_mul_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_mul_pd(mValue.mLow, inV2.mValue.mLow), _mm_mul_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmulq_f64(mValue.val[0], inV2.mValue.val[0]), vmulq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] * inV2.mF64[0], mF64[1] * inV2.mF64[1], mF64[2] * inV2.mF64[2]);
#endif
}

// Multiply vector with double
DVec3 DVec3::operator * (double inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_mul_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    return DVec3({ _mm_mul_pd(mValue.mLow, v), _mm_mul_pd(mValue.mHigh, v) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmulq_n_f64(mValue.val[0], inV2), vmulq_n_f64(mValue.val[1], inV2) });
#else
    return DVec3(mF64[0] * inV2, mF64[1] * inV2, mF64[2] * inV2);
#endif
}

// Multiply double with vector
DVec3 operator * (double inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_mul_pd(_mm256_set1_pd(inV1), inV2.mValue);
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV1);
    return DVec3({ _mm_mul_pd(v, inV2.mValue.mLow), _mm_mul_pd(v, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmulq_n_f64(inV2.mValue.val[0], inV1), vmulq_n_f64(inV2.mValue.val[1], inV1) });
#else
    return DVec3(inV1 * inV2.mF64[0], inV1 * inV2.mF64[1], inV1 * inV2.mF64[2]);
#endif
}

// Divide vector by double
DVec3 DVec3::operator / (double inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_div_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    return DVec3({ _mm_div_pd(mValue.mLow, v), _mm_div_pd(mValue.mHigh, v) });
#elif defined(JPH_USE_NEON)
    float64x2_t v = vdupq_n_f64(inV2);
    return DVec3({ vdivq_f64(mValue.val[0], v), vdivq_f64(mValue.val[1], v) });
#else
    return DVec3(mF64[0] / inV2, mF64[1] / inV2, mF64[2] / inV2);
#endif
}

// Multiply vector with double
DVec3 &DVec3::operator *= (double inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_mul_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    mValue.mLow = _mm_mul_pd(mValue.mLow, v);
    mValue.mHigh = _mm_mul_pd(mValue.mHigh, v);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vmulq_n_f64(mValue.val[0], inV2);
    mValue.val[1] = vmulq_n_f64(mValue.val[1], inV2);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] *= inV2;
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
    return *this;
}

// Multiply vector with vector (component wise)
DVec3 &DVec3::operator *= (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_mul_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_mul_pd(mValue.mLow, inV2.mValue.mLow);
    mValue.mHigh = _mm_mul_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vmulq_f64(mValue.val[0], inV2.mValue.val[0]);
    mValue.val[1] = vmulq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] *= inV2.mF64[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
    return *this;
}

// Divide vector by double
DVec3 &DVec3::operator /= (double inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_div_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    mValue.mLow = _mm_div_pd(mValue.mLow, v);
    mValue.mHigh = _mm_div_pd(mValue.mHigh, v);
#elif defined(JPH_USE_NEON)
    float64x2_t v = vdupq_n_f64(inV2);
    mValue.val[0] = vdivq_f64(mValue.val[0], v);
    mValue.val[1] = vdivq_f64(mValue.val[1], v);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] /= inV2;
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
    return *this;
}

// Add two vectors (component wise)
DVec3 DVec3::operator + (Vec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_add_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_add_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue)), _mm_add_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2)))) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vaddq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue))), vaddq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue)) });
#else
    return DVec3(mF64[0] + inV2.mF32[0], mF64[1] + inV2.mF32[1], mF64[2] + inV2.mF32[2]);
#endif
}

// Add two vectors (component wise)
DVec3 DVec3::operator + (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_add_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_add_pd(mValue.mLow, inV2.mValue.mLow), _mm_add_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vaddq_f64(mValue.val[0], inV2.mValue.val[0]), vaddq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] + inV2.mF64[0], mF64[1] + inV2.mF64[1], mF64[2] + inV2.mF64[2]);
#endif
}

// Add two vectors (component wise)
DVec3 &DVec3::operator += (Vec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_add_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_add_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue));
    mValue.mHigh = _mm_add_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vaddq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue)));
    mValue.val[1] = vaddq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue));
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] += inV2.mF32[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
    return *this;
}

// Add two vectors (component wise)
DVec3 &DVec3::operator += (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_add_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_add_pd(mValue.mLow, inV2.mValue.mLow);
    mValue.mHigh = _mm_add_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vaddq_f64(mValue.val[0], inV2.mValue.val[0]);
    mValue.val[1] = vaddq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] += inV2.mF64[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
    return *this;
}

// Negate
DVec3 DVec3::operator - () const
{
#if defined(JPH_USE_AVX)
    return _mm256_sub_pd(_mm256_setzero_pd(), mValue);
#elif defined(JPH_USE_SSE)
    __m128d zero = _mm_setzero_pd();
    return DVec3({ _mm_sub_pd(zero, mValue.mLow), _mm_sub_pd(zero, mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vnegq_f64(mValue.val[0]), vnegq_f64(mValue.val[1]) });
#else
    return DVec3(-mF64[0], -mF64[1], -mF64[2]);
#endif
}

// Subtract two vectors (component wise)
DVec3 DVec3::operator - (Vec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_sub_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_sub_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue)), _mm_sub_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2)))) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vsubq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue))), vsubq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue)) });
#else
    return DVec3(mF64[0] - inV2.mF32[0], mF64[1] - inV2.mF32[1], mF64[2] - inV2.mF32[2]);
#endif
}

// Subtract two vectors (component wise)
DVec3 DVec3::operator - (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_sub_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_sub_pd(mValue.mLow, inV2.mValue.mLow), _mm_sub_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vsubq_f64(mValue.val[0], inV2.mValue.val[0]), vsubq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] - inV2.mF64[0], mF64[1] - inV2.mF64[1], mF64[2] - inV2.mF64[2]);
#endif
}

// Subtract two vectors (component wise)
DVec3 &DVec3::operator -= (Vec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_sub_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_sub_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue));
    mValue.mHigh = _mm_sub_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vsubq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue)));
    mValue.val[1] = vsubq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue));
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] -= inV2.mF32[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
    return *this;
}

// Subtract two vectors (component wise)
DVec3 &DVec3::operator -= (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_sub_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_sub_pd(mValue.mLow, inV2.mValue.mLow);
    mValue.mHigh = _mm_sub_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vsubq_f64(mValue.val[0], inV2.mValue.val[0]);
    mValue.val[1] = vsubq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] -= inV2.mF64[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF64[3] = mF64[2];
    #endif
#endif
    return *this;
}

// Divide (component wise)
DVec3 DVec3::operator / (DVec3Arg inV2) const
{
    inV2.CheckW();
#if defined(JPH_USE_AVX)
    return _mm256_div_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_div_pd(mValue.mLow, inV2.mValue.mLow), _mm_div_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vdivq_f64(mValue.val[0], inV2.mValue.val[0]), vdivq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] / inV2.mF64[0], mF64[1] / inV2.mF64[1], mF64[2] / inV2.mF64[2]);
#endif
}

// Return the absolute value of each of the components
DVec3 DVec3::Abs() const
{
#if defined(JPH_USE_AVX512)
    return _mm256_range_pd(mValue, mValue, 0b1000);
#elif defined(JPH_USE_AVX)
    return _mm256_max_pd(_mm256_sub_pd(_mm256_setzero_pd(), mValue), mValue);
#elif defined(JPH_USE_SSE)
    __m128d zero = _mm_setzero_pd();
    return DVec3({ _mm_max_pd(_mm_sub_pd(zero, mValue.mLow), mValue.mLow), _mm_max_pd(_mm_sub_pd(zero, mValue.mHigh), mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vabsq_f64(mValue.val[0]), vabsq_f64(mValue.val[1]) });
#else
    return DVec3(abs(mF64[0]), abs(mF64[1]), abs(mF64[2]));
#endif
}

// Reciprocal vector (1 / value) for each of the components
DVec3 DVec3::Reciprocal() const
{
    return sReplicate(1.0) / mValue;
}

// Cross product
DVec3 DVec3::Cross(DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX2)
    __m256d t1 = _mm256_permute4x64_pd(inV2.mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Ensure Z and W are the same
    t1 = _mm256_mul_pd(t1, mValue);
    __m256d t2 = _mm256_permute4x64_pd(mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Ensure Z and W are the same
    t2 = _mm256_mul_pd(t2, inV2.mValue);
    __m256d t3 = _mm256_sub_pd(t1, t2);
    return _mm256_permute4x64_pd(t3, _MM_SHUFFLE(0, 0, 2, 1)); // Ensure Z and W are the same
#else
    return DVec3(mF64[1] * inV2.mF64[2] - mF64[2] * inV2.mF64[1],
                 mF64[2] * inV2.mF64[0] - mF64[0] * inV2.mF64[2],
                 mF64[0] * inV2.mF64[1] - mF64[1] * inV2.mF64[0]);
#endif
}
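
// Example (illustrative, editorial): the cross product follows the right
// hand rule, e.g.
//
//   DVec3 x(1.0, 0.0, 0.0), y(0.0, 1.0, 0.0);
//   DVec3 z = x.Cross(y); // == (0.0, 0.0, 1.0)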

// Dot product
double DVec3::Dot(DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    __m256d mul = _mm256_mul_pd(mValue, inV2.mValue);
    __m128d xy = _mm256_castpd256_pd128(mul);
    __m128d yx = _mm_shuffle_pd(xy, xy, 1);
    __m128d sum = _mm_add_pd(xy, yx);
    __m128d zw = _mm256_extractf128_pd(mul, 1);
    sum = _mm_add_pd(sum, zw);
    return _mm_cvtsd_f64(sum);
#elif defined(JPH_USE_SSE)
    __m128d xy = _mm_mul_pd(mValue.mLow, inV2.mValue.mLow);
    __m128d yx = _mm_shuffle_pd(xy, xy, 1);
    __m128d sum = _mm_add_pd(xy, yx);
    __m128d z = _mm_mul_sd(mValue.mHigh, inV2.mValue.mHigh);
    sum = _mm_add_pd(sum, z);
    return _mm_cvtsd_f64(sum);
#elif defined(JPH_USE_NEON)
    float64x2_t mul_low = vmulq_f64(mValue.val[0], inV2.mValue.val[0]);
    float64x2_t mul_high = vmulq_f64(mValue.val[1], inV2.mValue.val[1]);
    return vaddvq_f64(mul_low) + vgetq_lane_f64(mul_high, 0);
#else
    double dot = 0.0;
    for (int i = 0; i < 3; i++)
        dot += mF64[i] * inV2.mF64[i];
    return dot;
#endif
}
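
// Example (illustrative, editorial):
//
//   DVec3 a(1.0, 2.0, 3.0), b(4.0, 5.0, 6.0);
//   double d = a.Dot(b); // 1*4 + 2*5 + 3*6 == 32.0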

// Squared length of vector
double DVec3::LengthSq() const
{
    return Dot(*this);
}

// Component wise square root
DVec3 DVec3::Sqrt() const
{
#if defined(JPH_USE_AVX)
    return _mm256_sqrt_pd(mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_sqrt_pd(mValue.mLow), _mm_sqrt_pd(mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vsqrtq_f64(mValue.val[0]), vsqrtq_f64(mValue.val[1]) });
#else
    return DVec3(sqrt(mF64[0]), sqrt(mF64[1]), sqrt(mF64[2]));
#endif
}

// Length of vector
double DVec3::Length() const
{
    return sqrt(Dot(*this));
}

// Normalize vector
DVec3 DVec3::Normalized() const
{
    return *this / Length();
}
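
// Note (editorial): Normalized() divides by Length() without checking for
// zero, so normalizing a zero vector yields non-finite components.
// Example: DVec3(3.0, 0.0, 4.0).Normalized() == (0.6, 0.0, 0.8).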

// Test if vector is normalized
bool DVec3::IsNormalized(double inTolerance) const
{
    return abs(LengthSq() - 1.0) <= inTolerance;
}

// Test if vector contains NaN elements
bool DVec3::IsNaN() const
{
#if defined(JPH_USE_AVX512)
    return (_mm256_fpclass_pd_mask(mValue, 0b10000001) & 0x7) != 0;
#elif defined(JPH_USE_AVX)
    return (_mm256_movemask_pd(_mm256_cmp_pd(mValue, mValue, _CMP_UNORD_Q)) & 0x7) != 0;
#elif defined(JPH_USE_SSE)
    return ((_mm_movemask_pd(_mm_cmpunord_pd(mValue.mLow, mValue.mLow)) + (_mm_movemask_pd(_mm_cmpunord_pd(mValue.mHigh, mValue.mHigh)) << 2)) & 0x7) != 0;
#else
    return isnan(mF64[0]) || isnan(mF64[1]) || isnan(mF64[2]);
#endif
}

// Get vector that contains the sign of each element (returns 1.0 if positive, -1.0 if negative)
DVec3 DVec3::GetSign() const
{
#if defined(JPH_USE_AVX512)
    return _mm256_fixupimm_pd(mValue, mValue, _mm256_set1_epi32(0xA9A90A00), 0);
#elif defined(JPH_USE_AVX)
    __m256d minus_one = _mm256_set1_pd(-1.0);
    __m256d one = _mm256_set1_pd(1.0);
    return _mm256_or_pd(_mm256_and_pd(mValue, minus_one), one);
#elif defined(JPH_USE_SSE)
    __m128d minus_one = _mm_set1_pd(-1.0);
    __m128d one = _mm_set1_pd(1.0);
    return DVec3({ _mm_or_pd(_mm_and_pd(mValue.mLow, minus_one), one), _mm_or_pd(_mm_and_pd(mValue.mHigh, minus_one), one) });
#elif defined(JPH_USE_NEON)
    float64x2_t minus_one = vdupq_n_f64(-1.0);
    float64x2_t one = vdupq_n_f64(1.0);
    return DVec3({ vorrq_s64(vandq_s64(mValue.val[0], minus_one), one), vorrq_s64(vandq_s64(mValue.val[1], minus_one), one) });
#else
    return DVec3(std::signbit(mF64[0])? -1.0 : 1.0,
                 std::signbit(mF64[1])? -1.0 : 1.0,
                 std::signbit(mF64[2])? -1.0 : 1.0);
#endif
}

// Prepare to convert to float vector 3 rounding towards zero (returns a
// DVec3 that can be converted to a Vec3 to get the rounded result)
DVec3 DVec3::PrepareRoundToZero() const
{
    // Float has a 23 bit mantissa, double has a 52 bit mantissa => we lose 29 bits when converting from double to float
    constexpr uint64 cDoubleToFloatMantissaLoss = (1U << 29) - 1;

#if defined(JPH_USE_AVX)
    return _mm256_and_pd(mValue, _mm256_castsi256_pd(_mm256_set1_epi64x(int64_t(~cDoubleToFloatMantissaLoss))));
#elif defined(JPH_USE_SSE)
    __m128d mask = _mm_castsi128_pd(_mm_set1_epi64x(int64_t(~cDoubleToFloatMantissaLoss)));
    return DVec3({ _mm_and_pd(mValue.mLow, mask), _mm_and_pd(mValue.mHigh, mask) });
#elif defined(JPH_USE_NEON)
    float64x2_t mask = vreinterpretq_f64_u64(vdupq_n_u64(~cDoubleToFloatMantissaLoss));
    return DVec3({ vandq_s64(mValue.val[0], mask), vandq_s64(mValue.val[1], mask) });
#else
    double x = BitCast<double>(BitCast<uint64>(mF64[0]) & ~cDoubleToFloatMantissaLoss);
    double y = BitCast<double>(BitCast<uint64>(mF64[1]) & ~cDoubleToFloatMantissaLoss);
    double z = BitCast<double>(BitCast<uint64>(mF64[2]) & ~cDoubleToFloatMantissaLoss);

    return DVec3(x, y, z);
#endif
}

// Prepare to convert to float vector 3 rounding towards positive/negative
// infinity (returns a DVec3 that can be converted to a Vec3 to get the
// rounded result)
DVec3 DVec3::PrepareRoundToInf() const
{
    // Float has a 23 bit mantissa, double has a 52 bit mantissa => we lose 29 bits when converting from double to float
    constexpr uint64 cDoubleToFloatMantissaLoss = (1U << 29) - 1;

#if defined(JPH_USE_AVX512)
    __m256i mantissa_loss = _mm256_set1_epi64x(cDoubleToFloatMantissaLoss);
    __mmask8 is_zero = _mm256_testn_epi64_mask(_mm256_castpd_si256(mValue), mantissa_loss);
    __m256d value_or_mantissa_loss = _mm256_or_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
    return _mm256_mask_blend_pd(is_zero, value_or_mantissa_loss, mValue);
#elif defined(JPH_USE_AVX)
    __m256i mantissa_loss = _mm256_set1_epi64x(cDoubleToFloatMantissaLoss);
    __m256d value_and_mantissa_loss = _mm256_and_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
    __m256d is_zero = _mm256_cmp_pd(value_and_mantissa_loss, _mm256_setzero_pd(), _CMP_EQ_OQ);
    __m256d value_or_mantissa_loss = _mm256_or_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
    return _mm256_blendv_pd(value_or_mantissa_loss, mValue, is_zero);
#elif defined(JPH_USE_SSE4_1)
    __m128i mantissa_loss = _mm_set1_epi64x(cDoubleToFloatMantissaLoss);
    __m128d zero = _mm_setzero_pd();
    __m128d value_and_mantissa_loss_low = _mm_and_pd(mValue.mLow, _mm_castsi128_pd(mantissa_loss));
    __m128d is_zero_low = _mm_cmpeq_pd(value_and_mantissa_loss_low, zero);
    __m128d value_or_mantissa_loss_low = _mm_or_pd(mValue.mLow, _mm_castsi128_pd(mantissa_loss));
    __m128d value_and_mantissa_loss_high = _mm_and_pd(mValue.mHigh, _mm_castsi128_pd(mantissa_loss));
    __m128d is_zero_high = _mm_cmpeq_pd(value_and_mantissa_loss_high, zero);
    __m128d value_or_mantissa_loss_high = _mm_or_pd(mValue.mHigh, _mm_castsi128_pd(mantissa_loss));
    return DVec3({ _mm_blendv_pd(value_or_mantissa_loss_low, mValue.mLow, is_zero_low), _mm_blendv_pd(value_or_mantissa_loss_high, mValue.mHigh, is_zero_high) });
#elif defined(JPH_USE_NEON)
    float64x2_t mantissa_loss = vreinterpretq_f64_u64(vdupq_n_u64(cDoubleToFloatMantissaLoss));
    float64x2_t zero = vdupq_n_f64(0.0);
    float64x2_t value_and_mantissa_loss_low = vandq_s64(mValue.val[0], mantissa_loss);
    float64x2_t is_zero_low = vceqq_f64(value_and_mantissa_loss_low, zero);
    float64x2_t value_or_mantissa_loss_low = vorrq_s64(mValue.val[0], mantissa_loss);
    float64x2_t value_and_mantissa_loss_high = vandq_s64(mValue.val[1], mantissa_loss);
    float64x2_t value_low = vbslq_f64(is_zero_low, mValue.val[0], value_or_mantissa_loss_low);
    float64x2_t is_zero_high = vceqq_f64(value_and_mantissa_loss_high, zero);
    float64x2_t value_or_mantissa_loss_high = vorrq_s64(mValue.val[1], mantissa_loss);
    float64x2_t value_high = vbslq_f64(is_zero_high, mValue.val[1], value_or_mantissa_loss_high);
    return DVec3({ value_low, value_high });
#else
    uint64 ux = BitCast<uint64>(mF64[0]);
    uint64 uy = BitCast<uint64>(mF64[1]);
    uint64 uz = BitCast<uint64>(mF64[2]);

    double x = BitCast<double>((ux & cDoubleToFloatMantissaLoss) == 0? ux : (ux | cDoubleToFloatMantissaLoss));
    double y = BitCast<double>((uy & cDoubleToFloatMantissaLoss) == 0? uy : (uy | cDoubleToFloatMantissaLoss));
    double z = BitCast<double>((uz & cDoubleToFloatMantissaLoss) == 0? uz : (uz | cDoubleToFloatMantissaLoss));

    return DVec3(x, y, z);
#endif
}
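
// How the Prepare* helpers work (editorial summary): a double that still has
// any of the 29 mantissa bits set that a float cannot represent lies strictly
// between two adjacent floats. PrepareRoundToZero() clears those bits so the
// subsequent double -> float conversion truncates towards zero, while
// PrepareRoundToInf() sets them all so the conversion rounds away from zero
// (towards +/- infinity, depending on sign). Values already exactly
// representable as a float are left unchanged.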

// Convert to float vector 3 rounding down
Vec3 DVec3::ToVec3RoundDown() const
{
    DVec3 to_zero = PrepareRoundToZero();
    DVec3 to_inf = PrepareRoundToInf();
    return Vec3(DVec3::sSelect(to_zero, to_inf, DVec3::sLess(*this, DVec3::sZero())));
}

// Convert to float vector 3 rounding up
Vec3 DVec3::ToVec3RoundUp() const
{
    DVec3 to_zero = PrepareRoundToZero();
    DVec3 to_inf = PrepareRoundToInf();
    return Vec3(DVec3::sSelect(to_inf, to_zero, DVec3::sLess(*this, DVec3::sZero())));
}
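
// Illustrative usage (editorial, not part of the original source): the
// rounding conversions are useful to build conservative single precision
// bounds around double precision values, e.g.
//
//   DVec3 p(1.0 / 3.0, 2.0 / 3.0, 4.0 / 3.0);
//   Vec3 lower = p.ToVec3RoundDown(); // lower <= p in every component
//   Vec3 upper = p.ToVec3RoundUp();   // upper >= p in every component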

JPH_NAMESPACE_END