Diffstat (limited to 'Runtime/Math/Simd')
-rw-r--r--   Runtime/Math/Simd/Matrix4x4Simd.h    175
-rw-r--r--   Runtime/Math/Simd/SimdMath.h         240
-rw-r--r--   Runtime/Math/Simd/SimdTest.cpp       814
-rw-r--r--   Runtime/Math/Simd/bool1.h             42
-rw-r--r--   Runtime/Math/Simd/bool4.h             61
-rw-r--r--   Runtime/Math/Simd/float1.h           232
-rw-r--r--   Runtime/Math/Simd/float4.h           397
-rw-r--r--   Runtime/Math/Simd/fpu.h              245
-rw-r--r--   Runtime/Math/Simd/intrinsic.h        184
-rw-r--r--   Runtime/Math/Simd/math.h             678
-rw-r--r--   Runtime/Math/Simd/neon.h             548
-rw-r--r--   Runtime/Math/Simd/ppu.h             1944
-rw-r--r--   Runtime/Math/Simd/quaternion.h       253
-rw-r--r--   Runtime/Math/Simd/sse.h              237
-rw-r--r--   Runtime/Math/Simd/xenon.h            275
-rw-r--r--   Runtime/Math/Simd/xform.h            153
16 files changed, 6478 insertions, 0 deletions
diff --git a/Runtime/Math/Simd/Matrix4x4Simd.h b/Runtime/Math/Simd/Matrix4x4Simd.h
new file mode 100644
index 0000000..b38890d
--- /dev/null
+++ b/Runtime/Math/Simd/Matrix4x4Simd.h
@@ -0,0 +1,175 @@
+#ifndef MATRIX4X4SIMD_H
+#define MATRIX4X4SIMD_H
+
+static void MultiplyMatrices4x4NATIVE (const Simd128& m10, const Simd128& m11, const Simd128& m12, const Simd128& m13, const Simd128& m20, const Simd128& m21, const Simd128& m22, const Simd128& m23, Simd128& rm0, Simd128& rm1, Simd128& rm2, Simd128& rm3)
+{
+ const Simd128 m20_X = V4Splat( m20, 0 );
+ const Simd128 m21_X = V4Splat( m21, 0 );
+ const Simd128 m22_X = V4Splat( m22, 0 );
+ const Simd128 m23_X = V4Splat( m23, 0 );
+ const Simd128 rm0_0 = V4Mul( m20_X, m10 );
+ const Simd128 rm1_0 = V4Mul( m21_X, m10 );
+ const Simd128 rm2_0 = V4Mul( m22_X, m10 );
+ const Simd128 rm3_0 = V4Mul( m23_X, m10 );
+ const Simd128 m20_Y = V4Splat(m20, 1 );
+ const Simd128 m21_Y = V4Splat(m21, 1 );
+ const Simd128 m22_Y = V4Splat(m22, 1 );
+ const Simd128 m23_Y = V4Splat(m23, 1 );
+ const Simd128 rm0_1 = V4MulAdd( m20_Y, m11, rm0_0 );
+ const Simd128 rm1_1 = V4MulAdd( m21_Y, m11, rm1_0 );
+ const Simd128 rm2_1 = V4MulAdd( m22_Y, m11, rm2_0 );
+ const Simd128 rm3_1 = V4MulAdd( m23_Y, m11, rm3_0 );
+ const Simd128 m20_Z = V4Splat(m20, 2 );
+ const Simd128 m21_Z = V4Splat(m21, 2 );
+ const Simd128 m22_Z = V4Splat(m22, 2 );
+ const Simd128 m23_Z = V4Splat(m23, 2 );
+ const Simd128 rm0_2 = V4MulAdd( m20_Z, m12, rm0_1 );
+ const Simd128 rm1_2 = V4MulAdd( m21_Z, m12, rm1_1 );
+ const Simd128 rm2_2 = V4MulAdd( m22_Z, m12, rm2_1 );
+ const Simd128 rm3_2 = V4MulAdd( m23_Z, m12, rm3_1 );
+ const Simd128 m20_W = V4Splat(m20, 3 );
+ const Simd128 m21_W = V4Splat(m21, 3 );
+ const Simd128 m22_W = V4Splat(m22, 3 );
+ const Simd128 m23_W = V4Splat(m23, 3 );
+ rm0 = V4MulAdd( m20_W, m13 , rm0_2 );
+ rm1 = V4MulAdd( m21_W, m13 , rm1_2 );
+ rm2 = V4MulAdd( m22_W, m13 , rm2_2 );
+ rm3 = V4MulAdd( m23_W, m13 , rm3_2 );
+}
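+// Illustrative scalar reference for the interleaved multiply-adds above:
+// each output register is a linear combination of the four m1x registers,
+// weighted by the splatted lanes of the matching m2x register, i.e.
+//   rmI = m2I.x*m10 + m2I.y*m11 + m2I.z*m12 + m2I.w*m13
+// Computing all four results together keeps the V4MulAdd chains independent
+// and hides the multiply-add latency.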
+
+static void TransformPoint3NATIVE(const Simd128& m0, const Simd128& m1, const Simd128& m2, const Simd128& m3, const Simd128& vin, Simd128& vout)
+{
+ const Simd128 v0 = V4Splat(vin, 0);
+ const Simd128 v1 = V4Splat(vin, 1);
+ const Simd128 v2 = V4Splat(vin, 2);
+ Simd128 vtemp = V4MulAdd(m0, v0, m3);
+ vtemp = V4MulAdd(m1, v1, vtemp);
+ vout = V4MulAdd(m2, v2, vtemp);
+}
+
+static void TransformVector3NATIVE(const Simd128& m0, const Simd128& m1, const Simd128& m2, const Simd128& m3, const Simd128& vin, Simd128& vout)
+{
+ const Simd128 v0 = V4Splat(vin, 0);
+ const Simd128 v1 = V4Splat(vin, 1);
+ const Simd128 v2 = V4Splat(vin, 2);
+ Simd128 vtemp = V4Mul(m0, v0);
+ vtemp = V4MulAdd(m1, v1, vtemp);
+ vout = V4MulAdd(m2, v2, vtemp);
+}
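+// The only difference from TransformPoint3NATIVE is the starting term: a
+// point folds in the translation part m3 via the first V4MulAdd, while a
+// vector starts from a plain V4Mul, i.e. w is treated as 0:
+//   point:  vout = m0*v.x + m1*v.y + m2*v.z + m3
+//   vector: vout = m0*v.x + m1*v.y + m2*v.z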
+
+static void DECLARE_SIMD_FUNC(MultiplyMatrices4x4) (const Matrix4x4f* __restrict lhs, const Matrix4x4f* __restrict rhs, Matrix4x4f* __restrict res)
+{
+ Assert (lhs != rhs && lhs != res && rhs != res);
+ float* m = res->m_Data;
+ const float* m1 = lhs->m_Data;
+ const float* m2 = rhs->m_Data;
+ Simd128 rm0, rm1, rm2, rm3;
+
+ Prefetch((const char*)m1);
+ Prefetch((const char*)m2);
+
+ const Simd128 m10 = V4LoadUnaligned( m1, 0x0 );
+ const Simd128 m11 = V4LoadUnaligned( m1, 0x4 );
+ const Simd128 m12 = V4LoadUnaligned( m1, 0x8 );
+ const Simd128 m13 = V4LoadUnaligned( m1, 0xC );
+
+ const Simd128 m20 = V4LoadUnaligned( m2, 0x0 );
+ const Simd128 m21 = V4LoadUnaligned( m2, 0x4 );
+ const Simd128 m22 = V4LoadUnaligned( m2, 0x8 );
+ const Simd128 m23 = V4LoadUnaligned( m2, 0xC );
+
+ MultiplyMatrices4x4NATIVE(m10, m11, m12, m13, m20, m21, m22, m23, rm0, rm1, rm2, rm3);
+
+ V4StoreUnaligned(rm0, m, 0x0 );
+ V4StoreUnaligned(rm1, m, 0x4 );
+ V4StoreUnaligned(rm2, m, 0x8 );
+ V4StoreUnaligned(rm3, m, 0xC );
+}
+
+static void DECLARE_SIMD_FUNC(CopyMatrix) ( const float* __restrict lhs, float* __restrict res)
+{
+ Simd128 r0 = V4LoadUnaligned(lhs, 0x0);
+ Simd128 r1 = V4LoadUnaligned(lhs, 0x4);
+ Simd128 r2 = V4LoadUnaligned(lhs, 0x8);
+ Simd128 r3 = V4LoadUnaligned(lhs, 0xC);
+ V4StoreUnaligned(r0, res, 0x0);
+ V4StoreUnaligned(r1, res, 0x4);
+ V4StoreUnaligned(r2, res, 0x8);
+ V4StoreUnaligned(r3, res, 0xC);
+}
+
+
+static void DECLARE_SIMD_FUNC(TransposeMatrix4x4) (const Matrix4x4f* __restrict lhs, Matrix4x4f* __restrict res)
+{
+ const float* m0 = lhs->m_Data;
+ float* m = res->m_Data;
+
+ const Simd128 m00 = V4LoadUnaligned(m0, 0x0);
+ const Simd128 m01 = V4LoadUnaligned(m0, 0x4);
+ const Simd128 m02 = V4LoadUnaligned(m0, 0x8);
+ const Simd128 m03 = V4LoadUnaligned(m0, 0xC);
+
+ const Simd128 xxyy1 = V4MergeH(m00, m02);
+ const Simd128 zzww1 = V4MergeL(m00, m02);
+ const Simd128 xxyy2 = V4MergeH(m01, m03);
+ const Simd128 zzww2 = V4MergeL(m01, m03);
+ const Simd128 t00 = V4MergeH(xxyy1,xxyy2);
+ const Simd128 t01 = V4MergeL(xxyy1,xxyy2);
+ const Simd128 t02 = V4MergeH(zzww1,zzww2);
+ const Simd128 t03 = V4MergeL(zzww1,zzww2);
+
+ V4StoreUnaligned(t00, m, 0x0);
+ V4StoreUnaligned(t01, m, 0x4);
+ V4StoreUnaligned(t02, m, 0x8);
+ V4StoreUnaligned(t03, m, 0xC);
+}
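+// Classic two-pass merge transpose: the first round of V4MergeH/V4MergeL
+// interleaves rows 0/2 and 1/3, the second round gathers the columns.
+// Illustrative trace of the first lane (PPC merge semantics):
+//   xxyy1 = (m00.x, m02.x, m00.y, m02.y)
+//   t00   = (m00.x, m01.x, m02.x, m03.x)   // first column of the input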
+
+static void DECLARE_SIMD_FUNC(MultiplyMatrixArrayWithBase4x4) (const Matrix4x4f* __restrict base,
+ const Matrix4x4f* __restrict a, const Matrix4x4f* __restrict b, Matrix4x4f* __restrict res, size_t count)
+{
+ const float* mbase = base->m_Data;
+ Prefetch((const char*)mbase);
+
+ const Simd128 base0 = V4LoadUnaligned( mbase, 0x0 );
+ const Simd128 base1 = V4LoadUnaligned( mbase, 0x4 );
+ const Simd128 base2 = V4LoadUnaligned( mbase, 0x8 );
+ const Simd128 base3 = V4LoadUnaligned( mbase, 0xC );
+
+ for (size_t i = 0; i < count; ++i)
+ {
+ float* m = res[i].m_Data;
+ const float* m1 = a[i].m_Data;
+ const float* m2 = b[i].m_Data;
+ Prefetch((const char*)m1);
+ Prefetch((const char*)m2);
+ const Simd128 m10 = V4LoadUnaligned( m1, 0x0 );
+ const Simd128 m11 = V4LoadUnaligned( m1, 0x4 );
+ const Simd128 m12 = V4LoadUnaligned( m1, 0x8 );
+ const Simd128 m13 = V4LoadUnaligned( m1, 0xC );
+ const Simd128 m20 = V4LoadUnaligned( m2, 0x0 );
+ const Simd128 m21 = V4LoadUnaligned( m2, 0x4 );
+ const Simd128 m22 = V4LoadUnaligned( m2, 0x8 );
+ const Simd128 m23 = V4LoadUnaligned( m2, 0xC );
+
+ Simd128 b20, b21, b22, b23, rb0, rb1, rb2, rb3;
+ MultiplyMatrices4x4NATIVE(m10, m11, m12, m13, m20, m21, m22, m23, b20, b21, b22, b23);
+ MultiplyMatrices4x4NATIVE(base0, base1, base2, base3, b20, b21, b22, b23, rb0, rb1, rb2, rb3);
+
+ V4StoreUnaligned(rb0, m, 0x0 );
+ V4StoreUnaligned(rb1, m, 0x4 );
+ V4StoreUnaligned(rb2, m, 0x8 );
+ V4StoreUnaligned(rb3, m, 0xC );
+ }
+}
+
+
+#if UNITY_AUTO_DETECT_VECTOR_UNIT && UNITY_SUPPORTS_SSE
+# define MultiplyMatrices4x4(a,b,c) (CPUInfo::HasSSESupport() ? MultiplyMatrices4x4Simd(a,b,c) : MultiplyMatrices4x4REF(a,b,c))
+# define CopyMatrix(a,b) (CPUInfo::HasSSESupport() ? CopyMatrixSimd(a,b) : CopyMatrixREF(a,b))
+# define TransposeMatrix4x4(a,b) (CPUInfo::HasSSESupport() ? TransposeMatrix4x4Simd(a,b) : TransposeMatrix4x4REF(a,b))
+# define MultiplyMatrixArrayWithBase4x4(base,a,b,res,count) (CPUInfo::HasSSESupport() ? MultiplyMatrixArrayWithBase4x4Simd(base,a,b,res,count) : MultiplyMatrixArrayWithBase4x4REF(base,a,b,res,count))
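+// With UNITY_AUTO_DETECT_VECTOR_UNIT the SSE path is selected on every call.
+// For a hot loop it can be cheaper to hoist the feature test; hypothetical
+// caller sketch (not part of this header):
+//   if (CPUInfo::HasSSESupport())
+//       for (size_t i = 0; i < n; ++i) MultiplyMatrices4x4Simd(&a[i], &b[i], &r[i]);
+//   else
+//       for (size_t i = 0; i < n; ++i) MultiplyMatrices4x4REF(&a[i], &b[i], &r[i]);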
+#endif
+
+#define MultiplyMatrixArray4x4 MultiplyMatrixArray4x4REF
+
+#endif //MATRIX4X4SIMD_H
\ No newline at end of file
diff --git a/Runtime/Math/Simd/SimdMath.h b/Runtime/Math/Simd/SimdMath.h
new file mode 100644
index 0000000..99fd118
--- /dev/null
+++ b/Runtime/Math/Simd/SimdMath.h
@@ -0,0 +1,240 @@
+#pragma once
+
+#include "Runtime/Utilities/Prefetch.h"
+#include "Runtime/Misc/CPUInfo.h"
+
+#if UNITY_SUPPORTS_VMX
+
+# define VMX_0X 0
+# define VMX_0Y 1
+# define VMX_0Z 2
+# define VMX_0W 3
+
+# define VMX_1X 4
+# define VMX_1Y 5
+# define VMX_1Z 6
+# define VMX_1W 7
+
+# define V4BuildPermuteMask(a,b,c,d) { \
+ (((a)<<2)|((a)<<10)|((a)<<18)|((a)<<26))+0x00010203,\
+ (((b)<<2)|((b)<<10)|((b)<<18)|((b)<<26))+0x00010203,\
+ (((c)<<2)|((c)<<10)|((c)<<18)|((c)<<26))+0x00010203,\
+ (((d)<<2)|((d)<<10)|((d)<<18)|((d)<<26))+0x00010203}
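+ // Each 32-bit lane of the mask packs the four byte indices of one source
+ // element: (a)<<2 is the first byte of element a, and adding 0x00010203
+ // fills in the next three. Illustrative expansion:
+ //   V4BuildPermuteMask(1,2,0,3) -> bytes { 4,5,6,7, 8,9,10,11, 0,1,2,3, 12,13,14,15 }
+ // i.e. the YZXW permutation used by V3Cross below.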
+
+
+# if UNITY_XENON
+# include <Xtl.h>
+# define ALIGN16 __declspec(align(16))
+ typedef __vector4 Simd128;
+
+# define vec_splat __vspltw
+# define vec_ste(vec, off, addr) __stvebx(vec, addr, off)
+
+# define V4Splat(v0, i) __vspltw((v0), (i))
+
+# elif UNITY_PS3
+# include <ppu_intrinsics.h>
+# define ALIGN16 __attribute__((aligned(16)))
+# define __forceinline __attribute__((always_inline))
+ typedef vec_float4 Simd128;
+ static const vec_float4 __vsignedzero = {-0.f,-0.f,-0.f,-0.f};
+
+
+# define __vzero() ((vec_float4)vec_splat_u32(0))
+
+# define __lvx(base, offset) vec_lvx(offset, base)
+# define __lvlx(base, offset) vec_lvlx(offset, base)
+# define __lvrx(base, offset) vec_lvrx(offset, base)
+
+# define __stvx(value, base, offset) vec_stvx((value), (offset), (float*)(base))
+# define __stvlx(value, base, offset) vec_stvlx((value), (offset), (float*)(base))
+# define __stvrx(value, base, offset) vec_stvrx((value), (offset), (float*)(base))
+
+# define __vmrglw(v0, v1) vec_mergel((vec_float4)(v0), (vec_float4)(v1))
+# define __vmrghw(v0, v1) vec_mergeh((vec_float4)(v0), (vec_float4)(v1))
+
+# define __vmulfp(a, b) vec_madd( a, b, __vsignedzero)
+
+# define __vand vec_and
+# define __vandc vec_andc
+# define __vor vec_or
+# define __vnor vec_nor
+# define __vxor vec_xor
+# define __vspltw vec_splat
+# define __vmaddfp vec_madd
+# define __vaddfp vec_add
+# define __vsubfp vec_sub
+# define __vperm vec_perm
+# define __vnmsubfp vec_nmsub
+# define __vminfp vec_min
+# define __vmaxfp vec_max
+# define __vrsqrtefp vec_rsqrte
+# define __vsel vec_sel
+# define __vrefp vec_re
+
+# define __vcmpeqfp vec_vcmpeqfp
+# define __vcmpgtfp vec_vcmpgtfp
+# define __vcmpgefp vec_vcmpgefp
+
+ __forceinline static Simd128 __vmsum3fp(Simd128 v0, Simd128 v1)
+ {
+ const Simd128 m0 = vec_madd(v0, v1, __vsignedzero);
+ const Simd128 m1 = vec_splat(m0, 0);
+ const Simd128 m2 = vec_splat(m0, 1);
+ const Simd128 m3 = vec_splat(m0, 2);
+ return vec_add(vec_add(m1, m2), m3);
+ }
+ __forceinline static Simd128 __vmsum4fp(Simd128 v0, Simd128 v1)
+ {
+ const Simd128 m0 = vec_madd(v0, v1, __vsignedzero);
+ const Simd128 m1 = vec_sld(m0, m0, 8);
+ const Simd128 m2 = vec_add(m0, m1);
+ const Simd128 m3 = vec_sld(m2, m2, 4);
+ return vec_add(m2, m3);
+ }
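+ // __vmsum4fp is a horizontal add done with two vector rotates: vec_sld by
+ // 8 bytes adds (x,y,z,w) to (z,w,x,y), then a 4-byte rotate and a second
+ // add leave x+y+z+w replicated in every lane. __vmsum3fp splats and sums
+ // the first three products instead, dropping the w term.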
+
+# endif
+
+
+# if UNITY_PS3
+ typedef vec_uchar16 Simd128Mask;
+# else
+ typedef Simd128 Simd128Mask;
+# endif
+
+ typedef ALIGN16 struct Simd128i { union { int i[4]; Simd128 v; };} Simd128i;
+
+ // Load / Save
+# define V4Load(base, offset) __lvx((base), sizeof(base)*(offset))
+# define V4LoadUnaligned(base, offset) __vor(__lvlx((base), sizeof(base)*(offset)), __lvrx((base), (sizeof(base)*(offset)) + 16))
+# define V4Store(value, base, offset) __stvx(value, (base), sizeof(base)*(offset))
+# define V4StoreUnaligned(value, base, offset) do { __stvlx(value, (float*)(base), sizeof(base)*(offset)); __stvrx(value, (float*)(base), (sizeof(base)*(offset)) + 16 ); } while (0)
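+ // Unaligned VMX access is synthesized from aligned halves: __lvlx/__lvrx
+ // fetch the fragments on either side of the 16-byte boundary and are OR-ed
+ // together; the store analogue writes both fragments with __stvlx/__stvrx,
+ // hence the do/while wrapper so the two statements stay safe in an unbraced
+ // if/else. Note sizeof(base) is the pointer size, which matches
+ // sizeof(float) on these 32-bit console ABIs, so offset is presumably
+ // intended as an element index, mirroring the SSE macros below.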
+
+ // Math functions
+# define V4Zero() __vzero()
+# define V4Add(v0, v1) __vaddfp((v0), (v1))
+# define V4Sub(v0, v1) __vsubfp((v0), (v1))
+# define V4Mul(v0, v1) __vmulfp((v0), (v1))
+# define V4MulAdd(v0, v1, v2) __vmaddfp((v0), (v1), (v2))
+# define V4Min(v0, v1) __vminfp((v0), (v1))
+# define V4Max(v0, v1) __vmaxfp((v0), (v1))
+# define V4Rcp(v0) __vrefp((v0))
+# define V4Rsqrt(v0) __vrsqrtefp((v0))
+# define V3Dot(v0, v1) __vmsum3fp((v0), (v1))
+# define V4Dot(v0, v1) __vmsum4fp((v0), (v1))
+
+ // Shuffling / Permuting / Splatting / Merging
+# define V4Splat(v0, i) __vspltw((v0), (i))
+# define V4MergeL(v0, v1) __vmrglw((v0), (v1))
+# define V4MergeH(v0, v1) __vmrghw((v0), (v1))
+
+ __forceinline static Simd128 V3Cross(Simd128 v0, Simd128 v1)
+ {
+ const static Simd128i maskYZXW = V4BuildPermuteMask(VMX_0Y, VMX_0Z, VMX_0X, VMX_0W);
+ const Simd128Mask p = (Simd128Mask)maskYZXW.v;
+ const Simd128 m0 = __vperm(v1, v1, p);
+ const Simd128 m1 = __vperm(v0, v0, p);
+ const Simd128 m2 = __vmulfp(v0, m0);
+ const Simd128 m3 = __vnmsubfp(m1, v1, m2);
+ return __vperm(m3, m3, p);
+ }
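+ // One-permute cross product: with p = YZXW,
+ //   m3 = v0*v1.yzx - v0.yzx*v1
+ // is (v0 x v1) rotated down one lane (m3.x holds the z component, etc.),
+ // so a single final permute restores the result; the w lane cancels to 0.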
+
+
+
+#elif UNITY_SUPPORTS_SSE
+
+# if UNITY_WIN
+# include <intrin.h>
+# define ALIGN16 __declspec(align(16))
+# else
+# include <xmmintrin.h>
+# define ALIGN16 __attribute__((aligned(16)))
+# define __forceinline inline __attribute__((always_inline))
+# endif
+
+ typedef __m128 Simd128;
+
+ // Load / Save
+# define V4Load(base, offset) _mm_load_ps((base)+(offset))
+# define V4LoadUnaligned(base, offset) _mm_loadu_ps((base)+(offset))
+# define V4Store(value, base, offset) _mm_store_ps((base)+(offset), value)
+# define V4StoreUnaligned(value, base, offset) _mm_storeu_ps((base)+(offset), value)
+
+ // Math functions
+# define V4Zero() _mm_setzero_ps()
+# define V4Add(v0, v1) _mm_add_ps((v0), (v1))
+# define V4Sub(v0, v1) _mm_sub_ps((v0), (v1))
+# define V4Mul(v0, v1) _mm_mul_ps((v0), (v1))
+# define V4MulAdd(v0, v1, v2) _mm_add_ps(_mm_mul_ps((v0), (v1)), (v2))
+# define V4Min(v0, v1) _mm_min_ps((v0), (v1))
+# define V4Max(v0, v1) _mm_max_ps((v0), (v1))
+# define V4Rcp(v0) _mm_rcp_ps((v0))
+# define V4Rsqrt(v0) _mm_rsqrt_ps((v0))
+
+ __forceinline static Simd128 V3Dot(Simd128 v0, Simd128 v1)
+ {
+ const Simd128 m0 = _mm_mul_ps(v0, v1);
+ const Simd128 m1 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0,0,0,0));
+ const Simd128 m2 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(1,1,1,1));
+ const Simd128 m3 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(2,2,2,2));
+ return _mm_add_ps(_mm_add_ps(m1, m2), m3);
+ }
+ __forceinline static Simd128 V4Dot(Simd128 v0, Simd128 v1)
+ {
+ const Simd128 m0 = _mm_mul_ps(v0, v1);
+ const Simd128 m1 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(2,3,0,1));
+ const Simd128 m2 = _mm_add_ps(m0, m1);
+ const Simd128 m3 = _mm_shuffle_ps(m2, m2, _MM_SHUFFLE(1,0,3,2));
+ return _mm_add_ps(m2, m3);
+ }
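+ // Same horizontal-add idea as the VMX __vmsum4fp: swap within pairs and
+ // add to get (x+y, x+y, z+w, z+w), then swap the halves and add again so
+ // the full dot product is replicated in all four lanes.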
+ __forceinline static Simd128 V3Cross(Simd128 v0, Simd128 v1)
+ {
+ const Simd128 m0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3,0,2,1));
+ const Simd128 m1 = _mm_shuffle_ps(v0, v0, _MM_SHUFFLE(3,0,2,1));
+ const Simd128 m2 = _mm_mul_ps(v1, m1);
+ const Simd128 m3 = _mm_sub_ps(_mm_mul_ps(m0, v0), m2);
+ return _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(3,0,2,1));
+ }
+
+ // Shuffling / Permuting / Splatting / Merging
+# define V4Splat(v0, i) _mm_shuffle_ps((v0), (v0), _MM_SHUFFLE(i,i,i,i))
+
+ // Note: V4MergeL/V4MergeH follow the PPC big-endian merge semantics, so on
+ // SSE "high" maps to _mm_unpacklo_ps and "low" to _mm_unpackhi_ps.
+# define V4MergeL(v0, v1) _mm_unpackhi_ps((v0), (v1))
+# define V4MergeH(v0, v1) _mm_unpacklo_ps((v0), (v1))
+#endif
+
+// Matrix & Quaternion types
+struct ALIGN16 SimdMatrix3x4 { Simd128 m00, m10, m20; };
+struct ALIGN16 SimdMatrix4x4 { Simd128 m00, m10, m20, m30;};
+typedef Simd128 SimdQuaternion;
+
+#if UNITY_SUPPORTS_VMX
+
+__forceinline static void V3StoreUnaligned(Simd128 value, float* volatile base, const UInt32 offset)
+{
+ const Simd128 X = vec_splat(value, 0);
+ const Simd128 Y = vec_splat(value, 1);
+ const Simd128 Z = vec_splat(value, 2);
+ vec_ste(X, 0, base+offset);
+ vec_ste(Y, 4, base+offset);
+ vec_ste(Z, 8, base+offset);
+}
+
+#else
+
+__forceinline static void V3StoreUnaligned(Simd128 value, float* base, const UInt32 offset)
+{
+ typedef union {
+ UInt32 u[4];
+ __m128i v;
+ } m128u;
+
+ static const m128u store_mask={{0xffffffff,0xffffffff,0xffffffff,0}};
+ _mm_maskmoveu_si128(*(__m128i *)&value, store_mask.v, (char*)(base+offset));
+}
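+// Note (based on the documented behaviour of the intrinsic):
+// _mm_maskmoveu_si128 is a byte-masked store with a non-temporal hint, so
+// the three floats bypass the cache; the mask touches bytes 0-11 only,
+// leaving the fourth float after base+offset untouched.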
+
+#endif
+
diff --git a/Runtime/Math/Simd/SimdTest.cpp b/Runtime/Math/Simd/SimdTest.cpp
new file mode 100644
index 0000000..24f76b8
--- /dev/null
+++ b/Runtime/Math/Simd/SimdTest.cpp
@@ -0,0 +1,814 @@
+#include "UnityPrefix.h"
+#if ENABLE_UNIT_TESTS
+#include "External/UnitTest++/src/UnitTest++.h"
+#include "Runtime/Profiler/TimeHelper.h"
+
+#include "Runtime/Math/Simd/float1.h"
+#include "Runtime/Math/Simd/float4.h"
+#include "Runtime/Math/Simd/math.h"
+#include "Runtime/Math/Simd/quaternion.h"
+#include "Runtime/mecanim/math/axes.h"
+
+using namespace math;
+
+const float epsilon = 1e-5f;
+
+SUITE (SimdTests)
+{
+ struct SimdFixture
+ {
+ SimdFixture()
+ {
+ }
+ ~SimdFixture()
+ {
+ }
+ };
+
+ TEST_FIXTURE(SimdFixture, swizzle)
+ {
+ constant_float4(value, 1,2,3,4);
+
+ float4 a = float4(0,0,0,0);
+
+ a.x() = 3.f;
+ CHECK_CLOSE(3, a.x().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.y().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.z().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.w().tofloat(), epsilon);
+
+ a.x() = value.x();
+ CHECK_CLOSE(1, a.x().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.y().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.z().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.w().tofloat(), epsilon);
+
+ a.y() = value.y();
+ CHECK_CLOSE(1, a.x().tofloat(), epsilon);
+ CHECK_CLOSE(2, a.y().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.z().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.w().tofloat(), epsilon);
+
+ a.z() = value.z();
+ CHECK_CLOSE(1, a.x().tofloat(), epsilon);
+ CHECK_CLOSE(2, a.y().tofloat(), epsilon);
+ CHECK_CLOSE(3, a.z().tofloat(), epsilon);
+ CHECK_CLOSE(0, a.w().tofloat(), epsilon);
+
+ a.w() = value.w();
+ CHECK_CLOSE(1, a.x().tofloat(), epsilon);
+ CHECK_CLOSE(2, a.y().tofloat(), epsilon);
+ CHECK_CLOSE(3, a.z().tofloat(), epsilon);
+ CHECK_CLOSE(4, a.w().tofloat(), epsilon);
+
+ a.z() = value.y();
+ CHECK_CLOSE(1, a.x().tofloat(), epsilon);
+ CHECK_CLOSE(2, a.y().tofloat(), epsilon);
+ CHECK_CLOSE(2, a.z().tofloat(), epsilon);
+ CHECK_CLOSE(4, a.w().tofloat(), epsilon);
+
+ float1 f = value.w();
+ CHECK_CLOSE(4, f.tofloat(), epsilon);
+
+ a = value.wxzy();
+ CHECK( all(a == float4(4,1,3,2) ) );
+
+ float4 g(value.wxzy());
+ CHECK( all(g == float4(4,1,3,2) ) );
+
+
+ a = value.xzwy();
+ CHECK( all(a == float4(1,3,4,2) ) );
+
+ a = value.xwyz();
+ CHECK( all(a == float4(1,4,2,3) ) );
+
+ a = value.wyxz();
+ CHECK( all(a == float4(4,2,1,3) ) );
+
+ a = value.zywx();
+ CHECK( all(a == float4(3,2,4,1) ) );
+
+ a = value.ywzx();
+ CHECK( all(a == float4(2,4,3,1) ) );
+
+ a = value.yzxw();
+ CHECK( all(a == float4(2,3,1,4) ) );
+
+ a = value.zxyw();
+ CHECK( all(a == float4(3,1,2,4) ) );
+
+ a = value.zwxy();
+ CHECK( all(a == float4(3,4,1,2) ) );
+
+ a = value.wwwz();
+ CHECK( all(a == float4(4,4,4,3) ) );
+
+ a = value.wwzz();
+ CHECK( all(a == float4(4,4,3,3) ) );
+
+ a = value.wzyx();
+ CHECK( all(a == float4(4,3,2,1) ) );
+
+ a = value.yxwz();
+ CHECK( all(a == float4(2,1,4,3) ) );
+
+ }
+
+ TEST_FIXTURE(SimdFixture, float1_op)
+ {
+ float ATTRIBUTE_ALIGN(ALIGN4F) v[4];
+
+ float1 b(3.f);
+ cvec4f(two, 2.f, 2.f, 2.f, 2.f);
+ float1 c(two);
+ float4 cx;
+ float1 e;
+
+ {
+ Vstorepf(b.eval(), v, 0);
+ CHECK_CLOSE(3.f, v[0], epsilon);
+ CHECK_CLOSE(3.f, v[1], epsilon);
+ CHECK_CLOSE(3.f, v[2], epsilon);
+ CHECK_CLOSE(3.f, v[3], epsilon);
+
+ Vstorepf(c.eval(), v, 0);
+ CHECK_CLOSE(2.f, v[0], epsilon);
+ CHECK_CLOSE(2.f, v[1], epsilon);
+ CHECK_CLOSE(2.f, v[2], epsilon);
+ CHECK_CLOSE(2.f, v[3], epsilon);
+
+ cx = float4(10.f,2.f,3.f,4.f);
+
+ float1 d(cx.x());
+
+ Vstorepf(d.eval(), v, 0);
+ CHECK_CLOSE(10.f, v[0], epsilon);
+ CHECK_CLOSE(10.f, v[1], epsilon);
+ CHECK_CLOSE(10.f, v[2], epsilon);
+ CHECK_CLOSE(10.f, v[3], epsilon);
+
+ e = cx.y();
+
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(2.f, v[0], epsilon);
+ CHECK_CLOSE(2.f, v[1], epsilon);
+ CHECK_CLOSE(2.f, v[2], epsilon);
+ CHECK_CLOSE(2.f, v[3], epsilon);
+
+ e = float1(4.f);
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(4.f, v[0], epsilon);
+ CHECK_CLOSE(4.f, v[1], epsilon);
+ CHECK_CLOSE(4.f, v[2], epsilon);
+ CHECK_CLOSE(4.f, v[3], epsilon);
+
+ e = float1(cx.x());
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(10.f, v[0], epsilon);
+ CHECK_CLOSE(10.f, v[1], epsilon);
+ CHECK_CLOSE(10.f, v[2], epsilon);
+ CHECK_CLOSE(10.f, v[3], epsilon);
+
+ e = cx.z();
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(3.f, v[0], epsilon);
+ CHECK_CLOSE(3.f, v[1], epsilon);
+ CHECK_CLOSE(3.f, v[2], epsilon);
+ CHECK_CLOSE(3.f, v[3], epsilon);
+
+ e += cx.w();
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(7.f, v[0], epsilon);
+ CHECK_CLOSE(7.f, v[1], epsilon);
+ CHECK_CLOSE(7.f, v[2], epsilon);
+ CHECK_CLOSE(7.f, v[3], epsilon);
+
+ e -= cx.x();
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(-3.f, v[0], epsilon);
+ CHECK_CLOSE(-3.f, v[1], epsilon);
+ CHECK_CLOSE(-3.f, v[2], epsilon);
+ CHECK_CLOSE(-3.f, v[3], epsilon);
+
+ e *= cx.y();
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(-6.f, v[0], epsilon);
+ CHECK_CLOSE(-6.f, v[1], epsilon);
+ CHECK_CLOSE(-6.f, v[2], epsilon);
+ CHECK_CLOSE(-6.f, v[3], epsilon);
+
+ e /= cx.z();
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(-2.f, v[0], epsilon);
+ CHECK_CLOSE(-2.f, v[1], epsilon);
+ CHECK_CLOSE(-2.f, v[2], epsilon);
+ CHECK_CLOSE(-2.f, v[3], epsilon);
+ }
+ {
+ float1 f = e++;
+ Vstorepf(f.eval(), v, 0);
+ CHECK_CLOSE(-2.f, v[0], epsilon);
+ CHECK_CLOSE(-2.f, v[1], epsilon);
+ CHECK_CLOSE(-2.f, v[2], epsilon);
+ CHECK_CLOSE(-2.f, v[3], epsilon);
+
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(-1.f, v[0], epsilon);
+ CHECK_CLOSE(-1.f, v[1], epsilon);
+ CHECK_CLOSE(-1.f, v[2], epsilon);
+ CHECK_CLOSE(-1.f, v[3], epsilon);
+
+ float1 g = ++e;
+ Vstorepf(g.eval(), v, 0);
+ CHECK_CLOSE(0.f, v[0], epsilon);
+ CHECK_CLOSE(0.f, v[1], epsilon);
+ CHECK_CLOSE(0.f, v[2], epsilon);
+ CHECK_CLOSE(0.f, v[3], epsilon);
+
+ Vstorepf(e.eval(), v, 0);
+ CHECK_CLOSE(0.f, v[0], epsilon);
+ CHECK_CLOSE(0.f, v[1], epsilon);
+ CHECK_CLOSE(0.f, v[2], epsilon);
+ CHECK_CLOSE(0.f, v[3], epsilon);
+
+
+ float1 h(float1::zero());
+ Vstorepf(h.eval(), v, 0);
+ CHECK_CLOSE(0.f, v[0], epsilon);
+ CHECK_CLOSE(0.f, v[1], epsilon);
+ CHECK_CLOSE(0.f, v[2], epsilon);
+ CHECK_CLOSE(0.f, v[3], epsilon);
+
+ float1 i(float1::one());
+ Vstorepf(i.eval(), v, 0);
+ CHECK_CLOSE(1.f, v[0], epsilon);
+ CHECK_CLOSE(1.f, v[1], epsilon);
+ CHECK_CLOSE(1.f, v[2], epsilon);
+ CHECK_CLOSE(1.f, v[3], epsilon);
+
+ float1 j(4.f);
+ float1 l(3.f);
+
+ float1 m = j + l;
+ Vstorepf(m.eval(), v, 0);
+ CHECK_CLOSE(7.f, v[0], epsilon);
+ CHECK_CLOSE(7.f, v[1], epsilon);
+ CHECK_CLOSE(7.f, v[2], epsilon);
+ CHECK_CLOSE(7.f, v[3], epsilon);
+
+ float1 n = j - l;
+ Vstorepf(n.eval(), v, 0);
+ CHECK_CLOSE(1.f, v[0], epsilon);
+ CHECK_CLOSE(1.f, v[1], epsilon);
+ CHECK_CLOSE(1.f, v[2], epsilon);
+ CHECK_CLOSE(1.f, v[3], epsilon);
+
+ float1 o = j * l;
+ Vstorepf(o.eval(), v, 0);
+ CHECK_CLOSE(12.f, v[0], epsilon);
+ CHECK_CLOSE(12.f, v[1], epsilon);
+ CHECK_CLOSE(12.f, v[2], epsilon);
+ CHECK_CLOSE(12.f, v[3], epsilon);
+
+ float1 p = j / l;
+ Vstorepf(p.eval(), v, 0);
+ CHECK_CLOSE(4.f/3.f, v[0], epsilon);
+ CHECK_CLOSE(4.f/3.f, v[1], epsilon);
+ CHECK_CLOSE(4.f/3.f, v[2], epsilon);
+ CHECK_CLOSE(4.f/3.f, v[3], epsilon);
+ }
+
+ bool1 bvalue = float1(4.f) < float1(3.f);
+ CHECK( (bool)!bvalue );
+
+ bvalue = float1(4.f) < float1(4.f);
+ CHECK( (bool)!bvalue );
+
+ bvalue = float1(4.f) < float1(5.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(4.f) <= float1(3.f);
+ CHECK( (bool)!bvalue );
+
+ bvalue = float1(4.f) <= float1(4.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(4.f) <= float1(5.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(4.f) > float1(3.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(4.f) > float1(4.f);
+ CHECK( (bool)!bvalue );
+
+ bvalue = float1(4.f) > float1(5.f);
+ CHECK( (bool)!bvalue );
+
+ bvalue = float1(4.f) >= float1(3.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(4.f) >= float1(4.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(4.f) >= float1(5.f);
+ CHECK( (bool)!bvalue );
+
+ bvalue = float1(10.f) == float1(5.f);
+ CHECK( (bool)!bvalue );
+
+ bvalue = float1(10.f) == float1(10.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(10.f) != float1(5.f);
+ CHECK( (bool)bvalue );
+
+ bvalue = float1(10.f) != float1(10.f);
+ CHECK( (bool)!bvalue );
+
+ }
+
+ TEST_FIXTURE(SimdFixture, Operator1)
+ {
+ float ATTRIBUTE_ALIGN(ALIGN4F) v[4];
+
+ float4 a(1,2,3,4);
+ float4 b(4,3,2,1);
+ float4 e(54,3,42,2);
+
+ float4 c = a+b;
+ CHECK_CLOSE( 5.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 5.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 5.f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 5.f, c.w().tofloat(), epsilon);
+
+ c = a+b.wwwz();
+ CHECK_CLOSE( 2.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 3.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 4.f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 6.f, c.w().tofloat(), epsilon);
+
+ c = a+b.z();
+ CHECK_CLOSE( 3.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 4.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 5.f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 6.f, c.w().tofloat(), epsilon);
+
+ c = a+b.wwwz()+e.y();
+ CHECK_CLOSE( 5.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 6.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 7.f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 9.f, c.w().tofloat(), epsilon);
+
+ float4 d = a;
+ CHECK_CLOSE( 1.f, d.x().tofloat(), epsilon);
+ CHECK_CLOSE( 2.f, d.y().tofloat(), epsilon);
+ CHECK_CLOSE( 3.f, d.z().tofloat(), epsilon);
+ CHECK_CLOSE( 4.f, d.w().tofloat(), epsilon);
+
+ float1 a1 = float1(10.f);
+
+ d = a+a1;
+ CHECK_CLOSE( 11.f, d.x().tofloat(), epsilon);
+ CHECK_CLOSE( 12.f, d.y().tofloat(), epsilon);
+ CHECK_CLOSE( 13.f, d.z().tofloat(), epsilon);
+ CHECK_CLOSE( 14.f, d.w().tofloat(), epsilon);
+
+ a.x() = 0;
+ CHECK( all(a == float4(0,2,3,4)) );
+
+ a.y() = float1(12);
+ CHECK( all(a == float4(0,12,3,4)) );
+
+ a = float4(1,2,3,4);
+
+ c = a+b;
+ CHECK( all(c == float4(5,5,5,5)) );
+
+ c = a*b;
+ CHECK( all(c == float4(4.f,6.f,6.f,4.f)) );
+
+ c = a/b;
+ CHECK( all(c == float4(1.f/4.f,2.f/3.f,3.f/2.f,4.f/1.f)) );
+
+ c = ++a;
+ CHECK( all(c == float4(2.f,3.f,4.f,5.f)) );
+ CHECK( all(a == float4(2.f,3.f,4.f,5.f)) );
+
+ c = a++;
+ CHECK( all(c == float4(2.f,3.f,4.f,5.f)) );
+ CHECK( all(a == float4(3.f,4.f,5.f,6.f)) );
+
+ c += b;
+ CHECK( all(c == float4(6.f,6.f,6.f,6.f)) );
+
+ c -= a;
+ CHECK( all(c == float4(3.f,2.f,1.f,0.f)) );
+
+ c += 5.f;
+ CHECK( all(c == float4(8.f,7.f,6.f,5.f)) );
+
+ c *= b;
+ CHECK( all(c == float4(32.f,21.f,12.f,5.f)) );
+
+ c /= b;
+ CHECK( all(c == float4(8.f,7.f,6.f,5.f)) );
+
+ c = -c;
+ CHECK( all(c == float4(-8.f,-7.f,-6.f,-5.f)) );
+
+ c -= .5f;
+ CHECK( all(c == float4(-8.5f,-7.5f,-6.5f,-5.5f)) );
+
+ c *= 2.f;
+ CHECK( all(c == float4(-17.f,-15.f,-13.f,-11.f)) );
+
+ c /= 3.f;
+ Vstorepf(c.eval(), v, 0);
+ CHECK_CLOSE(-17.f/3.f, v[0], epsilon);
+ CHECK_CLOSE(-15.f/3.f, v[1], epsilon);
+ CHECK_CLOSE(-13.f/3.f, v[2], epsilon);
+ CHECK_CLOSE(-11.f/3.f, v[3], epsilon);
+ }
+
+ TEST_FIXTURE( SimdFixture, vecexpr1_operator )
+ {
+ float ATTRIBUTE_ALIGN(ALIGN4F) v[4];
+
+ constant_float4(c,-1,2,-3,4);
+ float4 t(5,6,7,8);
+
+ t.x() *= float1(-1.f);
+ CHECK( all(t == float4(-5.f, 6.f, 7.f, 8.f)));
+
+ t.y() += float1(4.f);
+ CHECK( all(t == float4(-5.f, 10.f, 7.f, 8.f)));
+
+ t.z() -= float1(-2.f);
+ CHECK( all(t == float4(-5.f, 10.f, 9.f, 8.f)));
+
+ t.w() /= float1(-2.f);
+ CHECK( all(t == float4(-5.f, 10.f, 9.f, -4.f)));
+
+ t.x() *= c.w();
+ CHECK( all(t == float4(-20.f, 10.f, 9.f, -4.f)));
+
+ t.y() /= c.z();
+ Vstorepf(t.eval(), v, 0);
+ CHECK_CLOSE(-20.f, v[0], epsilon);
+ CHECK_CLOSE(10.f/-3.f, v[1], epsilon);
+ CHECK_CLOSE(9.f, v[2], epsilon);
+ CHECK_CLOSE(-4.f, v[3], epsilon);
+
+ t.w() += c.y();
+ Vstorepf(t.eval(), v, 0);
+ CHECK_CLOSE(-20.f, v[0], epsilon);
+ CHECK_CLOSE(10.f/-3.f, v[1], epsilon);
+ CHECK_CLOSE(9.f, v[2], epsilon);
+ CHECK_CLOSE(-2.f, v[3], epsilon);
+
+ t.z() -= c.x();
+ Vstorepf(t.eval(), v, 0);
+ CHECK_CLOSE(-20.f, v[0], epsilon);
+ CHECK_CLOSE(10.f/-3.f, v[1], epsilon);
+ CHECK_CLOSE(10.f, v[2], epsilon);
+ CHECK_CLOSE(-2.f, v[3], epsilon);
+
+ float x = -c.x().tofloat();
+ CHECK( x == 1.f );
+ }
+
+
+ TEST_FIXTURE( SimdFixture, generic )
+ {
+ float ATTRIBUTE_ALIGN(ALIGN4F) v[4];
+
+ float4 a(-1.f, -.263f, 345.f, 0.f);
+ float4 b(5.f, 2.34f, -12.76f, 54.f);
+ float4 c;
+
+ float1 s;
+
+ c = abs(a);
+ CHECK( all(c == float4(1.f, .263f, 345.f, 0.f)));
+
+ c = math::clamp(c, float4(0.f, 1.f, 100.f, -2.f), float4(2.f, 3.f, 200.f, -10.f));
+ CHECK( all(c == float4(1.f, 1.f, 200.f, -10.f)));
+
+ c = cond(bool4(true), a, b);
+ CHECK( all(c == a));
+
+ c = cond(bool4(false), a, b);
+ CHECK( all(c == b));
+
+ c = cond(a<b, a, b);
+ CHECK( all(c == float4(-1.f, -.263f, -12.76f, 0.f)));
+
+
+ a = float4(-1.f, 0.f, 0.f, 0.f);
+ b = float4(0.f, 1.f, 0.f, 0.f);
+ c = cross(a, b);
+ CHECK( all(c == float4(0.f, 0.f, -1.f, 0.f)));
+
+ a = float4(-1.f, 2.f, -4.f, 1.f);
+ b = float4(4.f, 1.f, -3.f, 1.f);
+ c = cross(a, b);
+ CHECK( all(c == float4(-2.f, -19.f, -9.f, 0.f)));
+
+ c = degrees(float4( float(M_PIf), float(M_PI_2f), float(M_PI_4f), 0.f));
+ CHECK( all(c == float4(180.f, 90.f, 45.f, 0.f)));
+
+ c = radians(float4(180.f, 90.f, 45.f, 0.f));
+ CHECK( all(c == float4( float(M_PIf), float(M_PI_2f), float(M_PI_4f), 0.f)));
+
+ float1 theta = dot(float4(1,0,0,0), float4(0,1,0,0));
+ CHECK_CLOSE( 0.f, theta.tofloat(), epsilon);
+
+ theta = dot(float4(1,0,0,0), float4(1,0,0,0));
+ CHECK_CLOSE( 1.f, theta.tofloat(), epsilon);
+
+ theta = dot(float4(1,0,0,0), normalize(float4(1,1,0,0)));
+ CHECK_CLOSE( 0.70710f, theta.tofloat(), epsilon);
+
+ s = dot(float4( 10.f, 5.f, 2.f, 0.f));
+ CHECK_CLOSE( 129.0f, s.tofloat(), epsilon);
+
+ s = length( float4( 1.f, 0.f, 0.f, 0.f));
+ CHECK_CLOSE( 1.f, s.tofloat(), epsilon );
+
+ s = length( float4( 10.f, 5.f, 2.f, 0.f));
+ CHECK_CLOSE( 11.357816f, s.tofloat(), epsilon);
+
+ s = length( float4( 0.f, 0.f, 0.f, 0.f));
+ CHECK_CLOSE( 0.f, s.tofloat(), epsilon );
+
+ s = lerp(float1(3), float1(6), float1(.3333333f));
+ CHECK_CLOSE( 4.f, s.tofloat(), epsilon);
+
+ c = lerp(float4(1,2,3,4), float4(3,4,5,6), float1(.5f));
+ CHECK_CLOSE( 2.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 3.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 4.f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 5.f, c.w().tofloat(), epsilon);
+
+ c = lerp(float4(1,2,3,4), float4(3,4,5,6), float4(-.5f,0,1.0,1.5f));
+ CHECK_CLOSE( 0.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 2.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 5.f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 7.f, c.w().tofloat(), epsilon);
+
+ s = maximum(float4(-1.f, -.263f, 345.f, 0.f));
+ CHECK_CLOSE( 345.f, s.tofloat(), epsilon);
+
+ s = minimum(float4(-1.f, -.263f, 345.f, 0.f));
+ CHECK_CLOSE( -1.f, s.tofloat(), epsilon);
+
+ c = normalize(float4( 0.f, 0.f, 0.f, 1.f));
+ CHECK_CLOSE( 0.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 0.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 0.f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 1.f, c.w().tofloat(), epsilon);
+
+ c = normalize(float4( 10.f, 5.f, 2.f, 0.f));
+ CHECK_CLOSE( 0.880451f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 0.440225f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 0.176090f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 0.f, c.w().tofloat(), epsilon);
+
+ c = rcp(float4( -25.f, 45.f, .5f, 1.f));
+ CHECK_CLOSE( 1.f/-25.f, c.x().tofloat(), epsilon);
+ CHECK_CLOSE( 1.f/45.f, c.y().tofloat(), epsilon);
+ CHECK_CLOSE( 1.f/.5f, c.z().tofloat(), epsilon);
+ CHECK_CLOSE( 1.f, c.w().tofloat(), epsilon);
+
+ //float4 rsqrt(float4 const& r);
+
+ float f = saturate(-2.f);
+ CHECK_CLOSE( 0.f, f, epsilon);
+
+ f = saturate(.5f);
+ CHECK_CLOSE( .5f, f, epsilon);
+
+ f = saturate(1.5f);
+ CHECK_CLOSE( 1.f, f, epsilon);
+
+ c = saturate(float4( -25.f, 0.f, .5f, 1.5f));
+ CHECK( all(c == float4( 0.f, 0.f, .5f, 1.f)));
+
+ f = sgn(-25.f);
+ CHECK_CLOSE( -1.f, f, epsilon);
+
+ f = sgn(0.f);
+ CHECK_CLOSE( 1.f, f, epsilon);
+
+ f = sgn(3.f);
+ CHECK_CLOSE( 1.f, f, epsilon);
+
+ c = sgn(float4( -25.f, 0.f, .5f, 1.5f));
+ CHECK( all(c == float4( -1.f, 1.f, 1.f, 1.f)));
+
+ c = sgn(float4( 25.f, 0.f, -.5f, -1.5f));
+ CHECK( all(c == float4( 1.f, 1.f, -1.f, -1.f)));
+
+ // inconsistent how sgn of -0 is interpreted; should not matter in any real-world scenario
+/* c = sgn(float4( -25.f, -0.f, .5f, -1.5f));
+ CHECK( all(c == float4( -1.f, -1.f, 1.f, -1.f)));
+*/
+ f = sign(-25.f);
+ CHECK_CLOSE( -1.f, f, epsilon);
+
+ f = sign(0.f);
+ CHECK_CLOSE( 0.f, f, epsilon);
+
+ f = sign(3.f);
+ CHECK_CLOSE( 1.f, f, epsilon);
+
+ c = sign(float4( -25.f, 0.f, .5f, 1.5f));
+ CHECK( all(c == float4( -1.f, 0.f, 1.f, 1.f)));
+
+ c = sign(float4( 25.f, -0.f, .5f, -1.5f));
+ CHECK( all(c == float4( 1.f, 0.f, 1.f, -1.f)));
+
+ c = sqrt(float4( 9.f, 81.f, 49.f, 74.f));
+ Vstorepf(c.eval(), v, 0);
+ CHECK_CLOSE(3.f, v[0], epsilon);
+ CHECK_CLOSE(9.f, v[1], epsilon);
+ CHECK_CLOSE(7.f, v[2], epsilon);
+ CHECK_CLOSE(8.602325f, v[3], epsilon);
+
+ s = sum(float4( 9.f, 81.f, 49.f, 74.f));
+ CHECK_CLOSE( 213.f, s.tofloat(), epsilon);
+
+ c = math::vector(float4( -25.f, 0.f, .5f, 1.5f));
+ CHECK( all(c == float4( -25.f, 0.f, .5f, 0.f)));
+
+ a = float4(-1.f, -4.f, 8.f, 0.f);
+ b = float4(5.f, 2.f, -2.f, 54.f);
+ c = float4( -25.f, 0.f, .5f, 1.5f);
+ float4 d = float4(Vmadd(a.eval(),b.eval(),c.eval()));
+ CHECK(all(d == float4(-30.f,-8.f,-15.5f,1.5f)));
+
+ d = float4(Vmsub(a.eval(),b.eval(),c.eval()));
+ CHECK(all(d == float4(20.f,-8.f,-16.5f,-1.5f)));
+
+ bool4 bv = bool4(0, 0, 0, 0);
+ CHECK( any(bv) == false );
+
+ bv = bool4(0, 1, 0, 0);
+ CHECK( any(bv) == true );
+
+ bv = bool4(1, 1, 1, 1);
+ CHECK( any(bv) == true );
+ }
+
+ TEST_FIXTURE( SimdFixture, quaternion )
+ {
+ float epsilon = 1e-4f;
+
+ float4 qx(1,0,0,0);
+ float4 qy(0,1,0,0);
+ float4 qz(0,0,1,0);
+
+ float4 vz(0,0,1,0);
+
+ float4 v;
+
+ v = quatMulVec(qy, vz);
+ CHECK( all(v == float4(0.f, 0.f, -1.f, 0.f)));
+
+ v = quatMulVec(qz, vz);
+ CHECK( all(v == float4(0.f, 0.f, 1.f, 0.f)));
+
+ v = quatMulVec(qx, vz);
+ CHECK( all(v == float4(0.f, 0.f, -1.f, 0.f)));
+
+ float4 euler(radians(-38.22f), radians(16.16f), radians(-45.96f), 0.f );
+ float4 q = quatEulerToQuat(euler);
+ v = quatMulVec(q, float4(32.21f, 61.03f, -11.19f, 0.f) );
+ CHECK_CLOSE( 41.990852f, v.x().tofloat(), epsilon);
+ CHECK_CLOSE( 15.592499f, v.y().tofloat(), epsilon);
+ CHECK_CLOSE( -53.674984f, v.z().tofloat(), epsilon);
+ CHECK_CLOSE( 0.f, v.w().tofloat(), epsilon);
+
+ float4 q1(radians(-38.22f), radians(16.16f), radians(-45.96f), 0.f );
+ float4 q2(radians(79.24f), radians(-1.61f), radians(-33.15f), 0.f );
+ float4 q3(radians(38.40f), radians(-6.50f), radians(-70.45f), 0.f);
+ q3 = quatEulerToQuat(q3);
+ q = quatMul(quatEulerToQuat(q1), quatEulerToQuat(q2));
+ CHECK_CLOSE( q.x().tofloat(), q3.x().tofloat(), epsilon);
+ CHECK_CLOSE( q.y().tofloat(), q3.y().tofloat(), epsilon);
+ CHECK_CLOSE( q.z().tofloat(), q3.z().tofloat(), epsilon);
+ CHECK_CLOSE( q.w().tofloat(), q3.w().tofloat(), epsilon);
+
+ q3 = quatConj(q3);
+ CHECK_CLOSE( -q.x().tofloat(), q3.x().tofloat(), epsilon);
+ CHECK_CLOSE( -q.y().tofloat(), q3.y().tofloat(), epsilon);
+ CHECK_CLOSE( -q.z().tofloat(), q3.z().tofloat(), epsilon);
+ CHECK_CLOSE( q.w().tofloat(), q3.w().tofloat(), epsilon);
+
+ Axes cAxes;
+ q3 = ToAxes(cAxes,q2);
+ CHECK_CLOSE( 3.121f, q3.x().tofloat(), epsilon);
+ CHECK_CLOSE( 0.792092f, q3.y().tofloat(), epsilon);
+ CHECK_CLOSE( -0.0492416f, q3.z().tofloat(), epsilon);
+ CHECK_CLOSE( 0.0f, q3.w().tofloat(), epsilon);
+
+ q3 = FromAxes(cAxes,q3);
+ CHECK_CLOSE( 0.922363f, q3.x().tofloat(), epsilon);
+ CHECK_CLOSE( -0.0187406f, q3.y().tofloat(), epsilon);
+ CHECK_CLOSE( -0.38587f, q3.z().tofloat(), epsilon);
+ CHECK_CLOSE( 0.0f, q3.w().tofloat(), epsilon);
+
+ Axes aAxes (float4(0,-0.268f,-0.364f,1),float4(0,-2,1,0),float4(-1,-1,1,1),17,math::kZYRoll);
+ q3 = ToAxes(aAxes,q2);
+ CHECK_CLOSE( 1.28582f, q3.x().tofloat(), epsilon);
+ CHECK_CLOSE( 1.69701f, q3.y().tofloat(), epsilon);
+ CHECK_CLOSE( -0.652772f, q3.z().tofloat(), epsilon);
+ CHECK_CLOSE( 0.0f, q3.w().tofloat(), epsilon);
+
+ q3 = FromAxes(aAxes,q3);
+ CHECK_CLOSE( 0.922363f, q3.x().tofloat(), epsilon);
+ CHECK_CLOSE( -0.0187405f, q3.y().tofloat(), epsilon);
+ CHECK_CLOSE( -0.38587f, q3.z().tofloat(), epsilon);
+ CHECK_CLOSE( -0.0f, q3.w().tofloat(), epsilon);
+
+ /* float4 left(0.999886f, 0.011893f, -0.006366f, 0);
+ float4 up(-0.012257f, 0.998085f, -0.060634f, 0);
+ float4 front(0.005632f, 0.060705f, 0.998116f, 0);
+ float4 rootX( -0.068884f, -0.000000f, -0.000000f, 0.997625f);
+
+ math::float4 ret = math::normalize(math::quatMul(math::quatMatrixToQuat(left,up,front),math::quatConj(rootX)));
+ CHECK_CLOSE( 0.278572f, ret.x().tofloat(), epsilon);
+ CHECK_CLOSE( 0.247044f, ret.y().tofloat(), epsilon);
+ CHECK_CLOSE( 0.216015f, ret.z().tofloat(), epsilon);
+ CHECK_CLOSE( 0.902610f, ret.w().tofloat(), epsilon);
+ */
+ }
+
+ TEST_FIXTURE( SimdFixture, trigonometric )
+ {
+
+ int degree;
+ for(degree=-90;degree<90;degree++)
+ {
+ float rad = radians( static_cast<float>(degree) );
+
+ float sin_stl = math::sin(rad);
+ math::float4 sin_unity = math::sin_est( math::float4(rad) );
+
+ CHECK_CLOSE( sin_stl, sin_unity.x().tofloat(), 9.2e-5f);
+ }
+
+ for(degree=-90;degree<90;degree++)
+ {
+ float rad = radians( static_cast<float>(degree) );
+
+ float cos_stl = math::cos(rad);
+ math::float4 cos_unity = math::cos_est( math::float4(rad) );
+
+ CHECK_CLOSE( cos_stl, cos_unity.x().tofloat(), 9.0e-4);
+ }
+
+ /*
+ float sin_stl = 0;
+ ABSOLUTE_TIME time_stl = START_TIME;
+
+ for(int i=0;i<1000;i++)
+ {
+
+ for(degree=-90;degree<90;degree++)
+ {
+ float rad = radians( static_cast<float>(degree) );
+ sin_stl += math::sin(rad);
+ }
+ }
+
+ time_stl = ELAPSED_TIME(time_stl);
+
+ math::float4 sin_unity = math::float4::zero();
+ ABSOLUTE_TIME time_unity = START_TIME;
+
+ for(int i=0;i<1000;i++)
+ {
+ for(degree=-90;degree<90;degree++)
+ {
+ float rad = radians( static_cast<float>(degree) );
+ sin_unity += math::sin_est( math::float4(rad) );
+ }
+ }
+
+ time_unity = ELAPSED_TIME(time_unity);
+
+ CHECK_CLOSE( sin_stl, sin_unity.x().tofloat(), 9.0e-4);
+ CHECK_CLOSE( time_stl, time_unity, 1);
+ */
+ }
+}
+
+#endif
diff --git a/Runtime/Math/Simd/bool1.h b/Runtime/Math/Simd/bool1.h
new file mode 100644
index 0000000..d195a4c
--- /dev/null
+++ b/Runtime/Math/Simd/bool1.h
@@ -0,0 +1,42 @@
+#ifndef SIMD_BOOL1_H
+#define SIMD_BOOL1_H
+
+#include "Runtime/Math/Simd/intrinsic.h"
+
+namespace math
+{
+
+struct ATTRIBUTE_ALIGN(ALIGN4F) bool1
+{
+ enum {
+ RHS_SWZ = kXYZW
+ };
+
+ typedef bool scalar_type;
+ typedef vec4bs packed_type;
+
+ packed_type s;
+
+ MECANIM_FORCE_INLINE bool1()
+ {}
+
+ MECANIM_FORCE_INLINE explicit bool1(packed_type x) : s(x)
+ {}
+
+ MECANIM_FORCE_INLINE explicit bool1(scalar_type x) : s(Vloadsb(x))
+ {}
+
+ MECANIM_FORCE_INLINE operator scalar_type() const
+ { return Vstoresb(s); }
+
+ MECANIM_FORCE_INLINE bool1 &operator=(const bool1 &r)
+ { s = r.s; return *this; }
+
+ // unary operators
+ MECANIM_FORCE_INLINE bool1 operator!() const
+ { bool1 r = bool1(Vnot(s)); return r; }
+};
+
+}
+
+#endif
diff --git a/Runtime/Math/Simd/bool4.h b/Runtime/Math/Simd/bool4.h
new file mode 100644
index 0000000..05afc1d
--- /dev/null
+++ b/Runtime/Math/Simd/bool4.h
@@ -0,0 +1,61 @@
+#ifndef SIMD_BOOL4_H
+#define SIMD_BOOL4_H
+
+
+#include "Runtime/Math/Simd/intrinsic.h"
+#include "Runtime/Math/Simd/bool1.h"
+
+namespace math
+{
+
+template<typename T> struct vecexp4;
+
+struct ATTRIBUTE_ALIGN(ALIGN4F) bool4
+{
+ typedef bool scalar_type;
+ typedef vec4b packed_type;
+
+ packed_type v;
+
+ MECANIM_FORCE_INLINE bool4() {}
+
+ MECANIM_FORCE_INLINE bool4(const bool4 &r):v(r.v) { }
+
+ MECANIM_FORCE_INLINE bool4(const packed_type &r):v(r) { }
+
+ explicit MECANIM_FORCE_INLINE bool4(bool s):v(Vloadsb(s)) { }
+
+ explicit MECANIM_FORCE_INLINE bool4(bool1 const& r):v(r.s) { }
+
+ MECANIM_FORCE_INLINE bool4(bool x, bool y, bool z, bool w):v(Vload4sb(x,y,z,w)) { }
+
+ MECANIM_FORCE_INLINE bool4 &operator=(const bool4 &r) { v = r.v; return *this; }
+
+ MECANIM_FORCE_INLINE bool4 &operator=(bool s) { v = Vloadsb(s); return *this; }
+
+ MECANIM_FORCE_INLINE bool4 operator!() const { bool4 r = Vnot(v); return r; }
+};
+
+static MECANIM_FORCE_INLINE bool4 operator==(bool4 const& l, bool4 const& r)
+{
+ return bool4(Vxnor(l.v, r.v) );
+}
+
+static MECANIM_FORCE_INLINE bool4 operator!=(bool4 const& l, bool4 const& r)
+{
+ return bool4(Vxor(l.v, r.v) );
+}
+
+static MECANIM_FORCE_INLINE bool4 operator&&(bool4 const& l, bool4 const& r)
+{
+ return bool4(Vand(l.v, r.v) );
+}
+
+static MECANIM_FORCE_INLINE bool4 operator||(bool4 const& l, bool4 const& r)
+{
+ return bool4(Vor(l.v, r.v) );
+}
+
+}
+
+#endif
diff --git a/Runtime/Math/Simd/float1.h b/Runtime/Math/Simd/float1.h
new file mode 100644
index 0000000..90c17ae
--- /dev/null
+++ b/Runtime/Math/Simd/float1.h
@@ -0,0 +1,232 @@
+#ifndef SIMD_FLOAT1_H
+#define SIMD_FLOAT1_H
+
+#include "Runtime/Serialize/TransferFunctions/SerializeTransfer.h"
+
+#include "Runtime/Math/Simd/intrinsic.h"
+
+#include "Runtime/Math/Simd/bool1.h"
+
+namespace math
+{
+
+struct float1;
+
+template<typename T> struct ATTRIBUTE_ALIGN(ALIGN4F) vecexp1 : T
+{
+ typedef T value_type;
+ typedef typename T::scalar_type scalar_type;
+ typedef typename T::packed_type packed_type;
+
+ MECANIM_FORCE_INLINE vecexp1() {}
+
+ MECANIM_FORCE_INLINE vecexp1(vecexp1 const& e):value_type(e) { }
+
+ explicit MECANIM_FORCE_INLINE vecexp1(scalar_type s):value_type(s){ }
+
+ explicit MECANIM_FORCE_INLINE vecexp1(packed_type vector):value_type(vector) { }
+
+ MECANIM_FORCE_INLINE vecexp1(value_type const& r):value_type(r) { }
+
+ MECANIM_FORCE_INLINE vecexp1(scalar_type x, scalar_type y, scalar_type z, scalar_type w):value_type(x,y,z,w) { }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp1(const vecexp1<R> &e){
+ value_type::v = Vswizzle<value_type::LHS_SWZ>::lhs(value_type::v, e.eval() );
+ }
+
+ inline bool IsFinite ()const
+ {
+ return ::IsFinite(Vstoresf(value_type::v));
+ }
+
+ MECANIM_FORCE_INLINE vecexp1 &operator=(const vecexp1 &e) {
+ SIMD_ASSERT_IF(!e.IsFinite());
+ value_type::v = Vswizzle<value_type::LHS_SWZ>::lhs(value_type::v, e.eval() );
+ return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp1 &operator=(const vecexp1<R> &e){
+ SIMD_ASSERT_IF(!e.IsFinite());
+ value_type::v = Vswizzle<value_type::LHS_SWZ>::lhs(value_type::v, e.eval() );
+ return *this;
+ }
+
+ MECANIM_FORCE_INLINE vecexp1 &operator=(scalar_type s) {
+ SIMD_ASSERT_IF(!::IsFinite(s));
+ value_type::operator =(s);
+ return *this;
+ }
+
+ MECANIM_FORCE_INLINE const vecexp1 &operator+() const { return *this; }
+ MECANIM_FORCE_INLINE vecexp1 operator-() const { return vecexp1(Vneg( value_type::eval() ) ); }
+
+ template<typename R>MECANIM_FORCE_INLINE vecexp1 &operator+=(const vecexp1<R> &r) {
+ value_type::v = Vswizzle<value_type::LHS_SWZ>::lhs(value_type::v, Vadd( value_type::eval(), r.eval() )); return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp1 &operator-=(const vecexp1<R> &r) {
+ value_type::v = Vswizzle<value_type::LHS_SWZ>::lhs(value_type::v, Vsub( value_type::eval(), r.eval() )); return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp1 &operator*=(const vecexp1<R> &r) {
+ value_type::v = Vswizzle<value_type::LHS_SWZ>::lhs(value_type::v, Vmul(value_type::eval(), r.eval())); return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp1 &operator/=(const vecexp1<R> &r) {
+ value_type::v = Vswizzle<value_type::LHS_SWZ>::lhs(value_type::v, Vdiv(value_type::eval(), r.eval())); return *this;
+ }
+};
+
+struct ATTRIBUTE_ALIGN(ALIGN4F) vec1
+{
+ typedef float scalar_type;
+ typedef vec4f packed_type;
+ typedef vec4f const& const_reference_packed_type;
+ typedef vec4f& reference_packed_type;
+
+ enum{
+ RHS_SWZ = kXYZW,
+ LHS_SWZ = kXYZW
+ };
+
+ MECANIM_FORCE_INLINE vec1() {}
+
+ MECANIM_FORCE_INLINE vec1(scalar_type scalar):v(Vloadsf(scalar)) { }
+
+ MECANIM_FORCE_INLINE vec1(packed_type vector):v(vector){ }
+
+ template<typename T> MECANIM_FORCE_INLINE vec1(const T& vector) { v = vector.eval(); }
+
+ MECANIM_FORCE_INLINE scalar_type tofloat() const { return Vstoresf( v ); }
+
+ MECANIM_FORCE_INLINE vec1 &operator=(const vec1 &l) { v = l.v; return *this; }
+
+ MECANIM_FORCE_INLINE packed_type eval()const{ return v; }
+
+protected:
+ packed_type v;
+};
+
+struct ATTRIBUTE_ALIGN(ALIGN4F) float1 : public vecexp1<vec1>
+{
+ DEFINE_GET_TYPESTRING(float1)
+
+ typedef vec1 value_type;
+ typedef vec1::scalar_type scalar_type;
+ typedef vec1::packed_type packed_type;
+
+ MECANIM_FORCE_INLINE float1() {}
+
+ MECANIM_FORCE_INLINE explicit float1(scalar_type s):vecexp1<value_type>(s) {}
+
+ MECANIM_FORCE_INLINE explicit float1(packed_type const& v):vecexp1<value_type>(v) { }
+
+ MECANIM_FORCE_INLINE float1(const float1 &f):vecexp1<value_type>(f) { }
+
+ template<typename R> MECANIM_FORCE_INLINE float1(const vecexp1<R> &e){ value_type::v = e.eval(); }
+
+ MECANIM_FORCE_INLINE float1 &operator=(const float1 &f) { value_type::v = f.eval(); return *this; }
+
+ template<typename R> MECANIM_FORCE_INLINE float1 &operator=(const vecexp1<R> &e) { value_type::v = e.eval(); return *this; }
+
+ template<typename R>MECANIM_FORCE_INLINE float1 &operator+=(const vecexp1<R> &r) {
+ value_type::v = Vadd( value_type::eval(), r.eval() ); return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE float1 &operator-=(const vecexp1<R> &r) {
+ value_type::v = Vsub( value_type::eval(), r.eval() ); return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE float1 &operator*=(const vecexp1<R> &r) {
+ value_type::v = Vmul(value_type::eval(), r.eval() ); return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE float1 &operator/=(const vecexp1<R> &r) {
+ value_type::v = Vdiv(value_type::eval(), r.eval() ); return *this;
+ }
+
+
+ MECANIM_FORCE_INLINE float1 &operator++() { value_type::v = Vinc(value_type::v); return *this; }
+ MECANIM_FORCE_INLINE float1 operator++(int) { float1 r = *this; value_type::v = Vinc(value_type::v); return r; }
+
+ MECANIM_FORCE_INLINE float1 &operator--() { value_type::v = Vdec(value_type::v); return *this; }
+ MECANIM_FORCE_INLINE float1 operator--(int) { float1 r = *this; value_type::v = Vdec(value_type::v); return r; }
+
+ static float1 zero() {return float1(Vzero()); } // 0
+ static float1 one() {return float1(Vone());} // 1
+
+ template<class TransferFunction>
+ MECANIM_FORCE_INLINE void Transfer (TransferFunction& transfer)
+ {
+ /////@TODO: This is wrong. It will not work for SafeBinaryRead
+ /////// Probably other places in the code too!
+
+ float x;
+ if(transfer.IsReading())
+ {
+ transfer.Transfer(x, "x");
+ *this = float1(x);
+ }
+ else if(transfer.IsWriting())
+ {
+ x = tofloat();
+
+ transfer.Transfer(x, "x");
+ }
+ else
+ {
+ transfer.Transfer(x, "x");
+ }
+ }
+};
+
+// vecexp1 arithmetic
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp1<vec1> operator+(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return vecexp1<vec1>( Vadd( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp1<vec1> operator-(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return vecexp1<vec1>( Vsub( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp1<vec1> operator*(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return vecexp1<vec1>( Vmul( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp1<vec1> operator/(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return vecexp1<vec1>( Vdiv( l.eval(), r.eval() ));
+}
+
+// vecexp1 logic
+template <typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool1 operator<(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool1( Vcmplt( l.eval(), r.eval() ) );
+}
+template <typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool1 operator<=(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool1( Vcmple( l.eval(), r.eval() ) );
+}
+template <typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool1 operator==(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool1( Vcmpeq( l.eval(), r.eval() ) );
+}
+template <typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool1 operator!=(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool1( Vcmpneq( l.eval(), r.eval() ) );
+}
+template <typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool1 operator>=(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool1( Vcmpge( l.eval(), r.eval() ));
+}
+template <typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool1 operator>(const vecexp1<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool1( Vcmpgt( l.eval(), r.eval() ) );
+}
+
+}
+
+
+#endif
diff --git a/Runtime/Math/Simd/float4.h b/Runtime/Math/Simd/float4.h
new file mode 100644
index 0000000..7b653e6
--- /dev/null
+++ b/Runtime/Math/Simd/float4.h
@@ -0,0 +1,397 @@
+#ifndef SIMD_FLOAT4_H
+#define SIMD_FLOAT4_H
+
+#include "Runtime/Serialize/TransferFunctions/SerializeTransfer.h"
+
+#include "Runtime/Math/Simd/intrinsic.h"
+
+#include "Runtime/Math/Simd/bool4.h"
+#include "Runtime/Math/Simd/float1.h"
+
+namespace math
+{
+
+template<typename T> struct ATTRIBUTE_ALIGN(ALIGN4F) vecexp4 : T
+{
+ typedef T value_type;
+ typedef typename T::scalar_type scalar_type;
+ typedef typename T::packed_type packed_type;
+
+ MECANIM_FORCE_INLINE vecexp4() {}
+
+ MECANIM_FORCE_INLINE vecexp4(vecexp4 const& e):value_type(e) { }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp4(const vecexp4<R> &e):value_type(e){ }
+
+ MECANIM_FORCE_INLINE vecexp4(scalar_type const& x, scalar_type const& y, scalar_type const& z, scalar_type const& w):value_type(x,y,z,w) { }
+
+ MECANIM_FORCE_INLINE vecexp4(value_type const& r):value_type(r) { }
+
+ //template<typename R1, typename R2, typename R3, typename R4> MECANIM_FORCE_INLINE vecexp4(const vecexp1<R1> &x, const vecexp1<R2> &y, const vecexp1<R3> &z, const vecexp1<R4> &w) {
+ // value_type::v = Vcombine( x.eval(), y.eval(), z.eval(), w.eval() );
+ //}
+ MECANIM_FORCE_INLINE vecexp4(const float1 &x, const float1 &y, const float1 &z, const float1 &w) {
+ value_type::v = Vcombine( x.eval(), y.eval(), z.eval(), w.eval() );
+ }
+
+ MECANIM_FORCE_INLINE vecexp4 &operator=(const vecexp4 &l) { value_type::operator =(l); return *this; }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp4 &operator=(const vecexp4<R> &e) {
+ value_type::v = e.eval(); return *this;
+ }
+
+ template<typename R> MECANIM_FORCE_INLINE vecexp4 &operator=(const vecexp1<R> &e) {
+ value_type::v = e.eval(); return *this;
+ }
+
+ MECANIM_FORCE_INLINE const vecexp4 &operator+() const { return *this; }
+ MECANIM_FORCE_INLINE vecexp4 operator-() const { return vecexp4(Vneg( value_type::eval() )); }
+};
+
+template<typename SCALAR, typename RHS_VECTOR, typename LHS_VECTOR, int RHS_MASK, int LHS_MASK> struct ATTRIBUTE_ALIGN(ALIGN4F) swizzle1
+{
+ typedef SCALAR scalar_type;
+ typedef LHS_VECTOR packed_type;
+ typedef RHS_VECTOR rhs_packed_type;
+
+ enum{
+ RHS_SWZ = RHS_MASK,
+ LHS_SWZ = LHS_MASK
+ };
+
+
+ packed_type v;
+
+ MECANIM_FORCE_INLINE swizzle1(packed_type vector):v(vector) {}
+
+ MECANIM_FORCE_INLINE swizzle1 &operator=(const scalar_type &s) { v = Vswizzle<LHS_SWZ>::lhs(v, Vloadsf(s)); return *this; }
+
+ MECANIM_FORCE_INLINE scalar_type tofloat() { return Vstoresf( Vswizzle<RHS_SWZ>::rhs(v) ); }
+
+ MECANIM_FORCE_INLINE rhs_packed_type eval()const{ return Vswizzle<RHS_SWZ>::rhs(v); }
+private:
+
+
+ MECANIM_FORCE_INLINE swizzle1 &operator=(const swizzle1 &s) {return *this;}
+};
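+// swizzle1 is the proxy returned by vec4::x()/y()/z()/w(). RHS_SWZ broadcasts
+// the selected lane when the proxy is read (eval/tofloat); LHS_SWZ is the
+// inverse permutation used to write a scalar back into just that lane, which
+// is why y() pairs kYYYY with kYXZW below.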
+
+template<typename SCALAR, typename VECTOR, int MASK> struct ATTRIBUTE_ALIGN(ALIGN4F) swizzle
+{
+ typedef SCALAR scalar_type;
+ typedef VECTOR packed_type;
+
+ enum{
+ RHS_SWZ = MASK
+ };
+
+ MECANIM_FORCE_INLINE swizzle(packed_type const& vector):v(vector) {}
+
+ MECANIM_FORCE_INLINE packed_type eval()const{ return Vswizzle<RHS_SWZ>::rhs(v); }
+
+protected:
+ packed_type v;
+};
+
+struct ATTRIBUTE_ALIGN(ALIGN4F) vec4
+{
+ typedef float scalar_type;
+ typedef vec4f packed_type;
+ typedef vec4f const& const_reference_packed_type;
+ typedef vec4f& reference_packed_type;
+
+ enum{
+ RHS_SWZ = kXYZW
+ };
+
+ MECANIM_FORCE_INLINE vec4() {}
+
+ MECANIM_FORCE_INLINE vec4(scalar_type x, scalar_type y, scalar_type z, scalar_type w):v(Vload4sf(x,y,z,w)) { }
+
+ MECANIM_FORCE_INLINE vec4(scalar_type s):v(Vloadsf(s)) { }
+
+ MECANIM_FORCE_INLINE vec4(packed_type vector):v(vector){ }
+
+ template<typename T> MECANIM_FORCE_INLINE vec4(const T& vector):v(vector.eval()){ }
+
+ MECANIM_FORCE_INLINE vec4 &operator=(const vec4 &l) {
+ SIMD_ASSERT_IF(!l.IsFinite());
+ v = l.v;
+ return *this;
+ }
+
+ MECANIM_FORCE_INLINE packed_type eval()const{ return v; }
+
+ inline bool IsFinite ()const
+ {
+ return ::IsFinite( x().tofloat() ) & ::IsFinite( y().tofloat() ) & ::IsFinite( z().tofloat() ) & ::IsFinite( w().tofloat() );
+ }
+
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, packed_type, kXXXX, kXYZW> > x()const { return vecexp1< swizzle1<scalar_type, packed_type, packed_type, kXXXX, kXYZW> >(v); }
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, packed_type, kYYYY, kYXZW> > y()const { return vecexp1< swizzle1<scalar_type, packed_type, packed_type, kYYYY, kYXZW> >(v); }
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, packed_type, kZZZZ, kZYXW> > z()const { return vecexp1< swizzle1<scalar_type, packed_type, packed_type, kZZZZ, kZYXW> >(v); }
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, packed_type, kWWWW, kWYZX> > w()const { return vecexp1< swizzle1<scalar_type, packed_type, packed_type, kWWWW, kWYZX> >(v); }
+
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kXXXX, kXYZW> > x() { return vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kXXXX, kXYZW> >(v); }
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kYYYY, kYXZW> > y() { return vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kYYYY, kYXZW> >(v); }
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kZZZZ, kZYXW> > z() { return vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kZZZZ, kZYXW> >(v); }
+ MECANIM_FORCE_INLINE vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kWWWW, kWYZX> > w() { return vecexp1< swizzle1<scalar_type, packed_type, reference_packed_type, kWWWW, kWYZX> >(v); }
+
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kWXZY> > wxzy()const { return vecexp4< swizzle<scalar_type, packed_type, kWXZY> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kXZWY> > xzwy()const { return vecexp4< swizzle<scalar_type, packed_type, kXZWY> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kXWYZ> > xwyz()const { return vecexp4< swizzle<scalar_type, packed_type, kXWYZ> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kWYXZ> > wyxz()const { return vecexp4< swizzle<scalar_type, packed_type, kWYXZ> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kZYWX> > zywx()const { return vecexp4< swizzle<scalar_type, packed_type, kZYWX> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kYWZX> > ywzx()const { return vecexp4< swizzle<scalar_type, packed_type, kYWZX> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kYZXW> > yzxw()const { return vecexp4< swizzle<scalar_type, packed_type, kYZXW> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kZXYW> > zxyw()const { return vecexp4< swizzle<scalar_type, packed_type, kZXYW> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kZWXY> > zwxy()const { return vecexp4< swizzle<scalar_type, packed_type, kZWXY> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kWWWZ> > wwwz()const { return vecexp4< swizzle<scalar_type, packed_type, kWWWZ> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kWWZZ> > wwzz()const { return vecexp4< swizzle<scalar_type, packed_type, kWWZZ> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kWZYX> > wzyx()const { return vecexp4< swizzle<scalar_type, packed_type, kWZYX> >(v); }
+ MECANIM_FORCE_INLINE vecexp4< swizzle<scalar_type, packed_type, kYXWZ> > yxwz()const { return vecexp4< swizzle<scalar_type, packed_type, kYXWZ> >(v); }
+
+protected:
+ packed_type v;
+};
+
+struct ATTRIBUTE_ALIGN(ALIGN4F) float4 : vecexp4<vec4>
+{
+ DEFINE_GET_TYPESTRING(float4)
+
+ typedef vec4 value_type;
+ typedef vec4::scalar_type scalar_type;
+ typedef vec4::packed_type packed_type;
+
+ MECANIM_FORCE_INLINE float4() {}
+
+ MECANIM_FORCE_INLINE float4(float4 const& vector):vecexp4<value_type>(vector.v) { }
+
+ explicit MECANIM_FORCE_INLINE float4(scalar_type s):vecexp4<value_type>(s) { }
+
+ explicit MECANIM_FORCE_INLINE float4(packed_type const& vector):vecexp4<value_type>(vector) { }
+
+ template<typename R> MECANIM_FORCE_INLINE float4(const vecexp4<R> &r):vecexp4<value_type>(r) { }
+
+ MECANIM_FORCE_INLINE float4(scalar_type x, scalar_type y, scalar_type z, scalar_type w):vecexp4<value_type>(x,y,z,w) { }
+
+ MECANIM_FORCE_INLINE float4(const float1 &x, const float1 &y, const float1 &z, const float1 &w):vecexp4<value_type>(x,y,z,w) { }
+
+ MECANIM_FORCE_INLINE float4 &operator=(const float4 &r) { vecexp4<vec4>::operator =(r); return *this; }
+
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator=(const vecexp4<R> &r) { vecexp4<vec4>::operator =(r); return *this; }
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator=(const vecexp1<R> &r) { vecexp4<vec4>::operator =(r); return *this; }
+
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator+=(const vecexp4<R> &r) { value_type::v = Vadd(value_type::v, r.eval()); return *this; }
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator+=(const vecexp1<R> &r) { value_type::v = Vadd(value_type::v, r.eval()); return *this; }
+ MECANIM_FORCE_INLINE float4 &operator+=(float r) { value_type::v = Vadd(value_type::v, Vloadsf(r)); return *this; }
+
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator-=(const vecexp4<R> &r) { value_type::v = Vsub(value_type::v, r.eval()); return *this; }
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator-=(const vecexp1<R> &r) { value_type::v = Vsub(value_type::v, r.eval()); return *this; }
+ MECANIM_FORCE_INLINE float4 &operator-=(float r) { value_type::v = Vsub(value_type::v, Vloadsf(r)); return *this; }
+
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator*=(const vecexp4<R> &r) { value_type::v = Vmul(value_type::v, r.eval()); return *this; }
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator*=(const vecexp1<R> &r) { value_type::v = Vmul(value_type::v, r.eval()); return *this; }
+ MECANIM_FORCE_INLINE float4 &operator*=(float r) { value_type::v = Vmul(value_type::v, Vloadsf(r)); return *this; }
+
+
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator/=(const vecexp4<R> &r) { value_type::v = Vdiv(value_type::v, r.eval()); return *this; }
+ template<typename R> MECANIM_FORCE_INLINE float4 &operator/=(const vecexp1<R> &r) { value_type::v = Vdiv(value_type::v, r.eval()); return *this; }
+ MECANIM_FORCE_INLINE float4 &operator/=(float r) { value_type::v = Vdiv(value_type::v, Vloadsf(r)); return *this; }
+
+	// prefix increment
+	MECANIM_FORCE_INLINE float4 &operator++() { value_type::v = Vinc(value_type::v); return *this; }
+	// postfix increment
+	MECANIM_FORCE_INLINE float4 operator++(int) { float4 r = *this; value_type::v = Vinc(value_type::v); return r; }
+
+	// prefix decrement
+	MECANIM_FORCE_INLINE float4 &operator--() { value_type::v = Vdec(value_type::v); return *this; }
+	// postfix decrement
+	MECANIM_FORCE_INLINE float4 operator--(int) { float4 r = *this; value_type::v = Vdec(value_type::v); return r; }
+
+
+ static float4 zero() {return float4(Vzero()); } // 0
+ static float4 one() {return float4(Vone());} // 1
+
+
+ template<class TransferFunction>
+ MECANIM_FORCE_INLINE void Transfer (TransferFunction& transfer)
+ {
+ /////@TODO: This is wrong. It will not work for SafeBinaryRead
+ /////// Probably other places in the code too!
+
+ float ATTRIBUTE_ALIGN(ALIGN4F) buf[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+ if(transfer.IsReading())
+ {
+ transfer.Transfer(buf[0], "x");
+ transfer.Transfer(buf[1], "y");
+ transfer.Transfer(buf[2], "z");
+ transfer.Transfer(buf[3], "w");
+
+ v = Vloadpf(buf, 0);
+ }
+ else if(transfer.IsWriting())
+ {
+ Vstorepf(v, buf, 0);
+
+ transfer.Transfer(buf[0], "x");
+ transfer.Transfer(buf[1], "y");
+ transfer.Transfer(buf[2], "z");
+ transfer.Transfer(buf[3], "w");
+ }
+ else
+ {
+ transfer.Transfer(buf[0], "x");
+ transfer.Transfer(buf[1], "y");
+ transfer.Transfer(buf[2], "z");
+ transfer.Transfer(buf[3], "w");
+ }
+ }
+};
+
+// vecexp4 Arithmetic
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator+(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vadd( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator+(const vecexp4<LHS> &l, const vecexp1<RHS> &r)
+{
+ return vecexp4<vec4>( Vadd( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator+(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vadd( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator-(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vsub( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator-(const vecexp4<LHS> &l, const vecexp1<RHS> &r)
+{
+ return vecexp4<vec4>( Vsub( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator-(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vsub( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator*(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vmul( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator*(const vecexp4<LHS> &l,const vecexp1<RHS> &r)
+{
+ return vecexp4<vec4>( Vmul( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator*(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vmul( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator/(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vdiv( l.eval(), r.eval() ));
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator/(const vecexp4<LHS> &l, const vecexp1<RHS> &r)
+{
+ return vecexp4<vec4>( Vdiv( l.eval(), r.eval() ));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE vecexp4<vec4> operator/(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return vecexp4<vec4>( Vdiv( l.eval(), r.eval() ));
+}
+
+
+// vecexp4 logic
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator<(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4(Vcmplt(l.eval(), r.eval()));
+}
+template<typename LHS> static MECANIM_FORCE_INLINE bool4 operator<(const vecexp4<LHS> &l, const float1 &r)
+{
+ return bool4(Vcmplt(l.eval(), r.eval()));
+}
+template<typename RHS> static MECANIM_FORCE_INLINE bool4 operator<(const float1 &l, const vecexp4<RHS> &r)
+{
+ return bool4(Vcmplt(l.eval(), r.eval()));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator<=(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4(Vcmple(l.eval(), r.eval()));
+}
+template<typename LHS> static MECANIM_FORCE_INLINE bool4 operator<=(const vecexp4<LHS> &l, const float1 &r)
+{
+ return bool4(Vcmple(l.eval(), r.eval()));
+}
+template<typename RHS> static MECANIM_FORCE_INLINE bool4 operator<=(const float1 &l, const vecexp4<RHS> &r)
+{
+ return bool4(Vcmple(l.eval(), r.eval()));
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator==(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpeq(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator==(const vecexp4<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool4( Vcmpeq(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator==(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpeq(l.eval(), r.eval()) );
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator!=(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpneq(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator!=(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpneq(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator!=(const vecexp4<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool4( Vcmpneq(l.eval(), r.eval()) );
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator>=(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpge(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator>=(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpge(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator>=(const vecexp4<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool4( Vcmpge(l.eval(), r.eval()) );
+}
+
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator>(const vecexp4<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpgt(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator>(const vecexp4<LHS> &l, const vecexp1<RHS> &r)
+{
+ return bool4( Vcmpgt(l.eval(), r.eval()) );
+}
+template<typename LHS, typename RHS> static MECANIM_FORCE_INLINE bool4 operator>(const vecexp1<LHS> &l, const vecexp4<RHS> &r)
+{
+ return bool4( Vcmpgt(l.eval(), r.eval()) );
+}
+
+#define constant_float4(name, x,y,z,w) \
+ cvec4f(c##name, x,y,z,w); \
+	math::float4 const name(c##name);
+
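+// e.g. constant_float4(up, 0,1,0,0); declares a back-end constant 'cup' and a
+// math::float4 'up' wrapping it (see 'mirrorT' in math.h for a use).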
+}
+
+#endif
diff --git a/Runtime/Math/Simd/fpu.h b/Runtime/Math/Simd/fpu.h
new file mode 100644
index 0000000..20bd800
--- /dev/null
+++ b/Runtime/Math/Simd/fpu.h
@@ -0,0 +1,245 @@
+#ifndef SIMD_FPU_H
+#define SIMD_FPU_H
+
+// vector 4 packed
+struct vec4f
+{
+ vec4f(float s = 0.0f){m128_f32[0] = m128_f32[1] = m128_f32[2] = m128_f32[3]= s;}
+ vec4f(float x, float y, float z, float w){m128_f32[0] = x; m128_f32[1] = y; m128_f32[2] = z; m128_f32[3]= w;}
+
+ float m128_f32[4];
+};
+typedef vec4f vec4fs; // vector 4 scalar
+
+// vector 4 bool packed
+struct vec4b
+{
+ vec4b(bool s = false){m128_f32[0] = m128_f32[1] = m128_f32[2] = m128_f32[3]= s;}
+ vec4b(bool x, bool y, bool z, bool w){m128_f32[0] = x; m128_f32[1] = y; m128_f32[2] = z; m128_f32[3]= w;}
+
+ bool m128_f32[4];
+};
+
+typedef vec4b vec4bs; // vector 4 bool scalar
+
+#define cvec4f(name, x,y,z,w) static const vec4f name((x),(y),(z),(w))
+#define cvec4b(name, x,y,z,w) static const vec4b name((x),(y),(z),(w))
+#define cvec4fs(name, s) static const vec4fs name(s)
+
+
+#define SWZ_MASK(x, y, z, w) (((w) << 6) | ((z) << 4) | ((y) << 2) | ((x)))
+#define SWZ_X(MASK) (((MASK) >> 0) & 3)
+#define SWZ_Y(MASK) (((MASK) >> 2) & 3)
+#define SWZ_Z(MASK) (((MASK) >> 4) & 3)
+#define SWZ_W(MASK) (((MASK) >> 6) & 3)
+#define SWZ_I(MASK, I)	(((MASK) >> ((I)*2)) & 3)
+
+enum simd_mask
+{
+ kXYZW = SWZ_MASK(0,1,2,3),
+ kXXXX = SWZ_MASK(0,0,0,0),
+ kYYYY = SWZ_MASK(1,1,1,1),
+ kZZZZ = SWZ_MASK(2,2,2,2),
+ kWWWW = SWZ_MASK(3,3,3,3),
+
+ kXWYZ = SWZ_MASK(0,3,1,2),
+ kXZWY = SWZ_MASK(0,2,3,1),
+
+ kYZWX = SWZ_MASK(1,2,3,0),
+ kYXZW = SWZ_MASK(1,0,2,3),
+ kYWZX = SWZ_MASK(1,3,2,0),
+ kYZXW = SWZ_MASK(1,2,0,3),
+ kYXWZ = SWZ_MASK(1,0,3,2),
+
+ kZWXY = SWZ_MASK(2,3,0,1),
+ kZYXW = SWZ_MASK(2,1,0,3),
+ kZYWX = SWZ_MASK(2,1,3,0),
+ kZXYW = SWZ_MASK(2,0,1,3),
+
+ kWYZX = SWZ_MASK(3,1,2,0),
+ kWXZY = SWZ_MASK(3,0,2,1),
+ kWYXZ = SWZ_MASK(3,1,0,2),
+ kWWWZ = SWZ_MASK(3,3,3,2),
+ kWWZZ = SWZ_MASK(3,3,2,2),
+ kWZYX = SWZ_MASK(3,2,1,0),
+};
+
+#define Vzero() vec4f(0.f)
+#define Vone() vec4f(1.f)
+
+static MECANIM_FORCE_INLINE vec4f Vpermute(vec4f v, int mask)
+{
+ return vec4f(v.m128_f32[SWZ_X(mask)], v.m128_f32[SWZ_Y(mask)], v.m128_f32[SWZ_Z(mask)], v.m128_f32[SWZ_W(mask)]);
+}
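+// e.g. Vpermute(vec4f(1.f,2.f,3.f,4.f), kWZYX) yields vec4f(4.f,3.f,2.f,1.f)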
+
+#define Vmove(l, r) vec4f(r.m128_f32[0], l.m128_f32[1], l.m128_f32[2], l.m128_f32[3])
+
+// This template is part of the back-end because some instruction sets support swizzle operations that can be specialized, like the Xbox VMX rotate instruction used in the dot product
+template<int SWZ> struct Vswizzle
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return Vpermute(r, SWZ);
+ }
+
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ return Vswizzle<SWZ>::rhs(Vmove(Vswizzle<SWZ>::rhs(l), r));
+ }
+};
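+// Usage sketch (illustrative): rhs() reads a swizzled copy, lhs() writes the
+// x component of r into the lane of l selected by the mask, e.g.
+//   vec4f v(1.f, 2.f, 3.f, 4.f);
+//   vec4f a = Vswizzle<kWZYX>::rhs(v);               // (4, 3, 2, 1)
+//   vec4f b = Vswizzle<kYXZW>::lhs(v, Vloadsf(9.f)); // (1, 9, 3, 4)
+// Because lhs() applies rhs() twice, it is only correct for involutive masks
+// (rhs(rhs(x)) == x), e.g. the lane swaps kYXZW, kZYXW, kWYZX used by the
+// component accessors in float4.h.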
+
+template<> struct Vswizzle<kXYZW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return r;
+ }
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ return Vmove(l, r);
+ }
+};
+
+
+// Aligned store, store vector at address base as 4 floats
+#define Vstorepf(v, base, offset) memcpy( (base)+(offset), &v.m128_f32[0], sizeof(v))
+
+// Return component x as a float
+#define Vstoresf(r) r.m128_f32[0]
+
+// Return component x as a bool
+#define Vstoresb(r) r.m128_f32[0]
+
+// Aligned store, store vector at address base as 4 bools
+static MECANIM_FORCE_INLINE void Vstorepb(vec4b v, bool* r)
+{
+ r[0] = v.m128_f32[0];
+ r[1] = v.m128_f32[1];
+ r[2] = v.m128_f32[2];
+ r[3] = v.m128_f32[3];
+}
+
+// Aligned load, load 4 floats at address v into a vector register
+static MECANIM_FORCE_INLINE vec4f Vloadpf(float const* v, int offset)
+{
+	float const* p = v+offset;
+	return vec4f( p[0], p[1], p[2], p[3] );
+}
+
+// Load a float value into a vector register, replicating it into all components
+#define Vloadsf(s) vec4f(s)
+
+// Load a bool value into a vector register, replicating it into all components
+#define Vloadsb(s) vec4bs(s)
+
+// Load 4 float values into a vector register
+#define Vload4sf(x, y, z, w) vec4f(x,y,z,w)
+
+// Load 4 bool values into a vector register
+#define Vload4sb(x, y, z, w) vec4b(x,y,z,w)
+
+static MECANIM_FORCE_INLINE vec4f Vadd(vec4f l, vec4f r)
+{
+ return vec4f(l.m128_f32[0]+r.m128_f32[0], l.m128_f32[1]+r.m128_f32[1], l.m128_f32[2]+r.m128_f32[2], l.m128_f32[3]+r.m128_f32[3]);
+}
+
+static MECANIM_FORCE_INLINE vec4f Vsub(vec4f l, vec4f r)
+{
+ return vec4f(l.m128_f32[0]-r.m128_f32[0], l.m128_f32[1]-r.m128_f32[1], l.m128_f32[2]-r.m128_f32[2], l.m128_f32[3]-r.m128_f32[3]);
+}
+
+static MECANIM_FORCE_INLINE vec4f Vmul( vec4f l, vec4f r)
+{
+ return vec4f(l.m128_f32[0]*r.m128_f32[0], l.m128_f32[1]*r.m128_f32[1], l.m128_f32[2]*r.m128_f32[2], l.m128_f32[3]*r.m128_f32[3]);
+}
+
+static MECANIM_FORCE_INLINE vec4f Vdiv( vec4f l, vec4f r)
+{
+ return vec4f(l.m128_f32[0]/r.m128_f32[0], l.m128_f32[1]/r.m128_f32[1], l.m128_f32[2]/r.m128_f32[2], l.m128_f32[3]/r.m128_f32[3]);
+}
+
+#define Vmadd( a, b, c) vec4f(a.m128_f32[0]*b.m128_f32[0] + c.m128_f32[0], a.m128_f32[1]*b.m128_f32[1] + c.m128_f32[1], a.m128_f32[2]*b.m128_f32[2] + c.m128_f32[2], a.m128_f32[3]*b.m128_f32[3] + c.m128_f32[3])
+#define Vmsub( a, b, c) vec4f(a.m128_f32[0]*b.m128_f32[0] - c.m128_f32[0], a.m128_f32[1]*b.m128_f32[1] - c.m128_f32[1], a.m128_f32[2]*b.m128_f32[2] - c.m128_f32[2], a.m128_f32[3]*b.m128_f32[3] - c.m128_f32[3])
+#define Vneg(r) vec4f(-r.m128_f32[0], -r.m128_f32[1], -r.m128_f32[2], -r.m128_f32[3])
+
+// Vector sgn: return -1, 1
+#define Vsgn(r) vec4f( r.m128_f32[0] < 0 ? -1.f : 1.f, r.m128_f32[1] < 0 ? -1.f : 1.f, r.m128_f32[2] < 0 ? -1.f : 1.f, r.m128_f32[3] < 0 ? -1.f : 1.f)
+
+// Vector sgn: return -1, 0, 1
+static MECANIM_FORCE_INLINE vec4f Vsign(vec4f r)
+{
+ return vec4f( r.m128_f32[0] < 0 ? -1.f : r.m128_f32[0] > 0 ? 1.f : 0.f,
+ r.m128_f32[1] < 0 ? -1.f : r.m128_f32[1] > 0 ? 1.f : 0.f,
+ r.m128_f32[2] < 0 ? -1.f : r.m128_f32[2] > 0 ? 1.f : 0.f,
+ r.m128_f32[3] < 0 ? -1.f : r.m128_f32[3] > 0 ? 1.f : 0.f);
+}
+
+#define Vinc(r) Vadd( (r), Vone())
+#define Vdec(r) Vsub( (r), Vone())
+#define Vabs(r) vec4f( abs(r.m128_f32[0]), abs(r.m128_f32[1]), abs(r.m128_f32[2]), abs(r.m128_f32[3]))
+#define Vmax( l, r) vec4f( l.m128_f32[0] > r.m128_f32[0] ? l.m128_f32[0] : r.m128_f32[0], l.m128_f32[1] > r.m128_f32[1] ? l.m128_f32[1] : r.m128_f32[1], l.m128_f32[2] > r.m128_f32[2] ? l.m128_f32[2] : r.m128_f32[2], l.m128_f32[3] > r.m128_f32[3] ? l.m128_f32[3] : r.m128_f32[3])
+#define Vmin( l, r) vec4f( l.m128_f32[0] < r.m128_f32[0] ? l.m128_f32[0] : r.m128_f32[0], l.m128_f32[1] < r.m128_f32[1] ? l.m128_f32[1] : r.m128_f32[1], l.m128_f32[2] < r.m128_f32[2] ? l.m128_f32[2] : r.m128_f32[2], l.m128_f32[3] < r.m128_f32[3] ? l.m128_f32[3] : r.m128_f32[3])
+
+// Return the largest of the 4 components
+static MECANIM_FORCE_INLINE vec4fs Vlargest(vec4f r)
+{
+	r = Vmax(r, Vswizzle<kYZWX>::rhs(r));
+	r = Vmax(r, Vswizzle<kZWXY>::rhs(r));
+	return r.m128_f32[0];
+}
+
+// Return the smallest of the 4 components
+static MECANIM_FORCE_INLINE vec4fs Vsmallest(vec4f r)
+{
+	r = Vmin(r, Vswizzle<kYZWX>::rhs(r));
+	r = Vmin(r, Vswizzle<kZWXY>::rhs(r));
+	return r.m128_f32[0];
+}
+
+static MECANIM_FORCE_INLINE vec4fs Vsum(vec4f r)
+{
+ r = Vadd(r, Vswizzle<kYZWX>::rhs(r) );
+ r = Vadd(r, Vswizzle<kZWXY>::rhs(r) );
+ r = Vswizzle<kXXXX>::rhs(r);
+ return r.m128_f32[0];
+}
+
+#define Vdot( l, r) Vsum( Vmul((l), (r)) )
+
+#define Vsqrt(r) vec4f( sqrt(r.m128_f32[0]), sqrt(r.m128_f32[1]), sqrt(r.m128_f32[2]), sqrt(r.m128_f32[3]))
+
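+// Reference reciprocal square root: the estimate e = 1/sqrt(r) is already
+// exact here, and the Newton-Raphson step e*(3 - e^2*r)/2 that follows is
+// kept so this path mirrors the hardware back-ends.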
+static MECANIM_FORCE_INLINE vec4f Vrsqrt(vec4f r)
+{
+ vec4f const e = Vdiv(vec4f(1.f), Vsqrt(r));
+ return Vmul(Vmul(e, Vsub(vec4f(3.0f), Vmul(Vmul(e,e),r))), vec4f(.5f));
+}
+
+static MECANIM_FORCE_INLINE vec4f Vrcp(vec4f r)
+{
+ return Vdiv(vec4f(1.f), r );
+}
+
+
+// Merge the x (low) component of 4 vectors into one
+#define Vcombine(x,y,z,w) vec4f(x.m128_f32[0], y.m128_f32[0], z.m128_f32[0], w.m128_f32[0])
+
+// Vector comparison
+#define Vcmpeq( a, b) vec4b(a.m128_f32[0] == b.m128_f32[0], a.m128_f32[1] == b.m128_f32[1], a.m128_f32[2] == b.m128_f32[2], a.m128_f32[3] == b.m128_f32[3])
+#define Vcmpneq( a, b) vec4b(a.m128_f32[0] != b.m128_f32[0], a.m128_f32[1] != b.m128_f32[1], a.m128_f32[2] != b.m128_f32[2], a.m128_f32[3] != b.m128_f32[3])
+#define Vcmpgt( a, b) vec4b(a.m128_f32[0] > b.m128_f32[0], a.m128_f32[1] > b.m128_f32[1], a.m128_f32[2] > b.m128_f32[2], a.m128_f32[3] > b.m128_f32[3])
+#define Vcmpge( a, b) vec4b(a.m128_f32[0] >= b.m128_f32[0], a.m128_f32[1] >= b.m128_f32[1], a.m128_f32[2] >= b.m128_f32[2], a.m128_f32[3] >= b.m128_f32[3])
+#define Vcmplt( a, b) vec4b(a.m128_f32[0] < b.m128_f32[0], a.m128_f32[1] < b.m128_f32[1], a.m128_f32[2] < b.m128_f32[2], a.m128_f32[3] < b.m128_f32[3])
+#define Vcmple( a, b) vec4b(a.m128_f32[0] <= b.m128_f32[0], a.m128_f32[1] <= b.m128_f32[1], a.m128_f32[2] <= b.m128_f32[2], a.m128_f32[3] <= b.m128_f32[3])
+
+#define Vsel( c, a, b) vec4f(c.m128_f32[0] ? a.m128_f32[0] : b.m128_f32[0], c.m128_f32[1] ? a.m128_f32[1] : b.m128_f32[1], c.m128_f32[2] ? a.m128_f32[2] : b.m128_f32[2], c.m128_f32[3] ? a.m128_f32[3] : b.m128_f32[3])
+
+// vector logics
+#define Vnot(r) vec4b(!r.m128_f32[0], !r.m128_f32[1], !r.m128_f32[2], !r.m128_f32[3])
+#define Vxnor( a, b) vec4b(!(a.m128_f32[0] ^ b.m128_f32[0]), !(a.m128_f32[1] ^ b.m128_f32[1]), !(a.m128_f32[2] ^ b.m128_f32[2]), !(a.m128_f32[3] ^ b.m128_f32[3]))
+#define Vxor( a, b) vec4b(a.m128_f32[0] ^ b.m128_f32[0], a.m128_f32[1] ^ b.m128_f32[1], a.m128_f32[2] ^ b.m128_f32[2], a.m128_f32[3] ^ b.m128_f32[3])
+#define Vand( a, b) vec4b(a.m128_f32[0] && b.m128_f32[0], a.m128_f32[1] && b.m128_f32[1], a.m128_f32[2] && b.m128_f32[2], a.m128_f32[3] && b.m128_f32[3])
+#define Vor( a, b) vec4b(a.m128_f32[0] || b.m128_f32[0], a.m128_f32[1] || b.m128_f32[1], a.m128_f32[2] || b.m128_f32[2], a.m128_f32[3] || b.m128_f32[3])
+#define Vall(a) (a.m128_f32[0] && a.m128_f32[1] && a.m128_f32[2] && a.m128_f32[3])
+#define Vany(a) (a.m128_f32[0] || a.m128_f32[1] || a.m128_f32[2] || a.m128_f32[3])
+
+#endif // SIMD_FPU_H
\ No newline at end of file
diff --git a/Runtime/Math/Simd/intrinsic.h b/Runtime/Math/Simd/intrinsic.h
new file mode 100644
index 0000000..eb58c2e
--- /dev/null
+++ b/Runtime/Math/Simd/intrinsic.h
@@ -0,0 +1,184 @@
+#ifndef SIMD_INTRINSIC_H
+#define SIMD_INTRINSIC_H
+
+/* This is the Math library back-end interface.
+   Functions should always return results by value: you want SIMD values to stay in registers, because performance suffers whenever the CPU has to spill a register back to memory.
+   Vector data is declared as a plain typedef, e.g. typedef __m128 vec4f; most compilers won't recognize a vector type wrapped in a class and will generate extra temporaries, spilling vectors back to memory.
+
+   To support a new platform you need to implement at least this function set:
+
+ typedef __m128 vec4f; // vector 4 float packed
+ typedef __m128 vec4fs; // vector 4 float scalar
+ typedef __m128 vec4b; // vector 4 bool packed
+ typedef __m128 vec4bs; // vector 4 bool scalar
+
+ #define Vzero()
+ #define Vone()
+ #define Vpermute(v, mask)
+ #define Vmove(l, r)
+
+	// This template is part of the back-end because some instruction sets support swizzle operations that can be specialized, like the Xbox VMX rotate instruction used in the dot product
+ template<int SWZ> struct Vswizzle
+ {
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return Vpermute(r, SWZ);
+ }
+
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ return Vswizzle<SWZ>::rhs(Vmove(Vswizzle<SWZ>::rhs(l), r));
+ }
+ };
+
+
+	// Aligned store, store vector at address base as 4 floats
+	#define Vstorepf(v, base, offset)
+
+	// Return component x as a float
+	#define	Vstoresf(r)
+
+	// Return component x as a bool
+	#define	Vstoresb(r)
+
+	// Aligned store, store vector at address base as 4 bools
+	#define Vstorepb(vec4f v, bool* r)
+
+	// Aligned load, load 4 floats at address v into a vector register
+	#define Vloadpf(v, offset)
+
+	// Load a float value into a vector register, replicating it into all components
+	#define Vloadsf(s)
+
+	// Load a bool value into a vector register, replicating it into all components
+	#define Vloadsb(s)
+
+	// Load 4 float values into a vector register
+	#define Vload4sf(x, y, z, w)
+
+	// Load 4 bool values into a vector register
+	#define Vload4sb( x, y, z, w)
+
+ #define Vadd(l, r)
+ #define Vsub( l, r)
+ #define Vmul( l, r)
+ #define Vdiv( l, r)
+ #define Vmadd( a, b, c)
+ #define Vmsub( a, b, c)
+ #define Vneg(r)
+
+ // Vector sgn: return -1, 1
+ #define Vsgn(r)
+
+ // Vector sgn: return -1, 0, 1
+ #define Vsign(r)
+
+ #define Vinc(r)
+ #define Vdec(r)
+ #define Vabs(r)
+ #define Vmax( l, r)
+ #define Vmin( l, r)
+
+	// Return the largest of the 4 components
+	#define Vlargest(r)
+
+	// Return the smallest of the 4 components
+	#define Vsmallest(r)
+	#define Vsum(r)
+	#define Vdot( l, r)
+	#define Vsqrt(r)
+
+	#define Vrsqrt(r)
+	#define Vrcp(r)
+
+	// Merge the x (low) component of 4 vectors into one
+	#define Vcombine(x,y,z,w)
+
+ // Vector comparison
+ #define Vcmpeq( a, b)
+ #define Vcmpneq( a, b)
+ #define Vcmpgt( a, b)
+ #define Vcmpge( a, b)
+ #define Vcmplt( a, b)
+ #define Vcmple( a, b)
+
+ #define Vsel( c, a, b)
+
+ // vector logics
+ #define Vnot(r)
+ #define Vxnor( a, b)
+ #define Vxor( a, b)
+ #define Vand( a, b)
+ #define Vor( a, b)
+ #define Vall(a)
+ #define Vany( a)
+
+*/
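+// A minimal usage sketch (illustrative only; NormalizePacked is a
+// hypothetical helper, not part of this interface), composing the ops above
+// once one of the back-ends below has been included:
+//
+//   static void NormalizePacked(float* p)   // p must be ALIGN4F aligned
+//   {
+//       vec4f v = Vloadpf(p, 0);
+//       v = Vmul(v, Vrsqrt(Vdot(v, v)));    // v / |v|
+//       Vstorepf(v, p, 0);
+//   }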
+#if defined(__INTEL_COMPILER) || defined(__ICL) || defined(_MSC_VER)
+ #include <cstddef>
+ #define ATTRIBUTE_ALIGN(a) __declspec(align(a))
+ #define ALIGN4F 16
+ #define MECANIM_FORCE_INLINE __forceinline
+#elif defined(__GNUC__) || defined(__clang__)
+ #include <cstddef>
+
+ #ifndef __has_attribute
+ #define __has_attribute(x) 0
+ #endif
+
+ #if ((__GNUC__ >= 3) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ >= 4) || __has_attribute(always_inline)
+ #ifdef _DEBUG
+ #ifndef MECANIM_FORCE_INLINE
+ #define MECANIM_FORCE_INLINE inline
+ #endif
+ #else
+ #ifndef MECANIM_FORCE_INLINE
+ #define MECANIM_FORCE_INLINE inline __attribute__((always_inline))
+ #endif
+ #endif
+ #endif
+
+ #if defined(__GNUC__) || __has_attribute(aligned)
+ #define ATTRIBUTE_ALIGN(a) __attribute__ ((aligned(a)))
+ #endif
+
+ #define ALIGN4F 16
+#endif
+
+#ifndef MECANIM_FORCE_INLINE
+ #define MECANIM_FORCE_INLINE inline
+#endif
+
+#ifndef ATTRIBUTE_ALIGN
+ #define ATTRIBUTE_ALIGN(a)
+#endif
+
+#ifndef ALIGN4F
+ #define ALIGN4F 16
+#endif
+
+#if UNITY_FORCE_FPU
+ #include "Runtime/Math/Simd/fpu.h"
+#elif UNITY_XENON
+ #include "Runtime/Math/Simd/xenon.h"
+#elif UNITY_PS3
+ #include "Runtime/Math/Simd/ppu.h"
+#elif UNITY_WIN && UNITY_SUPPORTS_SSE
+ #include "Runtime/Math/Simd/sse.h"
+#elif UNITY_OSX
+ #include "Runtime/Math/Simd/sse.h"
+#elif UNITY_SUPPORTS_NEON && (!UNITY_ANDROID)
+ #include "Runtime/Math/Simd/neon.h"
+#else
+ #include "Runtime/Math/Simd/fpu.h"
+#endif
+
+//#define DEBUG_SIMD_ASSERT_IF 1
+#if DEBUG_SIMD_ASSERT_IF
+ #define SIMD_ASSERT_IF(x) AssertIf(x)
+#else
+ #define SIMD_ASSERT_IF(x)
+#endif
+
+#endif
+
diff --git a/Runtime/Math/Simd/math.h b/Runtime/Math/Simd/math.h
new file mode 100644
index 0000000..43a8837
--- /dev/null
+++ b/Runtime/Math/Simd/math.h
@@ -0,0 +1,678 @@
+#ifndef SIMD_MATH_H
+#define SIMD_MATH_H
+
+#include <cmath>
+
+// Float variants of the standard cmath/math.h constants (defined here when the platform headers do not provide them)
+#ifndef M_EPSF
+#define M_EPSF 1e-6f
+#endif
+#ifndef M_PIf
+#define M_PIf 3.14159265358979323846f
+#endif
+#ifndef M_PI_2f
+#define M_PI_2f 1.57079632679489661923f
+#endif
+#ifndef M_PI_4f
+#define M_PI_4f 0.785398163397448309616f
+#endif
+#ifndef M_1_PIf
+#define M_1_PIf 0.318309886183790671538f
+#endif
+#ifndef M_2_PIf
+#define M_2_PIf 0.636619772367581343076f
+#endif
+#ifndef M_DEG_2_RADf
+#define M_DEG_2_RADf 0.0174532925f
+#endif
+#ifndef M_RAD_2_DEGf
+#define M_RAD_2_DEGf 57.295779513f
+#endif
+
+#include "Runtime/Math/Simd/float4.h"
+#include "Runtime/Math/Simd/bool4.h"
+
+namespace math
+{
+
+static inline bool all(bool4 const& r);
+static inline bool any(bool4 const& r);
+template <typename T> static inline T clamp(T const& v, T const& a, T const& b);
+static inline float cond(bool c, float const& a, float const& b);
+static inline int cond(bool c, int const& a, int const& b);
+static inline float cubic(float const& a, float const& b, float const& c, float const& d, float const& u);
+static inline float4 cubic(float4 const& a, float4 const& b, float4 const& c, float4 const& d, float4 const& u);
+static inline float4 cross(float4 const& a, float4 const& b);
+static inline float degrees(float const& rad);
+static inline float4 degrees(float4 const& rad);
+static inline float1 dot(float4 const& l, float4 const& r);
+static inline float1 dot(float4 const& r);
+static inline float1 length(float4 const& r);
+static inline float lerp(float const& a, float const& b, float x);
+static inline float1 lerp(float1 const& a, float1 const& b, float1 const& x);
+static inline float4 lerp(float4 const& a, float4 const& b, float1 const& x);
+static inline float4 lerp(float4 const& a, float4 const& b, float4 const& x);
+template <typename T> static inline T maximum(T const& a, T const& b);
+template <typename T> static inline T minimum(T const& a, T const& b);
+static inline float1 maximum(float4 const& r);
+static inline float1 minimum(float4 const& r);
+static inline float4 normalize(float4 const& r);
+static inline float pow(float const& r, float const& e);
+static inline float4 pow(float4 const& r, float1 const& e);
+static inline float radians(float const& deg);
+static inline float4 radians(float4 const& deg);
+static inline float4 rcp(float4 const& r);
+static inline float1 rcp(float1 const& r);
+static inline float4 rsqrt(float4 const& r );
+static inline float saturate(float const& r);
+static inline float1 saturate(float1 const& r);
+static inline float4 saturate(float4 const& r);
+static inline float4 scaleIdentity();
+static inline float4 scaleWeight(float4 const& s, float1 const& w);
+static inline void sincos(float4 const& u, float4& s, float4& c);
+static inline void sincos(float1 const& u, float1& s, float1& c);
+static inline void sincos_est(float4 const& u, float4& s, float4& c);
+static inline void sincos_est(float1 const& u, float1& s, float1& c);
+static inline float sgn(float const& r);
+static inline float1 sgn(float1 const& r);
+template <typename T> static inline vecexp4<vec4> sgn(vecexp4<T> const& x);
+template <typename T> static inline vecexp1<vec4> sgn(vecexp1<T> const& x);
+static inline float sign(float const& r);
+static inline float4 sign(float4 const& r);
+static inline float smoothstep( float min, float max, float x);
+static inline float smoothpulse( float minmin, float minmax, float maxmin, float maxmax, float x);
+static inline float1 sqrt(float1 const& r);
+static inline float4 sqrt(float4 const& r);
+static inline float1 sum(float4 const& r);
+static inline float4 vector(float4 const& v);
+static inline float unrollangle(float angleRef, float angle);
+static inline float4 load(float const* v);
+static inline void store(float4 const& v, float* r);
+static inline void store(bool4 const& v, bool* r);
+
+
+static inline float abs(const float &r)
+{
+ return std::abs(r);
+}
+
+static inline float cos(float const& theta)
+{
+ return std::cos(theta);
+}
+
+static inline float rcp(const float &r)
+{
+ return 1.f/r;
+}
+
+static inline float rsqrt(const float& r)
+{
+ return 1.f/std::sqrt(r);
+}
+
+static inline float sin(float const& theta)
+{
+ return std::sin(theta);
+}
+
+static inline void sincos(float const& u, float& s, float& c)
+{
+ s = sin(u);
+ c = cos(u);
+}
+
+static inline float tan(float const& theta)
+{
+ return std::tan(theta);
+}
+
+static inline float atan(float const& t)
+{
+ return std::atan(t);
+}
+
+static inline float sqrt(const float& r)
+{
+ return std::sqrt(r);
+}
+
+static inline float modf(float x, float &ip)
+{
+#if UNITY_FLASH
+ float intPart;
+ __asm __volatile__("%[RES] = (%[FARG] < 0 ? Math.ceil(%[FARG]) : Math.floor(%[FARG]));//modf" : [RES] "=f" (intPart) : [FARG] "f" (x));
+ ip = intPart;
+ return x-intPart;
+#else
+ return std::modf(x, &ip);
+#endif
+}
+
+static inline float fmod(float x, float y)
+{
+ return std::fmod(x,y);
+}
+
+static inline float pow(const float& x,const float& y)
+{
+ return std::pow(x,y);
+}
+
+template <typename T> static inline vecexp4<vec4> abs(vecexp4<T> const& x)
+{
+ return vecexp4<vec4>( Vabs( x.eval() ) );
+}
+
+template <typename T> static inline vecexp1<T> abs(vecexp1<T> const& x)
+{
+ return vecexp1<T>( Vabs( x.eval() ) );
+}
+
+static inline float1 abs(float1 const& x)
+{
+ return float1( Vabs( x.eval() ) );
+}
+
+static inline bool all(bool4 const& r)
+{
+ return Vall(r.v);
+}
+static inline bool any(bool4 const& r)
+{
+ return Vany(r.v);
+}
+
+// x clamped to the range [a, b] as follows:
+// Returns a if x is less than a.
+// Returns b if x is greater than b.
+// Returns x otherwise.
+template <typename T> static inline T clamp(T const& v, T const& a, T const& b)
+{
+ return minimum(b, maximum(a, v));
+}
+
+template <typename L, typename R> static inline vecexp4<vec4> cond(bool4 const& c, vecexp4<L> const& l, vecexp4<R> const& r)
+{
+ return vecexp4<vec4>( Vsel(c.v, l.eval(), r.eval()) );
+}
+
+template <typename L, typename R> static inline vecexp4<vec4> cond(bool1 const& c, vecexp4<L> const& l, vecexp4<R> const& r)
+{
+ return vecexp4<vec4>( Vsel(c.s, l.eval(), r.eval()) );
+}
+
+template <typename L, typename R> static inline vecexp1<vec4> cond(bool1 const& c, vecexp1<L> const& l, vecexp1<R> const& r)
+{
+	return vecexp1<vec4>( Vsel(c.s, l.eval(), r.eval()) );
+}
+
+static inline float cond(bool c, float const& a, float const& b)
+{
+ return c ? a : b;
+}
+
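+// Branchless select: -int(c) is all ones when c is true, so the mask keeps
+// (a - b) and the sum yields a; when c is false the mask is zero and b is
+// returned unchanged.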
+static inline int cond(bool c, int const& a, int const& b)
+{
+ return int(b + (-int(c) & (a - b)));
+}
+
+static inline long int cond(bool c, long int const& a, long int const& b)
+{
+ typedef long int long_int;
+ return long_int(b + (-long_int(c) & (a - b)));
+}
+
+static inline unsigned long cond(bool c, unsigned long const& a, unsigned long const& b)
+{
+ return b + (- long(c) & (a - b));
+}
+
+static inline unsigned int cond(bool c, unsigned int const& a, unsigned int const& b)
+{
+ return b + (- int(c) & (a - b));
+}
+
+// De Casteljau construction of bezier
+static inline float cubic(float const& a, float const& b, float const& c, float const& d, float const& u)
+{
+ const float ab = lerp(a,b,u);
+ const float bc = lerp(b,c,u);
+ const float cd = lerp(c,d,u);
+ const float abc = lerp(ab,bc,u);
+ const float bcd = lerp(bc,cd,u);
+ return lerp(abc, bcd, u);
+}
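+// e.g. cubic(a, b, c, d, 0.5f) == (a + 3*b + 3*c + d)/8, the midpoint of the
+// cubic Bezier curve with control points a, b, c, d.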
+
+static inline float4 cubic(float4 const& a, float4 const& b, float4 const& c, float4 const& d, float4 const& u)
+{
+ const float4 ab = lerp(a,b,u);
+ const float4 bc = lerp(b,c,u);
+ const float4 cd = lerp(c,d,u);
+ const float4 abc = lerp(ab,bc,u);
+ const float4 bcd = lerp(bc,cd,u);
+ return lerp(abc, bcd, u);
+}
+
+static inline float4 cross(float4 const& a, float4 const& b)
+{
+ return float4(a.yzxw()*b.zxyw() - a.zxyw()*b.yzxw());
+}
+
+static inline float degrees(float const& rad)
+{
+ return M_RAD_2_DEGf*rad;
+}
+
+static inline float4 degrees(float4 const& rad)
+{
+ return float1(M_RAD_2_DEGf)*rad;
+}
+
+static inline float1 degrees(float1 const& rad)
+{
+ return float1(M_RAD_2_DEGf)*rad;
+}
+
+static inline float1 dot(float4 const& l, float4 const& r)
+{
+ return float1( Vdot(l.eval(), r.eval()) );
+}
+
+static inline float1 dot(float4 const& r)
+{
+ return float1( Vdot(r.eval(), r.eval()) );
+}
+
+static inline float1 length(float4 const& r)
+{
+ return float1(Vsqrt( Vdot(r.eval(), r.eval()) ));
+}
+
+static inline float lerp(float const& a, float const& b, float x)
+{
+ return a + x*(b - a);
+}
+
+static inline float1 lerp(float1 const& a, float1 const& b, float1 const& x)
+{
+ return a + x*(b - a);
+}
+
+static inline float4 lerp(float4 const& a, float4 const& b, float1 const& x)
+{
+ return a + x*(b - a);
+}
+
+static inline float4 lerp(float4 const& a, float4 const& b, float4 const& x)
+{
+ return a + x*(b - a);
+}
+
+template <typename T> static inline T maximum(T const& a, T const& b)
+{
+ return cond(a > b, a, b);
+}
+
+static inline float1 maximum(float4 const& r)
+{
+ return float1( Vlargest(r.eval()) );
+}
+
+template <typename T> static inline T minimum(T const& a, T const& b)
+{
+ return cond(a < b, a, b);
+}
+
+static inline float1 minimum(float4 const& r)
+{
+ return float1( Vsmallest(r.eval()) );
+}
+
+static inline float4 normalize(float4 const& r)
+{
+ return float4( Vmul(r.eval(), Vrsqrt(Vdot(r.eval(), r.eval()) ) ));
+}
+
+static inline float4 pow(float4 const& r, float1 const& e)
+{
+ float e1 = e.tofloat();
+
+ return float4( std::pow( r.x().tofloat(), e1), std::pow( r.y().tofloat(), e1), std::pow( r.z().tofloat(), e1), std::pow( r.w().tofloat(), e1));
+}
+
+static inline float radians(float const& deg)
+{
+ return M_DEG_2_RADf*deg;
+}
+
+static inline float4 radians(float4 const& deg)
+{
+ return float1(M_DEG_2_RADf)*deg;
+}
+
+static inline float4 rcp(float4 const& r)
+{
+ return float4(Vrcp(r.eval()));
+}
+
+static inline float1 rcp(float1 const& r)
+{
+ return float1(Vrcp(r.eval()));
+}
+
+static inline float4 rsqrt(float4 const& r)
+{
+ return float4(Vrsqrt(r.eval()));
+}
+
+static inline float saturate(float const& r)
+{
+ return clamp(r, 0.f, 1.f);
+}
+
+static inline float1 saturate(float1 const& r)
+{
+ return float1(Vmin( Vmax(r.eval(), Vzero()), Vone()));
+}
+
+static inline float4 saturate(float4 const& r)
+{
+ return float4(Vmin( Vmax(r.eval(), Vzero()), Vone()));
+}
+
+static inline float4 scaleIdentity()
+{
+ return float4::one();
+}
+
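+// Weighted scale: raises |s| to the power w and restores the sign of s, so
+// w == 0 yields the identity scale (1) and w == 1 yields s itself.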
+static inline float4 scaleWeight(float4 const& s, float1 const& w)
+{
+ float4 s_abs = math::abs(s);
+ float4 s_sng = math::sgn(s);
+
+ return s_sng * pow( s_abs, w);
+}
+
+static inline float4 scaleBlend(float4 const& sa, float4 const& sb,float1 const& w)
+{
+ const float4 saw = scaleWeight(sa, float1::one() - w);
+ const float4 sbw = scaleWeight(sb, w);
+ const float4 s_sng = math::sgn( cond( w > float1(.5), sb, sa) );
+ return s_sng * math::abs(saw * sbw);
+}
+
+// return -1 if r < 0
+// return 1 if r >= 0
+static inline float sgn(float const& r)
+{
+ return cond(r >= 0.f, 1.f, -1.f);
+}
+
+// return -1 if r < 0
+// return 1 if r >= 0
+static inline float1 sgn(float1 const& r)
+{
+ return float1(Vsgn(r.eval()));
+}
+
+// return -1 if r < 0
+// return 1 if r >= 0
+template <typename T> static inline vecexp4<vec4> sgn(vecexp4<T> const& x)
+{
+ return vecexp4<vec4>( Vsgn( x.eval()) );
+}
+
+// return -1 if r < 0
+// return 1 if r >= 0
+template <typename T> static inline vecexp1<vec4> sgn(vecexp1<T> const& x)
+{
+ return vecexp1<vec4>( Vsgn( x.eval() ) );
+}
+
+// return -1 if r < 0
+// return 0 if r == 0
+// return 1 if r > 0
+static inline float sign(float const& r)
+{
+ return cond( r > 0, 1.f, cond( r < 0, -1.f, 0.f));
+}
+
+// return -1 if r < 0
+// return 0 if r == 0
+// return 1 if r > 0
+static inline float4 sign(float4 const& r)
+{
+ return float4(Vsign(r.eval()));
+}
+
+static inline float4 smoothClamp(float4 const& v, float4 const& m, float1 const& r)
+{
+ return cond(v-m>float1::zero(),m+r*((v-m)/(v-m+r)),v);
+}
+
+static inline float smoothstep( float min, float max, float x)
+{
+ x = math::clamp(x, min, max);
+ return -2.f * math::pow((x-min)/(max-min), 3.f) + 3.f * math::pow((x-min)/(max-min), 2.f);
+}
+
+static inline float smoothpulse( float minmin, float minmax, float maxmin, float maxmax, float x)
+{
+ return smoothstep(minmin,minmax,x) - smoothstep(maxmin,maxmax,x);
+}
+
+static inline float1 sqrt(float1 const& r)
+{
+ return float1(Vsqrt(r.eval()));
+}
+
+static inline float4 sqrt(float4 const& r)
+{
+ return float4(Vsqrt(r.eval()));
+}
+
+static inline float1 sum(float4 const& r)
+{
+ return float1(Vsum(r.eval()));
+}
+
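+// Angle opposite the side of length aLen, by the law of cosines:
+// cos(A) = (b^2 + c^2 - a^2) / (2*b*c), clamped to [-1, 1] before acos.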
+static inline float1 triangleAngle(math::float1 const& aLen, math::float1 const& aLen1, math::float1 const& aLen2)
+{
+ math::float1 c = clamp<float1>((aLen1*aLen1 + aLen2*aLen2 - aLen*aLen) / (aLen1*aLen2) / float1(2.f), -float1::one() , float1::one());
+ return math::float1(acos(c.tofloat()));
+}
+
+static inline float4 vector(float4 const& v)
+{
+ constant_float4( mask, 1.f,1.f,1.f,0.f);
+ return v*mask;
+}
+
+static inline float4 vector(float const& x, float const& y, float const& z)
+{
+ return float4(x, y, z, 0);
+}
+
+static inline float unrollangle(float angleRef, float angle)
+{
+ float i;
+ float f = math::modf( (angleRef-angle)/(2.f*M_PIf), i);
+ return angle + ( (i+(math::abs(f) > 0.5f ? math::sgn(f) * 1 : 0)) * 2.f * M_PIf);
+}
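+// e.g. unrollangle(6.2f, 0.1f) returns 0.1f + 2*pi (~6.383f): the
+// representation of the 0.1 rad angle that lies closest to the 6.2 rad
+// reference.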
+
+static inline float4 doubleAtan(float4 const& v)
+{
+ float ATTRIBUTE_ALIGN(ALIGN4F) av[4];
+
+ store(v, av);
+
+ return float4(2.0f*atan(av[0]),2.0f*atan(av[1]),2.0f*atan(av[2]),2.0f*atan(av[3]));
+}
+
+
+// Over the range [-pi/2, pi/2] the maximum error is 8.186e-4
+static inline vec4f cos_estimate(vec4f x)
+{
+	// cos(x) = 1 - (c2*x^2) + (c4*x^4) - (c6*x^6)
+	// cos(x) = 1 + (-c2*x^2) + (c4*x^4) + (-c6*x^6)	// 3 mul and 3 mul add
+	// bake the signs into the constants to remove some complexity
+	cvec4fs(c2, -0.5f);
+	cvec4fs(c4, 4.166666666667e-2f);
+	cvec4fs(c6, -1.38888889e-3f);
+
+	// Use Horner form to reduce the polynomial instruction count
+ // cos(x) = 1 + x^2*(c2 + x^2*(c4 + x^2*(c6))) // 1 mul and 3 mul add
+ vec4f x2 = Vmul(x,x);
+ return Vmadd(Vmadd(Vmadd(c6, x2, c4), x2, c2), x2, Vone());
+}
+
+
+// Over the range [-pi/2, pi/2] the maximum error is 9.1e-5
+static inline vec4f sin_estimate(vec4f x)
+{
+	// sin(x) = x - (c3*x^3) + (c5*x^5) - (c7*x^7)
+	// sin(x) = x + (-c3*x^3) + (c5*x^5) + (-c7*x^7)	// 4 mul and 3 mul add
+	// bake the signs into the constants to remove some complexity
+	cvec4fs(c3, -0.166666567325592041015625f);
+	cvec4fs(c5, 8.33220803e-3f);
+	cvec4fs(c7, -1.95168955e-4f);
+
+	// Use Horner form to reduce the polynomial instruction count
+ // sin(x) = x * ( 1 + x^2*(c3 + x^2*(c5 + x^2*c7))) // 2 mul and 3 mul add
+ vec4f x2 = Vmul(x,x);
+ return Vmul(x, Vmadd(Vmadd(Vmadd(c7, x2, c5), x2, c3), x2, Vone()));
+}
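+// Both estimates assume the argument is already range-reduced to
+// [-pi/2, pi/2]; a sketch (illustrative):
+//   cvec4fs(quarterPi, M_PI_4f);
+//   vec4f s = sin_estimate(quarterPi);   // each lane ~0.70711 = sin(pi/4)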
+
+static inline float4 sin_est(float4 const& x)
+{
+ return float4( sin_estimate( x.eval() ) );
+}
+
+static inline float4 cos_est(float4 const& x)
+{
+ return float4( cos_estimate( x.eval() ) );
+}
+
+static inline void sincos(float4 const& u, float4& s, float4& c)
+{
+ float ATTRIBUTE_ALIGN(ALIGN4F) sv[4];
+ float ATTRIBUTE_ALIGN(ALIGN4F) cv[4];
+ float ATTRIBUTE_ALIGN(ALIGN4F) uv[4];
+
+ store(u, uv);
+
+ sincos(uv[0], sv[0], cv[0]);
+ sincos(uv[1], sv[1], cv[1]);
+ sincos(uv[2], sv[2], cv[2]);
+ sincos(uv[3], sv[3], cv[3]);
+
+ s = load(sv);
+ c = load(cv);
+}
+
+static inline void sincos(float1 const& u, float1& s, float1& c)
+{
+ float sv;
+ float cv;
+
+ sincos(u.tofloat(), sv, cv);
+
+ s = float1(sv);
+ c = float1(cv);
+}
+
+static inline void sincos_est(float4 const& u, float4& s, float4& c)
+{
+ s = sin_est(u);
+ c = cos_est(u);
+}
+
+static inline void sincos_est(float1 const& u, float1& s, float1& c)
+{
+ s = float1( sin_estimate( u.eval() ) );
+ c = float1( cos_estimate( u.eval() ) );
+}
+
+static inline float4 tan(float4 const& x)
+{
+ vec4f x2,x3;
+
+ // Compute x^2 and x^3
+ //
+ x2 = Vmul(x.eval(),x.eval());
+ x3 = Vmul(x2,x.eval());
+
+	// Compute both the sin and cos of the angle
+	// using polynomial expressions:
+	// cx = 1.0f + x2 * (C0 * x2 + C1), and
+	// sx = x + x3 * S0
+ //
+ cvec4fs(c0, 0.0097099364f);
+ cvec4fs(c1, -0.4291161787f);
+ cvec4fs(s0, -0.0957822992f);
+
+ vec4f ct2 = Vmadd(c0,x2,c1);
+
+ vec4f cx = Vmadd(ct2,x2, Vone());
+ vec4f sx = Vmadd(s0,x3, x.eval());
+
+ return float4(Vdiv(sx,cx));
+}
+
+static inline float4 atan(float4 const& x)
+{
+	// Truncated Maclaurin series: x - (x^3)/3 + (x^5)/5 - (x^7)/7 + ...
+	// (only accurate for small |x|; the series converges for |x| <= 1)
+
+ cvec4fs(c3, 3.f);
+ cvec4fs(c5, 5.f);
+ cvec4fs(c7, 7.f);
+ vec4f x2 = Vmul(x.eval(),x.eval());
+ vec4f x3 = Vmul(x2,x.eval());
+ vec4f x5 = Vmul(x3,x2);
+ vec4f x7 = Vmul(x5,x2);
+
+	return float4(Vsub( x.eval(), Vsub(Vdiv( x3, c3), Vsub(Vdiv(x5,c5), Vdiv(x7,c7)))));
+}
+
+static inline float halfTan(float a)
+{
+ //float x = math::fmod(0.5f*abs(a)+M_PI_2,float(M_PI));
+ float x1 = (0.5f*abs(a)+M_PI_2f);
+ return tan(clamp(sign(a)*(x1-M_PI_2f),-M_PI_2f+M_EPSF,M_PI_2f-M_EPSF));
+}
+
+static inline float4 halfTan(float4 const& a)
+{
+	static const float4 nM_PI_2(-M_PI_2f+M_EPSF);
+	static const float4 pM_PI_2( M_PI_2f-M_EPSF);
+
+ float4 x = float1(0.5f) * abs(a) + float1(M_PI_2f);
+ return tan( math::clamp<float4>( sign(a) * (x-float4(M_PI_2f)), nM_PI_2, pM_PI_2 ));
+}
+
+static inline float4 mirror(float4 const& t)
+{
+ constant_float4(mirrorT,-1,1,1,1);
+ return t * mirrorT;
+}
+
+static inline float4 load(float const* v)
+{
+ return float4(Vloadpf(v, 0));
+}
+
+static inline void store(float4 const& v, float* r)
+{
+ Vstorepf(v.eval(), r, 0);
+}
+
+static inline void store(bool4 const& v, bool* r)
+{
+ Vstorepb(v.v, r);
+}
+
+}
+
+#endif
+
diff --git a/Runtime/Math/Simd/neon.h b/Runtime/Math/Simd/neon.h
new file mode 100644
index 0000000..08196f9
--- /dev/null
+++ b/Runtime/Math/Simd/neon.h
@@ -0,0 +1,548 @@
+#ifndef SIMD_NEON_H
+#define SIMD_NEON_H
+
+#include <arm_neon.h>
+
+typedef float32x4_t vec4f;
+typedef float32x4_t vec4fs;
+typedef uint32x4_t vec4b;
+typedef uint32x4_t vec4bs;
+
+#define SWZ_MASK(x, y, z, w) (((w) << 6) | ((z) << 4) | ((y) << 2) | ((x)))
+#define SWZ_X(MASK) (((MASK) >> 0) & 3)
+#define SWZ_Y(MASK) (((MASK) >> 2) & 3)
+#define SWZ_Z(MASK) (((MASK) >> 4) & 3)
+#define SWZ_W(MASK) (((MASK) >> 6) & 3)
+
+//VPERMWI_CONST(x, y, z, w)
+#if UNITY_WINRT
+#define cvec4f(name, x,y,z,w) static const vec4f name = Vload4sf(x, y, z, w)
+#define cvec4b(name, x,y,z,w) static const vec4b name = Vload4sb(x, y, z, w)
+#define cvec4fs(name, s) static const vec4fs name = Vloadsf(s)
+#else
+#define cvec4f(name, x,y,z,w) static const vec4f name = {(x),(y),(z),(w)}
+#define cvec4b(name, x,y,z,w) static const vec4b name = {(x),(y),(z),(w)}
+#define cvec4fs(name, s) static const vec4fs name = {(s),(s),(s),(s)}
+#endif
+
+enum simd_mask
+{
+ kXYZW = SWZ_MASK(0,1,2,3),
+ kXXXX = SWZ_MASK(0,0,0,0),
+ kYYYY = SWZ_MASK(1,1,1,1),
+ kZZZZ = SWZ_MASK(2,2,2,2),
+ kWWWW = SWZ_MASK(3,3,3,3),
+
+ kXWYZ = SWZ_MASK(0,3,1,2),
+ kXZWY = SWZ_MASK(0,2,3,1),
+
+ kYZWX = SWZ_MASK(1,2,3,0),
+ kYXZW = SWZ_MASK(1,0,2,3),
+ kYWZX = SWZ_MASK(1,3,2,0),
+ kYZXW = SWZ_MASK(1,2,0,3),
+ kYXWZ = SWZ_MASK(1,0,3,2),
+
+ kZWXY = SWZ_MASK(2,3,0,1),
+ kZYXW = SWZ_MASK(2,1,0,3),
+ kZYWX = SWZ_MASK(2,1,3,0),
+ kZXYW = SWZ_MASK(2,0,1,3),
+
+ kWYZX = SWZ_MASK(3,1,2,0),
+ kWXZY = SWZ_MASK(3,0,2,1),
+ kWYXZ = SWZ_MASK(3,1,0,2),
+ kWWWZ = SWZ_MASK(3,3,3,2),
+ kWWZZ = SWZ_MASK(3,3,2,2),
+ kWZYX = SWZ_MASK(3,2,1,0),
+};
+
+#define Vzero() vdupq_n_f32(0.0f)
+#define Vone() vdupq_n_f32(1.0f)
+
+#define Vfalse() vdupq_n_u32(0)
+#define Vtrue() vdupq_n_u32(0xFFFFFFFF)
+
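+// Scratch union used by the swizzle specializations below to reinterpret a
+// float32x4_t as two 64-bit halves for vtrn/vrev lane shuffles.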
+union U { float32x2x2_t f2x2; float32x4_t f4; uint8x8x2_t b8x2; float32_t f[4]; };
+
+#define LHS_FUNTION() \
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)\
+ {\
+ vec4f m = Vmove(rhs(l), r);\
+ return rhs(m);\
+ }
+
+//#define Vpermute(v, mask) v
+//#define Vmove(l, r) vextq_f32(l, r, 0)
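+// Vmove(l, r): lane 0 taken from r, lanes 1..3 from l, via a one-lane select mask.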
+MECANIM_FORCE_INLINE vec4f Vmove(vec4f l, vec4f r)
+{
+ uint32x4_t sel = Vfalse();
+ sel = vsetq_lane_u32(0xFFFFFFFF,sel,0);
+ return vbslq_f32(sel, r, l);
+}
+template<int SWZ> struct Vswizzle;
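+// Declared but defined only through the specializations below, so using an
+// unsupported mask fails at compile time. A generic lane-by-lane fallback is
+// kept commented out for reference: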
+/*
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ ::uint32_t lanes[4];
+ uint32x4_t u = vreinterpretq_u32_f32(r);
+ uint32x4_t result;
+
+ lanes[0] = vgetq_lane_u32(u, 0);
+ lanes[1] = vgetq_lane_u32(u, 1);
+ lanes[2] = vgetq_lane_u32(u, 2);
+ lanes[3] = vgetq_lane_u32(u, 3);
+
+ result = vdupq_n_u32(lanes[SWZ_X(SWZ)]);
+ result = vsetq_lane_u32(lanes[SWZ_Y(SWZ)], result, 1);
+ result = vsetq_lane_u32(lanes[SWZ_Z(SWZ)], result, 2);
+ result = vsetq_lane_u32(lanes[SWZ_W(SWZ)], result, 3);
+
+ return vreinterpretq_f32_u32(result);
+ }
+
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ vec4f m = Vmove(Vswizzle<SWZ>::rhs(l), r);
+ return Vswizzle<SWZ>::rhs(m);
+ }
+};
+*/
+
+template<> struct Vswizzle<kXYZW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return r;
+ }
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ return Vmove(l, r);
+ }
+};
+template<> struct Vswizzle<kXXXX>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return vdupq_lane_f32(vget_low_f32(r),0);
+ }
+
+ LHS_FUNTION()
+};
+template<> struct Vswizzle<kYYYY>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return vdupq_lane_f32(vget_low_f32(r),1);
+ }
+
+ LHS_FUNTION()
+};
+template<> struct Vswizzle<kZZZZ>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return vdupq_lane_f32(vget_high_f32(r),0);
+ }
+ LHS_FUNTION()
+};
+template<> struct Vswizzle<kWWWW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return vdupq_lane_f32(vget_high_f32(r),1);
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kXWYZ>
+ {
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ U u; u.f2x2 = vtrn_f32(vget_low_f32(p), vrev64_f32(vget_high_f32(p))); return u.f4;
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kXZWY>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[0], vrev64_f32(vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[1]));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kYZWX>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vreinterpretq_f32_u32(vextq_u32(vreinterpretq_u32_f32(p), vreinterpretq_u32_f32(p), 1));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kYXZW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vrev64_f32(vget_low_f32(p)), vget_high_f32(p));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kYWZX>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[1], vrev64_f32(vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[0]));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kYZXW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ U u;
+ u.f2x2 = vtrn_f32(vrev64_f32(vget_low_f32(p)), vget_high_f32(p));
+ return u.f4;
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kYXWZ>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vrev64q_f32(p);
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kZWXY>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vget_high_f32(p), vget_low_f32(p));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kZYXW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vrev64_f32(vreinterpret_f32_u32(vext_u32(vreinterpret_u32_f32(vget_low_f32(p)), vreinterpret_u32_f32(vget_high_f32(p)), 1))), vrev64_f32(vreinterpret_f32_u32(vext_u32(vreinterpret_u32_f32(vget_high_f32(p)), vreinterpret_u32_f32(vget_low_f32(p)), 1))));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kZYWX>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vrev64_f32(vreinterpret_f32_u32(vext_u32(vreinterpret_u32_f32(vget_low_f32(p)), vreinterpret_u32_f32(vget_high_f32(p)), 1))), vreinterpret_f32_u32(vext_u32(vreinterpret_u32_f32(vget_high_f32(p)), vreinterpret_u32_f32(vget_low_f32(p)), 1)));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kZXYW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vrev64_f32(vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[0]), vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[1]);
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kWYZX>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ U u;
+ u.f4 = vrev64q_f32(p);
+ u.f2x2 = vtrn_f32(u.f2x2.val[1], u.f2x2.val[0]);
+ return u.f4;
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kWXZY>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ U u; u.f2x2 = vtrn_f32(vrev64_f32(vget_high_f32(p)), vget_low_f32(p)); return u.f4;
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kWYXZ>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vrev64_f32(vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[1]), vtrn_f32(vget_low_f32(p), vget_high_f32(p)).val[0]);
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kWWWZ>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vdup_lane_f32(vget_high_f32(p), 1), vrev64_f32(vget_high_f32(p)));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kWWZZ>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ U u; u.f2x2 = vtrn_f32(vget_high_f32(p), vget_high_f32(p));
+ return vreinterpretq_f32_u32(vextq_u32(vreinterpretq_u32_f32(u.f4), vreinterpretq_u32_f32(u.f4), 2));
+ }
+ LHS_FUNTION()
+};
+
+template<> struct Vswizzle<kWZYX>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f p)
+ {
+ return vcombine_f32(vrev64_f32(vget_high_f32(p)), vrev64_f32(vget_low_f32(p)));
+ }
+ LHS_FUNTION()
+};
+
+static MECANIM_FORCE_INLINE float Vstoresf(vec4f r)
+{
+ return vgetq_lane_f32(r, 0);
+}
+
+static MECANIM_FORCE_INLINE bool Vstoresb(vec4b r)
+{
+ return (vgetq_lane_u32(r, 0) > 0) ? true : false;
+}
+
+// Aligned store
+#define Vstorepf(v, base, offset) vst1q_f32((float32_t*)((base)+(offset)),v)
+
+static MECANIM_FORCE_INLINE void Vstorepb(const vec4b v, bool* r)
+{
+ ::uint32_t u;
+ vst1q_lane_u32(&u, v, 0);
+ r[0] = (u > 0) ? true : false;
+ vst1q_lane_u32(&u, v, 1);
+ r[1] = (u > 0) ? true : false;
+ vst1q_lane_u32(&u, v, 2);
+ r[2] = (u > 0) ? true : false;
+ vst1q_lane_u32(&u, v, 3);
+ r[3] = (u > 0) ? true : false;
+}
+
+static MECANIM_FORCE_INLINE vec4f Vloadsf(float s)
+{
+ return vmovq_n_f32(s);
+}
+
+static MECANIM_FORCE_INLINE vec4b Vloadsb(bool s)
+{
+ const ::uint32_t false_true[2] = { 0, 0xFFFFFFFF };
+ return vdupq_n_u32(false_true[s ? 1 : 0]);
+}
+
+static MECANIM_FORCE_INLINE vec4f Vload4sf(float x, float y, float z, float w)
+{
+ float32x4_t result;
+ result = vdupq_n_f32(x);
+ result = vsetq_lane_f32(y, result, 1);
+ result = vsetq_lane_f32(z, result, 2);
+ result = vsetq_lane_f32(w, result, 3);
+ return result;
+}
+
+static MECANIM_FORCE_INLINE vec4b Vload4sb(bool x, bool y, bool z, bool w)
+{
+ const ::uint32_t val[4] =
+ {
+ x ? 0xffffffff : 0x00,
+ y ? 0xffffffff : 0x00,
+ z ? 0xffffffff : 0x00,
+ w ? 0xffffffff : 0x00
+ };
+
+ return vld1q_u32(&val[0]);
+}
+
+static MECANIM_FORCE_INLINE vec4f Vloadpf(float const* buf, int offset)
+{
+ return vld1q_f32((float32_t const*)buf + offset);
+}
+
+#define Vadd(l, r) vaddq_f32(l, r)
+#define Vsub(l, r) vsubq_f32(l, r)
+#define Vmul(l, r) vmulq_f32(l, r)
+
+
+// return a*b+c : note that vmlaq_f32(acc, a, b) computes acc + a*b
+#define Vmadd(a, b, c) vmlaq_f32(c, a, b)
+// return a*b-c : vmlsq_f32(acc, a, b) computes acc - a*b, hence the negation
+#define Vmsub(a, b, c) Vneg(vmlsq_f32(c, a, b))
+
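+// Negate by flipping the IEEE-754 sign bit.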
+static MECANIM_FORCE_INLINE vec4f Vneg(vec4f r)
+{
+ uint32x4_t sign_constant = vdupq_n_u32(0x80000000);
+ uint32x4_t negated = veorq_u32(vreinterpretq_u32_f32(r), sign_constant);
+ return vreinterpretq_f32_u32(negated);
+}
+
+// vector sgn: return -1, 1
+static MECANIM_FORCE_INLINE vec4f Vsgn(vec4f r)
+{
+ uint32x4_t sign_constant = vdupq_n_u32(0x80000000);
+ uint32x4_t signs = vandq_u32(vreinterpretq_u32_f32(r), sign_constant);
+ uint32x4_t ones = vdupq_n_u32 (0x3f800000);
+
+ return vreinterpretq_f32_u32(vorrq_u32(signs,ones));
+/* float32x4_t ones = Vone();
+ float32x4_t nones = Vneg(ones);
+ uint32x4_t cmp = vcltq_f32(r,Vzero());
+ return vbslq_f32(cmp,nones,ones);*/
+}
+
+// vector sgn: return -1, 0, 1
+static MECANIM_FORCE_INLINE vec4f Vsign(vec4f r)
+{
+ uint32x4_t sign_constant = vdupq_n_u32(0x80000000);
+ uint32x4_t signs = vandq_u32(vreinterpretq_u32_f32(r), sign_constant);
+ uint32x4_t ones = vdupq_n_u32 (0x3f800000);
+
+ return vreinterpretq_f32_u32(vorrq_u32( signs, vandq_u32( vmvnq_u32( vceqq_f32( r, Vzero())), ones)));
+}
+
+#define Vinc(r) Vadd( (r), Vone())
+#define Vdec(r) Vsub( (r), Vone())
+
+static MECANIM_FORCE_INLINE vec4f Vabs(vec4f r)
+{
+ return vabsq_f32(r);
+}
+
+#define Vmax( l, r) vmaxq_f32(l, r)
+#define Vmin( l, r) vminq_f32(l, r)
+
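+// Horizontal reductions: fold the two halves pairwise, then splat the scalar
+// result back across all four lanes.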
+static MECANIM_FORCE_INLINE vec4fs Vlargest(vec4f r)
+{
+ float32x2_t temp = vpmax_f32 ( vget_high_f32(r), vget_low_f32(r) );
+ temp = vpmax_f32(temp, temp);
+ return vcombine_f32(temp,temp);
+}
+
+static MECANIM_FORCE_INLINE vec4fs Vsmallest(vec4f r)
+{
+ float32x2_t temp = vpmin_f32 ( vget_high_f32(r), vget_low_f32(r) );
+ temp = vpmin_f32(temp, temp);
+ return vcombine_f32(temp,temp);
+}
+
+static MECANIM_FORCE_INLINE vec4fs Vsum(vec4f r)
+{
+ float32x2_t temp = vpadd_f32 ( vget_high_f32(r), vget_low_f32(r) );
+ temp = vpadd_f32(temp, temp);
+ return vcombine_f32(temp,temp);
+}
+
+#define Vdot( l, r) Vsum( Vmul((l), (r)) )
+
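+// vrsqrteq_f32 only yields a rough estimate; two Newton-Raphson refinements
+// (vrsqrtsq_f32(s, e) computes (3 - s*e)/2) bring it close to full float
+// precision.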
+static MECANIM_FORCE_INLINE vec4f Vrsqrt(vec4f r)
+{
+ float32x4_t e = vrsqrteq_f32(r);
+ float32x4_t s = vmulq_f32(e, r);
+ float32x4_t v = vrsqrtsq_f32(s, e);
+
+ e = vmulq_f32(e,v);
+ s = vmulq_f32(e, r);
+ v = vrsqrtsq_f32(s, e);
+
+ return vmulq_f32(e,v);
+}
+
+static MECANIM_FORCE_INLINE vec4f Vrcp(vec4f r)
+{
+ cvec4fs(C0,-3.402823466e+38f);
+ cvec4fs(C1, 3.402823466e+38f);
+
+ float32x4_t R0 = vrecpeq_f32(r);
+ R0 = vmaxq_f32(R0, C0);
+ R0 = vminq_f32(R0, C1);
+
+ float32x4_t R1 = vrecpsq_f32(r, R0);
+ R0 = vmulq_f32(R0, R1);
+ R0 = vmaxq_f32(R0, C0);
+ R0 = vminq_f32(R0, C1);
+ R1 = vrecpsq_f32(r, R0);
+ return vmulq_f32(R0, R1);
+
+ //float32x4_t inv = vrecpeq_f32(r);
+ //float32x4_t step = vrecpsq_f32(r, inv);
+ //return vmulq_f32(step, inv);
+}
+
+static MECANIM_FORCE_INLINE vec4f Vdiv(const vec4f l, const vec4f r)
+{
+ return Vmul(l, Vrcp(r));
+}
+
+static MECANIM_FORCE_INLINE vec4f Vcombine(vec4f x, vec4f y, vec4f z, vec4f w)
+{
+	// Gather lane 0 (the x component) of each input, matching the contract of
+	// the other back-ends' Vcombine.
+	float32x2x2_t temp1 = vtrn_f32(vget_low_f32(x), vget_low_f32(y));
+	float32x2x2_t temp2 = vtrn_f32(vget_low_f32(z), vget_low_f32(w));
+	return vcombine_f32(temp1.val[0], temp2.val[0]);
+}
+
+// Vector comparison
+#define Vcmpeq( a, b) vceqq_f32(a, b)
+#define Vcmpneq( a, b) Vnot(vceqq_f32(a, b))
+#define Vcmpgt( a, b) vcgtq_f32(a, b)
+#define Vcmpge( a, b) vcgeq_f32(a, b)
+#define Vcmplt( a, b) vcltq_f32(a, b)
+#define Vcmple( a, b) vcleq_f32(a, b)
+
+static MECANIM_FORCE_INLINE vec4f Vsel(vec4b c, vec4f a, vec4f b)
+{
+ return vbslq_f32(c, a, b);
+}
+
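+// sqrt(r) computed as r * rsqrt(r); the select returns an explicit zero at
+// r == 0 to avoid 0 * inf = NaN.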
+#define Vsqrt(r) Vsel( Vcmpeq(r, Vzero()), Vzero(), Vmul(r,Vrsqrt(r)))
+
+// vector logics
+#define Vnot(r) vmvnq_u32(r)
+#define Vxnor(a, b) Vnot(veorq_u32(a, b))
+#define Vxor(a, b) veorq_u32(a, b)
+#define Vand(a, b) vandq_u32(a, b)
+#define Vor(a, b) vorrq_u32(a, b)
+
+static MECANIM_FORCE_INLINE bool Vall(const vec4b a)
+{
+ ::uint32_t u[4];
+
+ vst1q_lane_u32(&u[0], a, 0);
+ vst1q_lane_u32(&u[1], a, 1);
+ vst1q_lane_u32(&u[2], a, 2);
+ vst1q_lane_u32(&u[3], a, 3);
+
+ return (u[0] & u[1] & u[2] & u[3]);
+}
+
+static MECANIM_FORCE_INLINE bool Vany(const vec4b a)
+{
+ ::uint32_t u[4];
+
+ vst1q_lane_u32(&u[0], a, 0);
+ vst1q_lane_u32(&u[1], a, 1);
+ vst1q_lane_u32(&u[2], a, 2);
+ vst1q_lane_u32(&u[3], a, 3);
+
+ return (u[0] | u[1] | u[2] | u[3]);
+}
+
+#endif
diff --git a/Runtime/Math/Simd/ppu.h b/Runtime/Math/Simd/ppu.h
new file mode 100644
index 0000000..bfa8832
--- /dev/null
+++ b/Runtime/Math/Simd/ppu.h
@@ -0,0 +1,1944 @@
+#ifndef SIMD_PPU
+#define SIMD_PPU
+
+#include "Runtime/Math/Simd/SimdMath.h"
+#define USE_WPERMWI_EQUIVALENT 1
+typedef vec_float4 vec4f;
+typedef vec_float4 vec4fs;
+typedef vec_bint4 vec4b;
+typedef vec_bint4 vec4bs;
+
+
+#if USE_WPERMWI_EQUIVALENT
+# define SWZ_MASK(x, y, z, w) (((x&3)<<6) | ((y&3)<<4) | ((z&3)<<2) | (w&3))
+#else
+# define SWZ_MASK(x, y, z, w) (((x)<<24) | ((y)<<16) | ((z)<<8) | (w))
+#endif
+
+#define cvec4f(name, x,y,z,w) static const vec4f name = {(x),(y),(z),(w)}
+#define cvec4b(name, x,y,z,w) static const vec4b name = {(x),(y),(z),(w)}
+#define cvec4fs(name, s) static const vec4f name = {(s),(s),(s),(s)}
+
+enum simd_mask
+{
+ kXYZW = SWZ_MASK(0,1,2,3),
+ kXXXX = SWZ_MASK(0,0,0,0),
+ kYYYY = SWZ_MASK(1,1,1,1),
+ kZZZZ = SWZ_MASK(2,2,2,2),
+ kWWWW = SWZ_MASK(3,3,3,3),
+
+ kXWYZ = SWZ_MASK(0,3,1,2),
+ kXZWY = SWZ_MASK(0,2,3,1),
+
+ kYZWX = SWZ_MASK(1,2,3,0),
+ kYXZW = SWZ_MASK(1,0,2,3),
+ kYWZX = SWZ_MASK(1,3,2,0),
+ kYZXW = SWZ_MASK(1,2,0,3),
+ kYXWZ = SWZ_MASK(1,0,3,2),
+
+ kZWXY = SWZ_MASK(2,3,0,1),
+ kZYXW = SWZ_MASK(2,1,0,3),
+ kZYWX = SWZ_MASK(2,1,3,0),
+ kZXYW = SWZ_MASK(2,0,1,3),
+
+ kWYZX = SWZ_MASK(3,1,2,0),
+ kWXZY = SWZ_MASK(3,0,2,1),
+ kWYXZ = SWZ_MASK(3,1,0,2),
+ kWWWZ = SWZ_MASK(3,3,3,2),
+ kWWZZ = SWZ_MASK(3,3,2,2),
+ kWZYX = SWZ_MASK(3,2,1,0),
+};
+
+#define Vzero() __vzero()
+#define Vone() vec_ctf(vec_splat_u32(1), 0)
+
+
+#if USE_WPERMWI_EQUIVALENT
+# define Vpermute(v, mask) __vpermwi2<mask>( (v) )
+#else
+# define Vpermute(v, mask) __vpermwi3( (v), (mask) )
+#endif
+
+#if USE_WPERMWI_EQUIVALENT
+
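+// Compile-time swizzle for AltiVec: the mask is a template parameter, so the
+// if/else chain below constant-folds to the short vec_splat / vec_sld /
+// vec_merge sequence for that particular mask (no permute-mask load needed).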
+template <const int i>
+vec_float4 __vpermwi2(vec_float4 v0a)
+{
+#if 1
+ if (i == SWZ_MASK(0,0,0,0))
+ {
+ return vec_splat( v0a, 0 );
+ }
+ else if (i == SWZ_MASK(1,0,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(2,0,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(3,0,0,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ return vec_sld( v0a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(0,1,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(1,1,0,0))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ return vec_sld( v1a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(2,1,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ vec_float4 v3a = vec_mergeh( v1a, v0a );
+ return vec_sld( v3a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(3,1,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(0,2,0,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_mergeh( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(1,2,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(2,2,0,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(3,2,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 12 );
+ return vec_mergeh( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(0,3,0,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(1,3,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(2,3,0,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ return vec_sld( v0a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(3,3,0,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v1a, v1a );
+ }
+ else if (i == SWZ_MASK(0,0,1,0))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ return vec_mergeh( v0a, v1a );
+ }
+ else if (i == SWZ_MASK(1,0,1,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_mergel( v2a, v1a );
+ }
+ else if (i == SWZ_MASK(2,0,1,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 8 );
+ return vec_sld( v2a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(3,0,1,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ return vec_sld( v1a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(0,1,1,0))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ return vec_sld( v1a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(1,1,1,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(2,1,1,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(3,1,1,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 8 );
+ return vec_sld( v0a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(0,2,1,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 12 );
+ return vec_mergeh( v0a, v2a );
+ }
+ else if (i == SWZ_MASK(1,2,1,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 12 );
+ return vec_sld( v2a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(2,2,1,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ vec_float4 v3a = vec_sld( v1a, v0a, 12 );
+ return vec_sld( v2a, v3a, 8 );
+ }
+ else if (i == SWZ_MASK(3,2,1,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 12 );
+ vec_float4 v3a = vec_mergel( v0a, v2a );
+ return vec_sld( v3a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(0,3,1,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ return vec_mergeh( v0a, v1a );
+ }
+ else if (i == SWZ_MASK(1,3,1,0))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(2,3,1,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v0a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(3,3,1,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 4 );
+ return vec_mergel( v2a, v1a );
+ }
+ else if (i == SWZ_MASK(0,0,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(1,0,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v2a, v1a );
+ }
+ else if (i == SWZ_MASK(2,0,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_mergel( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(3,0,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v0a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(0,1,2,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ return vec_sld( v1a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(1,1,2,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(2,1,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(3,1,2,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(0,2,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ vec_float4 v3a = vec_sld( v1a, v0a, 8 );
+ return vec_sld( v2a, v3a, 12 );
+ }
+ else if (i == SWZ_MASK(1,2,2,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v1a );
+ return vec_sld( v2a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(2,2,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(3,2,2,0))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ vec_float4 v2a = vec_sld( v0a, v1a, 8 );
+ return vec_sld( v2a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(0,3,2,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(1,3,2,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 12 );
+ return vec_mergel( v2a, v1a );
+ }
+ else if (i == SWZ_MASK(2,3,2,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(3,3,2,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v0a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(0,0,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(1,0,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(2,0,3,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ return vec_mergel( v0a, v1a );
+ }
+ else if (i == SWZ_MASK(3,0,3,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_mergel( v2a, v1a );
+ }
+ else if (i == SWZ_MASK(0,1,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 12 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(1,1,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(2,1,3,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_mergel( v0a, v2a );
+ }
+ else if (i == SWZ_MASK(3,1,3,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(0,2,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v2a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(1,2,3,0))
+ {
+ return vec_sld( v0a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(2,2,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v2a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(3,2,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ return vec_sld( v0a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(0,3,3,0))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v1a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(1,3,3,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 8 );
+ return vec_sld( v2a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(2,3,3,0))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ return vec_sld( v1a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(3,3,3,0))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v0a, 4 );
+ }
+ else if (i == SWZ_MASK(0,0,0,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ return vec_sld( v1a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(1,0,0,1))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ return vec_sld( v1a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(2,0,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v0a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(3,0,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ return vec_mergeh( v1a, v0a );
+ }
+ else if (i == SWZ_MASK(0,1,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ return vec_sld( v1a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(1,1,0,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(2,1,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(3,1,0,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v2a, v1a );
+ }
+ else if (i == SWZ_MASK(0,2,0,1))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_mergeh( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(1,2,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ return vec_sld( v1a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(2,2,0,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(3,2,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v2a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(0,3,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v2a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(1,3,0,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v2a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(2,3,0,1))
+ {
+ return vec_sld( v0a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(3,3,0,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v0a, 8 );
+ }
+ else if (i == SWZ_MASK(0,0,1,1))
+ {
+ return vec_mergeh( v0a, v0a );
+ }
+ else if (i == SWZ_MASK(1,0,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_mergeh( v1a, v0a );
+ }
+ else if (i == SWZ_MASK(2,0,1,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(3,0,1,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v2a, 4 );
+ }
+ else if (i == SWZ_MASK(0,1,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_mergeh( v0a, v1a );
+ }
+ else if (i == SWZ_MASK(1,1,1,1))
+ {
+ return vec_splat( v0a, 1 );
+ }
+ else if (i == SWZ_MASK(2,1,1,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(3,1,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_sld( v0a, v1a, 12 );
+ }
+ else if (i == SWZ_MASK(0,2,1,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v1a, 4 );
+ }
+ else if (i == SWZ_MASK(1,2,1,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(2,2,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(3,2,1,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ vec_float4 v3a = vec_mergel( v0a, v1a );
+ return vec_sld( v3a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(0,3,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ return vec_mergeh( v0a, v2a );
+ }
+ else if (i == SWZ_MASK(1,3,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 4 );
+ return vec_mergel( v1a, v2a );
+ }
+ else if (i == SWZ_MASK(2,3,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_sld( v0a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(3,3,1,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+ else if (i == SWZ_MASK(0,0,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_mergeh( v2a, v0a );
+ }
+ else if (i == SWZ_MASK(1,0,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v1a, v0a );
+ }
+ else if (i == SWZ_MASK(2,0,2,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_mergeh( v1a, v0a );
+ }
+ else if (i == SWZ_MASK(3,0,2,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ return vec_mergeh( v2a, v0a );
+ }
+ else if (i == SWZ_MASK(0,1,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(1,1,2,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(2,1,2,1))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,1,2,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v0a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,2,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ vec_float4 v3a = vec_sld( v2a, v1a, 8 );
+ return vec_sld( v1a, v3a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,2,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v1a );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,2,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,2,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,3,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ vec_float4 v3a = vec_sld( v1a, v2a, 8 );
+ return vec_sld( v3a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(1,3,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ return vec_mergeh( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(2,3,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v0a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(3,3,2,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ vec_float4 v3a = vec_mergel( v0a, v1a );
+ return vec_sld( v2a, v3a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,0,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,0,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(2,0,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ return vec_mergel( v0a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,0,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ return vec_mergeh( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(0,1,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v3a = vec_mergel( v2a, v1a );
+ return vec_sld( v3a, v3a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,1,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,1,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_mergel( v0a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,1,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_mergel( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(0,2,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 8 );
+ return vec_sld( v1a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,2,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_sld( v0a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,2,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v2a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,2,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 8 );
+ return vec_sld( v0a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,3,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ vec_float4 v3a = vec_sld( v0a, v1a, 4 );
+ return vec_mergel( v2a, v3a );
+ }
+
+ else if (i == SWZ_MASK(1,3,3,1))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ vec_float4 v3a = vec_mergel( v1a, v0a );
+ return vec_sld( v3a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,3,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v0a, v0a );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,3,3,1))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,0,0,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(1,0,0,2))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ vec_float4 v3a = vec_sld( v1a, v1a, 8 );
+ return vec_sld( v3a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,0,0,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ vec_float4 v3a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v3a, 8 );
+ }
+
+ else if (i == SWZ_MASK(3,0,0,2))
+ {
+ vec_float4 v1a = vec_mergeh( v0a, v0a );
+ vec_float4 v2a = vec_sld( v0a, v1a, 8 );
+ return vec_sld( v2a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,1,0,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(1,1,0,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(2,1,0,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,1,0,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 12 );
+ return vec_mergel( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(0,2,0,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(1,2,0,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(2,2,0,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,2,0,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(0,3,0,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,3,0,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,3,0,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,3,0,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v2a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,0,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ return vec_sld( v1a, v0a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,0,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_sld( v1a, v0a, 12 );
+ }
+
+ else if (i == SWZ_MASK(2,0,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v0a, 12 );
+ }
+
+ else if (i == SWZ_MASK(3,0,1,2))
+ {
+ return vec_sld( v0a, v0a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,1,1,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v0a, v1a );
+ }
+
+ else if (i == SWZ_MASK(1,1,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(2,1,1,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v1a, v1a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(3,1,1,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergeh( v1a, v1a );
+ return vec_sld( v0a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,2,1,2))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ return vec_mergeh( v0a, v1a );
+ }
+
+ else if (i == SWZ_MASK(1,2,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_mergel( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(2,2,1,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(3,2,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 4 );
+ return vec_sld( v2a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,3,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ return vec_mergeh( v0a, v2a );
+ }
+
+ else if (i == SWZ_MASK(1,3,1,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,3,1,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ return vec_sld( v0a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(3,3,1,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,0,2,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,0,2,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v3a = vec_mergel( v1a, v0a );
+ return vec_mergeh( v2a, v3a );
+ }
+
+ else if (i == SWZ_MASK(2,0,2,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(3,0,2,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,1,2,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,1,2,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ return vec_mergel( v1a, v1a );
+ }
+
+ else if (i == SWZ_MASK(2,1,2,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(3,1,2,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v0a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,2,2,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v1a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,2,2,2))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(2,2,2,2))
+ {
+ return vec_splat( v0a, 2 );
+ }
+
+ else if (i == SWZ_MASK(3,2,2,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_sld( v0a, v1a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,3,2,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ vec_float4 v3a = vec_mergel( v1a, v0a );
+ return vec_sld( v3a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,3,2,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v2a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,3,2,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_sld( v0a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(3,3,2,2))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ return vec_sld( v1a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,0,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ vec_float4 v3a = vec_sld( v0a, v1a, 12 );
+ return vec_sld( v2a, v3a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,0,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ vec_float4 v3a = vec_sld( v2a, v0a, 8 );
+ return vec_mergeh( v3a, v2a );
+ }
+
+ else if (i == SWZ_MASK(2,0,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_mergeh( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(3,0,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,1,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,1,3,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v3a = vec_mergel( v1a, v0a );
+ return vec_mergel( v3a, v2a );
+ }
+
+ else if (i == SWZ_MASK(2,1,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ return vec_mergel( v0a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,1,3,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(0,2,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ vec_float4 v3a = vec_sld( v0a, v1a, 8 );
+ return vec_sld( v2a, v3a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,2,3,2))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ return vec_sld( v0a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,2,3,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_mergel( v0a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,2,3,2))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(0,3,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,3,3,2))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ vec_float4 v3a = vec_sld( v1a, v1a, 8 );
+ return vec_sld( v2a, v3a, 12 );
+ }
+
+ else if (i == SWZ_MASK(2,3,3,2))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ return vec_sld( v1a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,3,3,2))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,0,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(1,0,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,0,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ vec_float4 v3a = vec_sld( v1a, v0a, 12 );
+ return vec_mergeh( v3a, v2a );
+ }
+
+ else if (i == SWZ_MASK(3,0,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v1a );
+ return vec_sld( v2a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,1,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_sld( v2a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(1,1,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ vec_float4 v3a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v3a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,1,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ vec_float4 v3a = vec_sld( v2a, v1a, 8 );
+ return vec_sld( v1a, v3a, 12 );
+ }
+
+ else if (i == SWZ_MASK(3,1,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v1a );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,2,0,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ return vec_mergel( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(1,2,0,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_mergel( v2a, v0a );
+ }
+
+ else if (i == SWZ_MASK(2,2,0,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_mergel( v2a, v0a );
+ }
+
+ else if (i == SWZ_MASK(3,2,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(0,3,0,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(1,3,0,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(2,3,0,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,3,0,3))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(0,0,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(1,0,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ vec_float4 v3a = vec_sld( v1a, v0a, 8 );
+ return vec_sld( v3a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,0,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 8 );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,0,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,1,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergeh( v0a, v0a );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(1,1,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v2a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,1,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ vec_float4 v3a = vec_sld( v2a, v1a, 8 );
+ return vec_sld( v1a, v3a, 12 );
+ }
+
+ else if (i == SWZ_MASK(3,1,1,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ vec_float4 v3a = vec_sld( v0a, v1a, 8 );
+ return vec_sld( v3a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(0,2,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ return vec_mergel( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(1,2,1,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ return vec_mergel( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(2,2,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(3,2,1,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v0a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,3,1,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ return vec_mergeh( v0a, v1a );
+ }
+
+ else if (i == SWZ_MASK(1,3,1,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_mergel( v1a, v2a );
+ }
+
+ else if (i == SWZ_MASK(2,3,1,3))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(3,3,1,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_mergel( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(0,0,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 0 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,0,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_sld( v1a, v0a, 4 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,0,2,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v3a = vec_mergel( v2a, v1a );
+ return vec_sld( v3a, v3a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,0,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,1,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 1 );
+ return vec_sld( v2a, v1a, 12 );
+ }
+
+ else if (i == SWZ_MASK(2,1,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 2 );
+ return vec_sld( v2a, v1a, 12 );
+ }
+
+ else if (i == SWZ_MASK(3,1,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ return vec_sld( v0a, v1a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,2,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v0a, v0a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,2,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 12 );
+ return vec_mergel( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(2,2,2,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 2 );
+ return vec_mergel( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(3,2,2,3))
+ {
+ vec_float4 v1a = vec_mergel( v0a, v0a );
+ return vec_sld( v0a, v1a, 12 );
+ }
+
+ else if (i == SWZ_MASK(0,3,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_sld( v0a, v1a, 12 );
+ return vec_sld( v2a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,3,2,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_sld( v0a, v0a, 4 );
+ return vec_mergeh( v2a, v1a );
+ }
+
+ else if (i == SWZ_MASK(2,3,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ return vec_sld( v0a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(3,3,2,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v2a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,0,3,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 0 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,0,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ vec_float4 v3a = vec_sld( v1a, v0a, 4 );
+ return vec_sld( v3a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,0,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_sld( v2a, v2a, 4 );
+ }
+
+ else if (i == SWZ_MASK(3,0,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,1,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(1,1,3,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 1 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 8 );
+ }
+
+ else if (i == SWZ_MASK(2,1,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_mergel( v1a, v0a );
+ return vec_mergel( v0a, v2a );
+ }
+
+ else if (i == SWZ_MASK(3,1,3,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ vec_float4 v2a = vec_mergeh( v1a, v0a );
+ return vec_sld( v2a, v1a, 8 );
+ }
+
+ else if (i == SWZ_MASK(0,2,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_mergel( v0a, v1a );
+ return vec_sld( v1a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,2,3,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ return vec_sld( v0a, v1a, 4 );
+ }
+
+ else if (i == SWZ_MASK(2,2,3,3))
+ {
+ return vec_mergel( v0a, v0a );
+ }
+
+ else if (i == SWZ_MASK(3,2,3,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ return vec_mergel( v1a, v0a );
+ }
+
+ else if (i == SWZ_MASK(0,3,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 4 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 12 );
+ }
+
+ else if (i == SWZ_MASK(1,3,3,3))
+ {
+ vec_float4 v1a = vec_sld( v0a, v0a, 8 );
+ vec_float4 v2a = vec_splat( v0a, 3 );
+ return vec_sld( v1a, v2a, 12 );
+ }
+ else if (i == SWZ_MASK(2,3,3,3))
+ {
+ vec_float4 v1a = vec_splat( v0a, 3 );
+ return vec_sld( v0a, v1a, 8 );
+ }
+ else if (i == SWZ_MASK(3,3,3,3))
+ {
+ return vec_splat( v0a, 3 );
+ }
+ else if (i == SWZ_MASK(0,1,2,3))
+ {
+ return v0a;
+ }
+ return v0a;
+#endif
+}
+
+#else
+
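+// Runtime swizzle fallback: decode the byte-packed mask and go through a
+// generic vec_perm, which needs a permute-mask vector built per call.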
+MECANIM_FORCE_INLINE vec4f __vpermwi3(vec4f v0a, const unsigned int mask)
+{
+ unsigned int w = (mask >> 0 ) & 0xff;
+ unsigned int z = (mask >> 8 ) & 0xff;
+ unsigned int y = (mask >> 16 ) & 0xff;
+ unsigned int x = (mask >> 24 ) & 0xff;
+
+ vec_uint4 wmask = V4BuildPermuteMask(x,y,z,w);
+ return vec_perm(v0a, v0a, (vec_uchar16)wmask);
+}
+#endif
+
+
+
+MECANIM_FORCE_INLINE vec4f Vmove(vec4f l, vec4f r)
+{
+ static const vec_uint4 vu32 = {0xFFFFFFFF,0,0,0};
+ return __vsel(l, r, vu32);
+}
+
+template<int SWZ> struct Vswizzle
+{
+ static inline vec4f rhs(vec4f r)
+ {
+ return Vpermute(r, SWZ);
+ }
+
+ static inline vec4f lhs(vec4f l, vec4f r)
+ {
+ vec4f m = Vmove(Vswizzle<SWZ>::rhs(l), r);
+ vec4f v = Vswizzle<SWZ>::rhs(m);
+ return v;
+ }
+};
+template<> struct Vswizzle<kXYZW>
+{
+ static inline vec4f rhs(vec4f r)
+ {
+ return r;
+ }
+ static inline vec4f lhs(vec4f l, vec4f r)
+ {
+ return Vmove(l, r);
+ }
+};
+
+MECANIM_FORCE_INLINE float Vstoresf(vec4f r)
+{
+ float f;
+ vec_ste(__vspltw(r, 0), 0, &f);
+ return f;
+}
+
+MECANIM_FORCE_INLINE bool Vstoresb(vec4b r)
+{
+ r=__vspltw(r, 0);
+ return !vec_all_eq((vec4f)r, Vzero());
+}
+
+// Aligned store
+#define Vstorepf(v, base, offset) __stvx((v), (base), (offset))
+
+MECANIM_FORCE_INLINE void Vstorepb(vec4b v, bool* r)
+{
+ union {
+ vec4b v;
+ int i[4];
+ } a; a.v = v;
+ r[0] = a.i[0] != 0;
+ r[1] = a.i[1] != 0;
+ r[2] = a.i[2] != 0;
+ r[3] = a.i[3] != 0;
+}
+
+MECANIM_FORCE_INLINE vec4f Vloadsf(float s)
+{
+ vec4f v = {s,s,s,s};
+ return v;
+}
+
+MECANIM_FORCE_INLINE vec4b Vloadsb(bool s)
+{
+ // vec_splat_u32 takes a 5-bit immediate in [-16,15]; -1 splats 0xFFFFFFFF into every lane.
+ vec4b vTrue = (vec4b)vec_splat_u32(-1);
+
+ return s ? vTrue : (vec4b)Vzero();
+}
+
+MECANIM_FORCE_INLINE vec4f Vload4sf(float x, float y, float z, float w)
+{
+ vec4f v = {x,y,z,w};
+ return v;
+}
+
+static MECANIM_FORCE_INLINE vec4b Vload4sb(bool x, bool y, bool z, bool w)
+{
+ static const unsigned int false_true[2] = {0,~0};
+
+ vec4b v = (vec4b)(vec_uint4) { false_true[x], false_true[y], false_true[z], false_true[w] };
+ return v;
+}
+
+#define Vloadpf(v, offset) __lvx((v), (offset))
+
+#define Vadd(l, r) __vaddfp((l), (r))
+
+#define Vsub( l, r) __vsubfp((l), (r))
+
+#define Vmul( l, r) __vmulfp((l), (r))
+
+MECANIM_FORCE_INLINE vec4f Vrcp(vec4f r)
+{
+ // Two Newton-Raphson refinement iterations (adapted from XMVector)
+ vec_float4 Reciprocal = vec_re(r);
+
+ // First refinement iteration (Newton-Raphson) for 1.0 / x
+ // y0 = reciprocal_estimate(x)
+ // y1 = y0 + y0 * (1.0 - x * y0)
+
+ vec_float4 vone = Vone();
+ vec_float4 Scale = vec_nmsub(r, Reciprocal, vone);
+ vec_float4 Result = vec_madd(Reciprocal, Scale, Reciprocal);
+
+ // Second refinement iteration
+ // y2 = y1 + y1 * (1.0 - x * y1)
+
+ Scale = vec_nmsub(r, Result, vone);
+ vec_bint4 Refine = vec_cmpeq(Result, Result);
+ Result = vec_madd(Result, Scale, Result);
+ return (vec_sel(Reciprocal, Result, Refine));
+}
+
+MECANIM_FORCE_INLINE vec4f Vdiv(vec4f l, vec4f r)
+{
+ // Division as l * (1/r), using the two-iteration Vrcp above
+ return Vmul(l, Vrcp(r));
+}
+
+#define Vmadd( a, b, c) __vmaddfp((a), (b), (c))
+
+#define Vmsub( a, b, c) Vneg(__vnmsubfp((a), (b), (c)))
+
+#define Vneg(r) __vxor( (r), __vsignedzero)
+
+
+// vector sgn: returns -1 or 1
+#define Vsgn(r) __vor(Vone(), __vand(__vsignedzero, (r) ))
+
+// vector sign: returns -1, 0, or 1
+static MECANIM_FORCE_INLINE vec4f Vsign(vec4f r)
+{
+ vec4f c = (vec4f)__vcmpeqfp(r, Vzero());
+ return __vor( __vand(vec_nor(c,c), Vone()), __vand(__vsignedzero, r ));
+}
+
+#define Vinc(r) Vadd( (r), Vone())
+#define Vdec(r) Vsub( (r), Vone())
+#define Vabs(r) __vandc((r), __vsignedzero)
+#define Vmax( l, r) __vmaxfp((l), (r))
+#define Vmin( l, r) __vminfp((l), (r))
+
+MECANIM_FORCE_INLINE vec4f Vlargest(vec4f r)
+{
+ r = Vmax(r, Vswizzle<kYZWX>::rhs(r));
+ r = Vmax(r, Vswizzle<kZWXY>::rhs(r));
+ return r;
+}
+
+MECANIM_FORCE_INLINE vec4f Vsmallest(vec4f r)
+{
+ r = Vmin(r, Vswizzle<kYZWX>::rhs(r));
+ r = Vmin(r, Vswizzle<kZWXY>::rhs(r));
+ return r;
+}
+
+MECANIM_FORCE_INLINE vec4f Vsum(vec4f r)
+{
+ r = Vadd(r, Vswizzle<kYZWX>::rhs(r) );
+ r = Vadd(r, Vswizzle<kZWXY>::rhs(r) );
+ return Vswizzle<kXXXX>::rhs(r);
+}
+
+#define Vdot( l, r) __vmsum4fp( (l), (r) )
+
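+// One Newton-Raphson step on the __vrsqrtefp estimate: e' = e*(3 - r*e*e)/2.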
+MECANIM_FORCE_INLINE vec4f Vrsqrt(vec4f r)
+{
+ static const vec4f three = {3.f,3.f,3.f,3.f};
+ static const vec4f a = {0.5f,0.5f,0.5f,0.5f};
+
+ vec4f const e = __vrsqrtefp(r);
+ return Vmul( Vmul(e, Vsub(three, Vmul( Vmul(e,e),r))), a);
+}
+
+
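+// sqrt(r) = r * rsqrt(r), with a select to force sqrt(0) = 0 instead of NaN.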
+#define Vsqrt(r) __vsel( Vmul(r, Vrsqrt(r)), Vzero(), __vcmpeqfp(r, Vzero()))
+
+#define Vcombine(x,y,z,w) __vmrghw(__vmrghw((x), (z)), __vmrghw((y), (w)))
+
+// Vector comparison
+#define Vcmpeq( a, b) (vec_bint4)__vcmpeqfp((a), (b))
+
+MECANIM_FORCE_INLINE vec4b Vcmpneq( vec4f a, vec4f b)
+{
+ vec4f c = (vec4f)Vcmpeq(a, b);
+ return (vec4b)__vnor(c, c);
+}
+
+#define Vcmpgt( a, b) (vec_bint4)__vcmpgtfp((a), (b))
+#define Vcmpge( a, b) (vec_bint4)__vcmpgefp((a), (b))
+#define Vcmplt( a, b) (vec_bint4)__vcmpgtfp((b), (a))
+#define Vcmple( a, b) (vec_bint4)__vcmpgefp((b), (a))
+
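+// Branch-free select via the xor trick: b ^ ((a ^ b) & c).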
+#define Vsel( c, a, b) __vxor(b, __vand(__vxor(a, b), c))
+
+// vector logics
+#define Vnot(r) __vnor( (r), (r) )
+#define Vxnor( a, b) Vnot(__vxor((a), (b)))
+#define Vxor( a, b) __vxor((a), (b))
+#define Vand( a, b) __vand((a), (b))
+#define Vor( a, b) __vor((a), (b))
+
+MECANIM_FORCE_INLINE bool Vall(vec4b a)
+{
+ return vec_all_ne((vec4f)a, Vzero());
+}
+
+MECANIM_FORCE_INLINE bool Vany(vec4b a)
+{
+ // Not all words equal to 0
+ return vec_any_ne((vec4f)a, Vzero());
+}
+
+#endif
diff --git a/Runtime/Math/Simd/quaternion.h b/Runtime/Math/Simd/quaternion.h
new file mode 100644
index 0000000..ccb5077
--- /dev/null
+++ b/Runtime/Math/Simd/quaternion.h
@@ -0,0 +1,253 @@
+#ifndef SIMD_QUATERNION_H
+#define SIMD_QUATERNION_H
+
+#include "Runtime/Math/Simd/math.h"
+
+namespace math
+{
+
+static inline float4 quatIdentity()
+{
+ cvec4f(id, 0,0,0,1);
+ return float4(id);
+}
+
+static inline float4 quatConj(float4 const& q)
+{
+ cvec4f(conj, -1,-1,-1,1);
+ return float4(Vmul(q.eval(), conj));
+}
+
+static inline float4 quatMul(float4 const& a, float4 const& b)
+{
+ return quatConj(a.zxyw()*b.yzxw() - a.ywzx()*b.zywx() - a.wyxz()*b.xwyz() - a.xzwy()*b.wxzy());
+}
+
+static inline float4 quatMulVec(float4 const& q, float4 const& v1)
+{
+ const float4 v = math::vector(v1 + v1);
+ const float4 qv = q*v;
+ return q.w()*(cross(q, v) + q.w()*v) + q*(qv + qv.yzxw() + qv.zxyw()) + quatConj(v1);
+}
+
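+// Normalized lerp between quaternions; multiplying q by sgn(dot(p, q))
+// keeps the interpolation on the shorter arc.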
+static inline float4 quatLerp(float4 const& p, float4 const& q, float1 const& blend)
+{
+ return normalize(p + blend*(q*sgn(dot(p, q)) - p));
+}
+
+static inline float4 quatArcRotate(float4 const& a, float4 const& b)
+{
+ float4 q = cross(a, b);
+ q.w() = dot(a, b) + math::sqrt( float1(dot(a)*dot(b)) );
+ return q;
+}
+
+static inline float4 quatArcRotateX(float4 const& n)
+{
+ return float4(float1::zero(),-n.z(),n.y(),n.x()+float1::one());
+}
+
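+// quatXcos/quatYcos/quatZcos: the rotated basis axes (rotation-matrix
+// columns) of the unit quaternion qn.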
+static inline float4 quatXcos(float4 const &qn)
+{
+ const float4 qw = qn.w()*qn - float4(0, 0, 0, .5f);
+ const float4 u = qn.x()*qn + float4(1, 1, -1, -1) * qw.wzyx();
+ return u + u;
+}
+
+static inline float4 quatYcos(float4 const &qn)
+{
+ const float4 qw = qn.w()*qn - float4(0, 0, 0, .5f);
+ const float4 v = qn.y()*qn + float4(-1, 1, 1, -1)*qw.zwxy();
+ return v + v;
+}
+
+static inline float4 quatZcos(float4 const &qn)
+{
+ const float4 qw = qn.w()*qn - float4(0, 0, 0, .5f);
+ const float4 w = qn.z()*qn + float4(1, -1, 1, -1)*qw.yxwz();
+
+ return w + w;
+}
+
+static inline float4 quatEulerToQuat(float4 const& euler)
+{
+ float4 s, c; sincos( float1(0.5f)*euler, s, c);
+
+ const float4 t = float4(s.x()*c.z(), s.x()*s.z(), c.x()*s.z(), c.x()*c.z());
+
+ constant_float4( mask, -1.f, 1.f, -1.f, 1.f);
+ return c.y()*t + s.y()*mask*t.zwxy();
+}
+
+static inline float4 quatQuatToEuler(float4 const& q)
+{
+ float4 euler;
+
+ const float4 x = q.x()*q;
+ const float4 y = q.y()*q;
+ const float1 discr = x.z() - y.w();
+
+ if(discr >= float1(.5f - M_EPSF))
+ {
+ float1 _y = x.w() - y.z();
+ float1 _x = -x.z() - y.w();
+
+ euler = float4( atan2( _y.tofloat(), _x.tofloat() ), -M_PI_2f, 0.f, 0.f);
+ }
+ else
+ {
+ const float4 w = q.wwwz()*q.wwzz() - float4(.5f, 0.f, 0.f, 0.f);
+ if(discr <= float1(M_EPSF - .5f))
+ {
+ float1 _y = x.y() - w.z();
+ float1 _x = y.y() + w.x();
+ euler = float4( atan2( _y.tofloat(), _x.tofloat() ), M_PI_2f, 0.f, 0.f);
+ }
+ else
+ {
+ float1 _yX = x.w() + y.z();
+ float1 _xX = w.w() + w.x();
+ float1 discr2 = discr + discr;
+ float1 _yZ = x.y() + w.z();
+ float1 _xZ = x.x() + w.x();
+
+ euler = float4( atan2( _yX.tofloat(), _xX.tofloat() ), -asin( discr2.tofloat() ), atan2( _yZ.tofloat(), _xZ.tofloat() ), 0.f);
+ }
+ }
+ return euler;
+}
+
+// get unit quaternion from rotation matrix
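+// Branches on the largest diagonal term (Shepperd-style selection) so the
+// divisor in the final normalize stays well away from zero.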
+static inline float4 quatMatrixToQuat(float4 const& u, float4 const& v, float4 const& w)
+{
+ float4 q;
+ if(u.x() >= float1::zero())
+ {
+ const float1 t = v.y() + w.z();
+ if(t >= float1::zero())
+ {
+ float1 x(v.z() - w.y());
+ float1 y(w.x() - u.z());
+ float1 z(u.y() - v.x());
+ float1 ww(float1::one() + u.x() + t);
+ q = float4(x, y, z, ww);
+ // Android doesn't like this expression; it generates the wrong assembly
+ //q = float4(v.z() - w.y(), w.x() - u.z(), u.y() - v.x(), float1::one() + u.x() + t);
+ }
+ else
+ {
+ float1 x(float1::one() + u.x() - t);
+ float1 y(u.y() + v.x());
+ float1 z(w.x() + u.z());
+ float1 ww(v.z() - w.y());
+ q = float4(x, y, z, ww);
+ // Android doesn't like this expression; it generates the wrong assembly
+ //q = float4(float1::one() + u.x() - t, u.y() + v.x(), w.x() + u.z(), v.z() - w.y());
+ }
+ }
+ else
+ {
+ const float1 t = v.y() - w.z();
+ if(t >= float1::zero())
+ {
+ float1 x(u.y() + v.x());
+ float1 y(float1::one() - u.x() + t);
+ float1 z(v.z() + w.y());
+ float1 ww(w.x() - u.z());
+ q = float4(x, y, z, ww);
+ // Android doesn't like this expression; it generates the wrong assembly
+ //q = float4(u.y() + v.x(), float1::one() - u.x() + t, v.z() + w.y(), w.x() - u.z());
+ }
+ else
+ {
+ float1 x(w.x() + u.z());
+ float1 y(v.z() + w.y());
+ float1 z(float1::one() - u.x() - t);
+ float1 ww(u.y() - v.x());
+ q = float4(x, y, z, ww);
+ // Android doesn't like this expression; it generates the wrong assembly
+ //q = float4(w.x() + u.z(), v.z() + w.y(), float1::one() - u.x() - t, u.y() - v.x());
+ }
+ }
+ return normalize(q);
+}
+
+static inline float4 quatProjOnYPlane(float4 const& q)
+{
+ constant_float4(yMask, 0,1,0,0);
+ constant_float4(ywMask, 0,1,0,1);
+
+ const float4 lQAlignUp = quatArcRotate(quatYcos(q), yMask );
+
+ return normalize( quatMul(lQAlignUp,q) * ywMask);
+}
+
+static inline float4 quatClamp(const float4 &q,float maxAngle)
+{
+ float4 ret = q;
+
+ float1 halfCosMaxAngle = float1(math::cos(0.5f*maxAngle));
+
+ float4 qn = normalize(q);
+ qn = cond(qn.w() < float1::zero(), -qn, qn);
+
+ if(qn.w() < halfCosMaxAngle)
+ {
+ float1 fact = (halfCosMaxAngle - qn.w()) / halfCosMaxAngle;
+
+ ret = qn * (float1::one() - fact);
+ ret.w() = lerp(halfCosMaxAngle, float1::one(), fact);
+ }
+
+ return ret;
+}
+
+static inline float4 quatWeight(float4 const& q, float1 const& w)
+{
+ return normalize(float4(q.x()*w,q.y()*w,q.z()*w,q.w()));
+}
+
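+// qtan representation: q.xyz / q.w (tan of the half-angle times the axis);
+// w is nudged away from zero to keep the division finite.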
+static inline float4 quat2Qtan(float4 const& q)
+{
+ float1 w = q.w();
+ w = cond(w == float1::zero(), float1(M_EPSF), w);
+ return math::vector(q/w);
+}
+
+static inline float4 qtan2Quat(float4 const& q)
+{
+ float4 qn = q;
+ qn.w() = float1::one();
+ return normalize(qn);
+}
+
+static inline float4 ZYRoll2Quat(float4 const& zyroll)
+{
+ return normalize(float4(zyroll.x(),zyroll.y()+zyroll.x()*zyroll.z(),zyroll.z()-zyroll.x()*zyroll.y(), float1::one()));
+}
+
+static inline float4 quat2ZYRoll(float4 const& q)
+{
+ const float4 qtan = quat2Qtan(q);
+ const float1 qtanx = qtan.x();
+ const float1 x2p1 = float1::one()+qtanx*qtanx;
+ return float4(qtanx,(qtan.y()-qtanx*qtan.z())/x2p1,(qtan.z()+qtanx*qtan.y())/x2p1,float1::zero());
+}
+
+static inline float4 RollZY2Quat(float4 const& zyroll)
+{
+ return normalize(float4(zyroll.x(),zyroll.y()-zyroll.x()*zyroll.z(),zyroll.z()+zyroll.x()*zyroll.y(),float1::one()));
+}
+
+static inline float4 quat2RollZY(float4 const& q)
+{
+ const float4 qtan = quat2Qtan(q);
+ const float1 qtanx = qtan.x();
+ const float1 x2p1 = float1::one()+qtanx*qtanx;
+ return float4(qtanx,(qtan.y()+qtanx*qtan.z())/x2p1,(qtan.z()-qtanx*qtan.y())/x2p1,float1::zero());
+}
+
+}
+
+#endif
+
diff --git a/Runtime/Math/Simd/sse.h b/Runtime/Math/Simd/sse.h
new file mode 100644
index 0000000..59796d7
--- /dev/null
+++ b/Runtime/Math/Simd/sse.h
@@ -0,0 +1,237 @@
+#ifndef SIMD_SSE_H
+#define SIMD_SSE_H
+
+#include <xmmintrin.h>
+#include <emmintrin.h>
+
+typedef __m128 vec4f; // vector 4 packed
+typedef __m128 vec4fs; // vector 4 scalar
+typedef __m128 vec4b; // vector 4 bool packed
+typedef __m128 vec4bs; // vector 4 bool scalar
+
+#define SWZ_MASK(x, y, z, w) _MM_SHUFFLE(w,z,y,x)
+
+#define cvec4f(name, x,y,z,w) static const vec4f name = {x,y,z,w}
+#define cvec4b(name, x,y,z,w) static const vec4b name = {x,y,z,w}
+#define cvec4fs(name, s) static const vec4f name = {s,s,s,s}
+
+enum simd_mask
+{
+ kXYZW = SWZ_MASK(0,1,2,3),
+ kXXXX = SWZ_MASK(0,0,0,0),
+ kYYYY = SWZ_MASK(1,1,1,1),
+ kZZZZ = SWZ_MASK(2,2,2,2),
+ kWWWW = SWZ_MASK(3,3,3,3),
+
+ kXWYZ = SWZ_MASK(0,3,1,2),
+ kXZWY = SWZ_MASK(0,2,3,1),
+
+ kYZWX = SWZ_MASK(1,2,3,0),
+ kYXZW = SWZ_MASK(1,0,2,3),
+ kYWZX = SWZ_MASK(1,3,2,0),
+ kYZXW = SWZ_MASK(1,2,0,3),
+ kYXWZ = SWZ_MASK(1,0,3,2),
+
+ kZWXY = SWZ_MASK(2,3,0,1),
+ kZYXW = SWZ_MASK(2,1,0,3),
+ kZYWX = SWZ_MASK(2,1,3,0),
+ kZXYW = SWZ_MASK(2,0,1,3),
+
+ kWYZX = SWZ_MASK(3,1,2,0),
+ kWXZY = SWZ_MASK(3,0,2,1),
+ kWYXZ = SWZ_MASK(3,1,0,2),
+ kWWWZ = SWZ_MASK(3,3,3,2),
+ kWWZZ = SWZ_MASK(3,3,2,2),
+ kWZYX = SWZ_MASK(3,2,1,0),
+};
+
+#define Vzero() _mm_setzero_ps()
+
+#define Vone() _mm_set1_ps(1.f)
+
+#define Vpermute(v, mask) _mm_shuffle_ps( (v), (v), (mask) )
+
+#define Vmove(l, r) _mm_move_ss( (l), (r) )
+
+
+template<int SWZ> struct Vswizzle
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return Vpermute(r, SWZ);
+ }
+
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ vec4f m = Vmove(Vswizzle<SWZ>::rhs(l), r);
+ return Vswizzle<SWZ>::rhs(m);
+ }
+};
+
+template<> struct Vswizzle<kXYZW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return r;
+ }
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ return Vmove(l, r);
+ }
+};
+
+static MECANIM_FORCE_INLINE float Vstoresf(vec4f r)
+{
+ float f; _mm_store_ss(&f, r); return f;
+}
+
+#define Vstoresb(r) ((_mm_movemask_ps((r)) & (0x1<<0)) != 0)
+
+// Aligned store
+#define Vstorepf(v, base, offset) _mm_store_ps((base)+(offset), (v))
+
+static MECANIM_FORCE_INLINE void Vstorepb(vec4f v, bool* r)
+{
+ r[0] = ( _mm_movemask_ps(v) & 0x1<<0) != 0;
+ r[1] = ( _mm_movemask_ps(v) & 0x1<<1) != 0;
+ r[2] = ( _mm_movemask_ps(v) & 0x1<<2) != 0;
+ r[3] = ( _mm_movemask_ps(v) & 0x1<<3) != 0;
+}
+
+#define Vloadsf(s) _mm_set1_ps(s)
+
+static MECANIM_FORCE_INLINE vec4f Vloadsb(bool s)
+{
+ union {
+ int b[4];
+ vec4f v;
+ } static const false_true[2] = {
+ {0,0,0,0},
+ {~0,~0,~0,~0}
+ };
+
+ return false_true[s].v;
+}
+
+#define Vload4sf(x, y, z, w) _mm_set_ps(w, z, y, x)
+
+static MECANIM_FORCE_INLINE vec4f Vload4sb(bool x, bool y, bool z, bool w)
+{
+ union {
+ int b;
+ float f;
+ } static const false_true[2] = {
+ 0,~0
+ };
+
+ return _mm_set_ps(false_true[w].f, false_true[z].f, false_true[y].f, false_true[x].f);
+}
+
+#define Vloadpf(v, offset) _mm_load_ps( (v)+(offset))
+
+#define Vadd(l, r) _mm_add_ps((l), (r))
+
+#define Vsub( l, r) _mm_sub_ps((l), (r))
+
+#define Vmul( l, r) _mm_mul_ps((l), (r))
+
+#define Vdiv( l, r) _mm_div_ps((l), (r))
+
+#define Vmadd( a, b, c) _mm_add_ps(_mm_mul_ps((a), (b)), (c))
+
+#define Vmsub( a, b, c) _mm_sub_ps(_mm_mul_ps((a), (b)), (c))
+
+
+static MECANIM_FORCE_INLINE vec4f Vneg(vec4f r)
+{
+ static const vec4f sign_constant = {-0.f,-0.f,-0.f,-0.f};
+ return _mm_xor_ps( (r), sign_constant);
+}
+
+// vector sgn: returns -1 or 1
+static MECANIM_FORCE_INLINE vec4f Vsgn(vec4f r)
+{
+ static const vec4f sign_constant = {-0.f,-0.f,-0.f,-0.f};
+ return _mm_or_ps(Vone(), _mm_and_ps(sign_constant, (r) ));
+}
+
+// vector sign: returns -1, 0, or 1
+static MECANIM_FORCE_INLINE vec4f Vsign(vec4f r)
+{
+ static const vec4f sign_constant = {-0.f,-0.f,-0.f,-0.f};
+ return _mm_or_ps( _mm_and_ps( _mm_cmpneq_ps(r, Vzero()), Vone()), _mm_and_ps(sign_constant, r ));
+}
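+
+// Worked example (values are illustrative): for r = {-3.5f, 0.f, 2.f, 0.5f},
+// Vsgn copies just the sign bit of each lane onto 1.0f and returns {-1,1,1,1},
+// while Vsign also zeroes the lanes where r == 0, giving {-1,0,1,1}.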
+
+#define Vinc(r) Vadd( (r), Vone())
+#define Vdec(r) Vsub( (r), Vone())
+
+static MECANIM_FORCE_INLINE vec4f Vabs(vec4f r)
+{
+ static const vec4f sign_constant = {-0.f,-0.f,-0.f,-0.f};
+ return _mm_andnot_ps(sign_constant, (r));
+}
+
+#define Vmax( l, r) _mm_max_ps((l), (r))
+#define Vmin( l, r) _mm_min_ps((l), (r))
+
+static MECANIM_FORCE_INLINE vec4fs Vlargest(vec4f r)
+{
+ r = Vmax(r, Vswizzle<kYZWX>::rhs(r));
+ r = Vmax(r, Vswizzle<kZWXY>::rhs(r));
+ return r;
+}
+
+static MECANIM_FORCE_INLINE vec4fs Vsmallest(vec4f r)
+{
+ r = Vmin(r, Vswizzle<kYZWX>::rhs(r));
+ r = Vmin(r, Vswizzle<kZWXY>::rhs(r));
+ return r;
+}
+
+static MECANIM_FORCE_INLINE vec4fs Vsum(vec4f r)
+{
+ r = Vadd(r, Vswizzle<kYZWX>::rhs(r) );
+ r = Vadd(r, Vswizzle<kZWXY>::rhs(r) );
+ return Vswizzle<kXXXX>::rhs(r);
+}
+
+#define Vdot( l, r) Vsum( Vmul((l), (r)) )
+#define Vsqrt(r) _mm_sqrt_ps((r))
+
+static MECANIM_FORCE_INLINE vec4f Vrsqrt(vec4f r)
+{
+ vec4f const e = _mm_rsqrt_ps(r);
+ return Vmul(Vmul(e, Vsub(_mm_set1_ps(3.0f), Vmul(Vmul(e,e),r))), _mm_set1_ps(.5f));
+}
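+
+// The multiply chain above is one Newton-Raphson step refining the ~12-bit
+// _mm_rsqrt_ps estimate e toward 1/sqrt(r): e' = 0.5 * e * (3 - e*e*r).
+// Each step roughly doubles the number of correct bits.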
+
+static MECANIM_FORCE_INLINE vec4f Vrcp(vec4f r)
+{
+ vec4f e = _mm_rcp_ps( r );
+ return Vsub( Vadd(e, e), Vmul(r, Vmul(e, e)));
+}
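+
+// Likewise one Newton-Raphson step on the _mm_rcp_ps estimate e of 1/r:
+// e' = 2*e - r*e*e, written above as (e + e) - r*(e*e).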
+
+#define Vcombine(x,y,z,w) _mm_movelh_ps(_mm_unpacklo_ps( (x), (y) ), _mm_unpacklo_ps((z), (w)))
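+
+// Vcombine packs the x lane of each argument into one vector:
+//   _mm_unpacklo_ps(x,y) -> {x0,y0,x1,y1}, _mm_unpacklo_ps(z,w) -> {z0,w0,z1,w1},
+//   _mm_movelh_ps of the two -> {x0,y0,z0,w0}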
+
+// Vector comparison
+#define Vcmpeq( a, b) _mm_cmpeq_ps((a), (b))
+#define Vcmpneq( a, b) _mm_cmpneq_ps((a), (b))
+#define Vcmpgt( a, b) _mm_cmpgt_ps((a), (b))
+#define Vcmpge( a, b) _mm_cmpge_ps((a), (b))
+#define Vcmplt( a, b) _mm_cmplt_ps((a), (b))
+#define Vcmple( a, b) _mm_cmple_ps((a), (b))
+
+static MECANIM_FORCE_INLINE vec4f Vsel( vec4f c, vec4f a, vec4f b)
+{
+ return _mm_xor_ps(b, _mm_and_ps(_mm_xor_ps(a, b), c));
+}
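+
+// Branchless select: (a ^ b) & c keeps the differing bits where the mask c is
+// set, and xor-ing them back into b yields a where c is all-ones and b where c
+// is zero. Usage sketch (illustrative), with c coming from a comparison:
+//   Vsel(Vcmplt(v, Vzero()), Vzero(), v)   // per-lane max(v, 0.f)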
+
+// vector logics
+#define Vnot(r) _mm_cmpeq_ps( (r), Vzero() )
+#define Vxnor( a, b) Vnot(_mm_xor_ps((a), (b)))
+#define Vxor( a, b) _mm_xor_ps((a), (b))
+#define Vand( a, b) _mm_and_ps((a), (b))
+#define Vor( a, b) _mm_or_ps((a), (b))
+#define Vall(a) ((_mm_movemask_ps((a)) & 0xf) == 0xf)
+#define Vany(a) (_mm_movemask_ps((a)) != 0)
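+
+// Usage sketch (illustrative): both take a comparison mask, e.g.
+//   if (Vall(Vcmpge(v, Vzero()))) { /* every lane non-negative */ }
+//   if (Vany(Vcmpeq(v, Vzero()))) { /* at least one zero lane */ }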
+
+#endif
diff --git a/Runtime/Math/Simd/xenon.h b/Runtime/Math/Simd/xenon.h
new file mode 100644
index 0000000..5ddfc68
--- /dev/null
+++ b/Runtime/Math/Simd/xenon.h
@@ -0,0 +1,275 @@
+#ifndef SIMD_XENON
+#define SIMD_XENON
+
+#include <vectorintrinsics.h>
+#include <xnamath.h>
+
+typedef __vector4 vec4f; // vector 4 packed
+typedef __vector4 vec4fs; // vector 4 scalar
+typedef __vector4 vec4b; // vector 4 bool packed
+typedef __vector4 vec4bs; // vector 4 bool scalar
+
+#define SWZ_MASK(x, y, z, w) VPERMWI_CONST(x, y, z, w)
+
+#define cvec4f(name, x,y,z,w) static const vec4f name = {(x),(y),(z),(w)}
+#define cvec4b(name, x,y,z,w) static const vec4b name = {(x),(y),(z),(w)}
+#define cvec4fs(name, s) static const vec4fs name = {(s),(s),(s),(s)}
+
+enum simd_mask
+{
+ kXYZW = SWZ_MASK(0,1,2,3),
+ kXXXX = SWZ_MASK(0,0,0,0),
+ kYYYY = SWZ_MASK(1,1,1,1),
+ kZZZZ = SWZ_MASK(2,2,2,2),
+ kWWWW = SWZ_MASK(3,3,3,3),
+
+ kXWYZ = SWZ_MASK(0,3,1,2),
+ kXZWY = SWZ_MASK(0,2,3,1),
+
+ kYZWX = SWZ_MASK(1,2,3,0),
+ kYXZW = SWZ_MASK(1,0,2,3),
+ kYWZX = SWZ_MASK(1,3,2,0),
+ kYZXW = SWZ_MASK(1,2,0,3),
+ kYXWZ = SWZ_MASK(1,0,3,2),
+
+ kZWXY = SWZ_MASK(2,3,0,1),
+ kZYXW = SWZ_MASK(2,1,0,3),
+ kZYWX = SWZ_MASK(2,1,3,0),
+ kZXYW = SWZ_MASK(2,0,1,3),
+
+ kWYZX = SWZ_MASK(3,1,2,0),
+ kWXZY = SWZ_MASK(3,0,2,1),
+ kWYXZ = SWZ_MASK(3,1,0,2),
+ kWWWZ = SWZ_MASK(3,3,3,2),
+ kWWZZ = SWZ_MASK(3,3,2,2),
+ kWZYX = SWZ_MASK(3,2,1,0),
+};
+
+#define Vzero() __vzero()
+#define Vone() __vupkd3d(__vspltisw(0), VPACK_D3DCOLOR)
+
+#define Vpermute(v, mask) __vpermwi( (v), (mask) )
+
+MECANIM_FORCE_INLINE vec4f Vmove(vec4f l, vec4f r)
+{
+ // Mirror _mm_move_ss: take the x lane from r, y/z/w from l
+ static const XMVECTORU32 vu32 = {0xFFFFFFFF,0,0,0};
+ return __vsel(l, r, vu32.v);
+}
+
+template<int SWZ> struct Vswizzle
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return Vpermute(r, SWZ);
+ }
+
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ vec4f m = Vmove(Vswizzle<SWZ>::rhs(l), r);
+ return Vswizzle<SWZ>::rhs(m);
+ }
+};
+template<> struct Vswizzle<kXYZW>
+{
+ static MECANIM_FORCE_INLINE vec4f rhs(vec4f r)
+ {
+ return r;
+ }
+ static MECANIM_FORCE_INLINE vec4f lhs(vec4f l, vec4f r)
+ {
+ return Vmove(l, r);
+ }
+};
+
+MECANIM_FORCE_INLINE float Vstoresf(vec4f r)
+{
+ float f; __stvewx(__vspltw(r, 0), &f, 0); return f;
+}
+
+MECANIM_FORCE_INLINE bool Vstoresb(vec4f r)
+{
+ union {
+ vec4f v;
+ int i[4];
+ } a; a.v = r;
+ return a.i[0] != 0;
+}
+
+// Aligned store
+#define Vstorepf(v, base, offset) __stvx((v), (base), (offset))
+
+MECANIM_FORCE_INLINE void Vstorepb(vec4f v, bool* r)
+{
+ union {
+ vec4f v;
+ int i[4];
+ } a; a.v = v;
+ r[0] = a.i[0] != 0;
+ r[1] = a.i[1] != 0;
+ r[2] = a.i[2] != 0;
+ r[3] = a.i[3] != 0;
+}
+
+MECANIM_FORCE_INLINE vec4f Vloadsf(float s)
+{
+ vec4f v = {s,s,s,s};
+ return v;
+}
+
+MECANIM_FORCE_INLINE vec4f Vloadsb(bool s)
+{
+ union {
+ int b[4];
+ vec4f v;
+ } static const false_true[2] = {
+ {0,0,0,0},
+ {~0,~0,~0,~0}
+ };
+
+ return false_true[s].v;
+}
+
+MECANIM_FORCE_INLINE vec4f Vload4sf(float x, float y, float z, float w)
+{
+ vec4f v = {x,y,z,w};
+ return v;
+}
+
+static MECANIM_FORCE_INLINE vec4f Vload4sb(bool x, bool y, bool z, bool w)
+{
+ union {
+ int b;
+ float f;
+ } static const false_true[2] = {
+ {0}, {~0}
+ };
+
+ vec4f v = {false_true[x].f,false_true[y].f,false_true[z].f,false_true[w].f};
+ return v;
+}
+
+#define Vloadpf(v, offset) __lvx( (v), (offset))
+
+#define Vadd(l, r) __vaddfp((l), (r))
+
+#define Vsub( l, r) __vsubfp((l), (r))
+
+#define Vmul( l, r) __vmulfp((l), (r))
+
+MECANIM_FORCE_INLINE vec4f Vrcp(vec4f r)
+{
+ // This function does two iterations of Newton's method!
+ return XMVectorReciprocal(r);
+}
+
+MECANIM_FORCE_INLINE vec4f Vdiv(vec4f l, vec4f r)
+{
+ // This function does two iterations of Newton's method!
+ return XMVectorDivide(l, r);
+}
+
+#define Vmadd( a, b, c) __vmaddfp((a), (b), (c))
+
+// __vnmsubfp(a,b,c) computes -(a*b - c), so negating it yields a*b - c
+#define Vmsub( a, b, c) Vneg(__vnmsubfp((a), (b), (c)))
+
+static const vec4f sign_constant = {-0.f,-0.f,-0.f,-0.f};
+
+#define Vneg(r) __vxor( (r), sign_constant)
+
+
+// vector sgn: return -1, 1
+#define Vsgn(r) __vor(Vone(), __vand(sign_constant, (r) ))
+
+// vector sign: return -1, 0, 1
+static MECANIM_FORCE_INLINE vec4f Vsign(vec4f r)
+{
+ vec4f c = __vcmpeqfp(r, Vzero());
+ return __vor( __vand(__vnor(c,c), Vone()), __vand(sign_constant, r ));
+}
+
+#define Vinc(r) Vadd( (r), Vone())
+#define Vdec(r) Vsub( (r), Vone())
+#define Vabs(r) __vandc((r), sign_constant)
+#define Vmax( l, r) __vmaxfp((l), (r))
+#define Vmin( l, r) __vminfp((l), (r))
+
+MECANIM_FORCE_INLINE vec4fs Vlargest(vec4f r)
+{
+ r = Vmax(r, Vswizzle<kYZWX>::rhs(r));
+ r = Vmax(r, Vswizzle<kZWXY>::rhs(r));
+ return r;
+}
+
+MECANIM_FORCE_INLINE vec4fs Vsmallest(vec4f r)
+{
+ r = Vmin(r, Vswizzle<kYZWX>::rhs(r));
+ r = Vmin(r, Vswizzle<kZWXY>::rhs(r));
+ return r;
+}
+
+MECANIM_FORCE_INLINE vec4fs Vsum(vec4f r)
+{
+ r = Vadd(r, Vswizzle<kYZWX>::rhs(r) );
+ r = Vadd(r, Vswizzle<kZWXY>::rhs(r) );
+ return Vswizzle<kXXXX>::rhs(r);
+}
+
+#define Vdot( l, r) __vmsum4fp( (l), (r) )
+
+MECANIM_FORCE_INLINE vec4f Vrsqrt(vec4f r)
+{
+ static const vec4f three = {3.f,3.f,3.f,3.f};
+ static const vec4f a = {0.5f,0.5f,0.5f,0.5f};
+
+ vec4f const e = __vrsqrtefp(r);
+ return Vmul( Vmul(e, Vsub(three, Vmul( Vmul(e,e),r))), a);
+}
+
+
+#define Vsqrt(r) __vsel( Vmul(r, Vrsqrt(r)), Vzero(), __vcmpeqfp(r, Vzero()))
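+
+// sqrt(r) is formed as r * rsqrt(r); the select substitutes zero wherever
+// r == 0, because rsqrt(0) is infinite and 0 * inf would otherwise produce NaN.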
+
+#define Vcombine(x,y,z,w) __vmrghw(__vmrghw((x), (z)), __vmrghw((y), (w)))
+
+// Vector comparison
+#define Vcmpeq( a, b) __vcmpeqfp((a), (b))
+
+MECANIM_FORCE_INLINE vec4f Vcmpneq( vec4f a, vec4f b)
+{
+ vec4f c = __vcmpeqfp(a, b);
+ return __vnor(c, c);
+}
+
+#define Vcmpgt( a, b) __vcmpgtfp((a), (b))
+#define Vcmpge( a, b) __vcmpgefp((a), (b))
+#define Vcmplt( a, b) __vcmpgtfp((b), (a))
+#define Vcmple( a, b) __vcmpgefp((b), (a))
+
+#define Vsel( c, a, b) __vxor(b, __vand(__vxor(a, b), c))
+
+// vector logics
+#define Vnot(r) __vnor( (r), (r) )
+#define Vxnor( a, b) Vnot(__vxor((a), (b)))
+#define Vxor( a, b) __vxor((a), (b))
+#define Vand( a, b) __vand((a), (b))
+#define Vor( a, b) __vor((a), (b))
+
+MECANIM_FORCE_INLINE bool Vall(vec4b a)
+{
+ // All words equal to the all-ones mask: 0x00000080 is the CR6 "all true"
+ // bit written by __vcmpequwR (XNA's XM_CRMASK_CR6TRUE)
+ static const XMVECTORU32 vu32 = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
+ const int All = 0x00000080;
+ unsigned int compareResult;
+ __vcmpequwR(a, vu32.v, &compareResult);
+ return (compareResult & All) == All;
+}
+
+MECANIM_FORCE_INLINE bool Vany(vec4b a)
+{
+ // Not all words equal to 0
+ const int All = 0x00000080;
+ unsigned int compareResult;
+ __vcmpequwR(a, Vzero(), &compareResult);
+ return (compareResult & All) == 0;
+}
+
+#endif
diff --git a/Runtime/Math/Simd/xform.h b/Runtime/Math/Simd/xform.h
new file mode 100644
index 0000000..0bfbc13
--- /dev/null
+++ b/Runtime/Math/Simd/xform.h
@@ -0,0 +1,153 @@
+#ifndef SIMD_XFORM_H
+#define SIMD_XFORM_H
+
+#include "Runtime/Serialize/TransferFunctions/SerializeTransfer.h"
+
+#include "Runtime/Math/Simd/float4.h"
+#include "Runtime/Math/Simd/quaternion.h"
+
+namespace math
+{
+
+struct ATTRIBUTE_ALIGN(ALIGN4F) xform
+{
+ DEFINE_GET_TYPESTRING(xform)
+
+ inline xform():t(float4::zero()),q(quatIdentity()),s(float4::one()){}
+
+ inline xform( xform const& x)
+ { t = x.t; q = x.q; s = x.s; }
+
+ inline xform &operator=(xform const& x)
+ { t = x.t; q = x.q; s = x.s; return *this; }
+
+ inline xform(float4 const& t, float4 const& q, float4 const& s)
+ { this->t = t; this->q = q; this->s = s; }
+
+ float4 t;
+ float4 q;
+ float4 s;
+
+ template<class TransferFunction>
+ inline void Transfer (TransferFunction& transfer)
+ {
+ TRANSFER(t);
+ TRANSFER(q);
+ TRANSFER(s);
+ }
+};
+
+static inline xform xformIdentity()
+{
+ return xform( float4::zero(), quatIdentity(), float4::one() );
+}
+
+static inline float4 xformMulVec(xform const& x, float4 const& v)
+{
+ return x.t + quatMulVec(x.q, v * x.s );
+}
+
+static inline float4 xformInvMulVec(xform const& x, float4 const& v)
+{
+ return quatMulVec(quatConj(x.q), v - x.t) / x.s;
+}
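+
+// xformMulVec applies scale, then rotation, then translation; xformInvMulVec
+// undoes them in the reverse order, so for any non-zero scale
+// xformInvMulVec(x, xformMulVec(x, v)) == v up to rounding.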
+
+static inline float4 xformInvMulVecNS(xform const& x, float4 const& v)
+{
+ return quatMulVec(quatConj(x.q), v - x.t);
+}
+
+static inline xform xformMul(xform const& a, xform const& b)
+{
+ return xform( xformMulVec(a, b.t), normalize(quatMul(a.q, b.q)), a.s * b.s);
+}
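+
+// Composition applies b first, then a: b.t is carried through a, the rotations
+// multiply (renormalized to counter drift) and the scales multiply component-wise.
+// The component-wise scale is exact only for uniform scale; a non-uniform s does
+// not commute with the rotation.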
+
+static inline xform xformInvMul(xform const& a, xform const& b)
+{
+ return xform(xformInvMulVec(a, b.t), normalize( quatMul( quatConj(a.q), b.q)), b.s / a.s);
+}
+
+static inline xform xformInvMulNS(xform const& a, xform const& b)
+{
+ return xform(xformInvMulVecNS(a, b.t), normalize( quatMul( quatConj(a.q), b.q)), float4::one());
+}
+
+static inline xform xformMulInv(xform const& a, xform const& b)
+{
+ const float4 qinv = quatConj(b.q);
+ const float4 sinv = rcp(b.s);
+
+ return xform(xformMulVec(a, quatMulVec( qinv, -b.t) * sinv),normalize( quatMul( a.q, qinv)), a.s * sinv);
+}
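+
+// b is inverted first: b^-1 has rotation quatConj(b.q), scale rcp(b.s) and
+// translation quatMulVec(quatConj(b.q), -b.t) * rcp(b.s); a is then composed
+// with that inverse exactly as in xformMul.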
+
+static inline xform xformBlend(xform const &a, xform const &b, float1 const& w)
+{
+ return xform(lerp(a.t, b.t, w),quatLerp(a.q, b.q, w),scaleBlend(a.s, b.s, w));
+}
+
+static inline bool operator==(xform const& l, xform const& r)
+{
+ return all(l.t == r.t) && all(l.q == r.q) && all(l.s == r.s);
+}
+
+static inline xform xformWeight(xform const& x, float1 const& w)
+{
+ return xform(x.t*w,quatWeight(x.q,w),scaleWeight(x.s,w));
+}
+
+static inline xform xformAdd(xform const& a, xform const& b)
+{
+ return xform(a.t+b.t,qtan2Quat(quat2Qtan(a.q)+quat2Qtan(b.q)),a.s*b.s);
+}
+
+static inline xform xformSub(xform const& a, xform const& b)
+{
+ return xform(a.t-b.t,qtan2Quat(quat2Qtan(a.q)-quat2Qtan(b.q)),a.s/b.s);
+}
+
+static inline xform xformBlend(xform const* apXFormArray, float const* apWeightArray, unsigned int aCount)
+{
+ xform ret;
+
+ ret.t = float4::zero();
+ ret.q = float4::zero();
+ ret.s = float4::one();
+
+ float sumW = 0;
+
+ for (unsigned int i = 0; i < aCount; i++)
+ {
+ float w = apWeightArray[i];
+ math::float1 w1(w);
+ sumW += w;
+
+ ret.t += apXFormArray[i].t*w1;
+ // Flip samples whose dot with the running sum is negative so both
+ // quaternion hemispheres blend consistently
+ ret.q += cond(dot(ret.q,apXFormArray[i].q) < float1::zero(),apXFormArray[i].q * -w1,apXFormArray[i].q * w1);
+ ret.s *= scaleWeight(apXFormArray[i].s,w1);
+ }
+
+ // Give any leftover weight to the identity rotation before renormalizing
+ float4 q(0,0,0,saturate(1.0f-sumW));
+ ret.q = normalize(ret.q+q);
+
+ return ret;
+}
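+
+// Usage sketch (illustrative data): blending two transforms 25%/75%. The weights
+// need not sum to one; the remainder is filled with the identity rotation.
+//   xform poses[2] = { poseA, poseB };         // hypothetical inputs
+//   float weights[2] = { 0.25f, 0.75f };
+//   xform blended = xformBlend(poses, weights, 2);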
+
+static inline xform mirror(xform const& x)
+{
+ constant_float4(mirrorQ,1,-1,-1,1);
+
+ xform ret = x;
+ ret.t = mirror(ret.t);
+ ret.q *= mirrorQ;
+ return ret;
+}
+
+static inline xform cond(bool c, xform const& a, xform const& b)
+{
+ return c?a:b;
+}
+
+}
+
+#endif