#ifndef BT_SIMD__QUATERNION_H_
#define BT_SIMD__QUATERNION_H_

#include "btVector3.h"
#include "btQuadWord.h"

#ifdef BT_USE_DOUBLE_PRECISION
#define btQuaternionData btQuaternionDoubleData
#define btQuaternionDataName "btQuaternionDoubleData"
#else
#define btQuaternionData btQuaternionFloatData
#define btQuaternionDataName "btQuaternionFloatData"
#endif //BT_USE_DOUBLE_PRECISION

#ifdef BT_USE_SSE

#define vOnes (_mm_set_ps(1.0f, 1.0f, 1.0f, 1.0f))

#endif

#if defined(BT_USE_SSE)

#define vQInv (_mm_set_ps(+0.0f, -0.0f, -0.0f, -0.0f))
#define vPPPM (_mm_set_ps(-0.0f, +0.0f, +0.0f, +0.0f))

#elif defined(BT_USE_NEON)

const btSimdFloat4 ATTRIBUTE_ALIGNED16(vQInv) = {-0.0f, -0.0f, -0.0f, +0.0f};
const btSimdFloat4 ATTRIBUTE_ALIGNED16(vPPPM) = {+0.0f, +0.0f, +0.0f, -0.0f};

#endif

/**@brief The btQuaternion implements quaternion to perform linear algebra rotations in combination with btMatrix3x3, btVector3 and btTransform. */
class btQuaternion : public btQuadWord
{
public:
	/**@brief No initialization constructor */
	btQuaternion() {}

#if (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE)) || defined(BT_USE_NEON)
	// Set Vector
	SIMD_FORCE_INLINE btQuaternion(const btSimdFloat4 vec)
	{
		mVec128 = vec;
	}

	// Copy constructor
	SIMD_FORCE_INLINE btQuaternion(const btQuaternion& rhs)
	{
		mVec128 = rhs.mVec128;
	}

	// Assignment operator
	SIMD_FORCE_INLINE btQuaternion& operator=(const btQuaternion& v)
	{
		mVec128 = v.mVec128;
		return *this;
	}
#endif
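	/* Sketch (illustrative note, not part of the original header): the vQInv and
	 * vPPPM constants rely on XOR with -0.0f flipping only the IEEE-754 sign bit,
	 * so a single _mm_xor_ps/veorq_s32 negates exactly the selected lanes:
	 * @code
	 *   // scalar equivalent of _mm_xor_ps(q.get128(), vQInv): conjugate q
	 *   btQuaternion conj(-q.x(), -q.y(), -q.z(), q.w());
	 * @endcode
	 */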
#ifndef BT_EULER_DEFAULT_ZYX
	/**@brief Set the quaternion using Euler angles
	 * @param yaw Angle around Y
	 * @param pitch Angle around X
	 * @param roll Angle around Z */
	void setEuler(const btScalar& yaw, const btScalar& pitch, const btScalar& roll)
	{
		btScalar halfYaw = btScalar(yaw) * btScalar(0.5);
		btScalar halfPitch = btScalar(pitch) * btScalar(0.5);
		btScalar halfRoll = btScalar(roll) * btScalar(0.5);
		btScalar cosYaw = btCos(halfYaw);
		btScalar sinYaw = btSin(halfYaw);
		btScalar cosPitch = btCos(halfPitch);
		btScalar sinPitch = btSin(halfPitch);
		btScalar cosRoll = btCos(halfRoll);
		btScalar sinRoll = btSin(halfRoll);
		setValue(cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw,
			cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw,
			sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw,
			cosRoll * cosPitch * cosYaw + sinRoll * sinPitch * sinYaw);
	}
#endif
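	/* Usage sketch (illustrative, not part of the original header):
	 * @code
	 *   btQuaternion q;
	 *   q.setEuler(SIMD_HALF_PI, btScalar(0), btScalar(0)); // 90 degree yaw around Y
	 *   // q is now approximately (0, 0.7071, 0, 0.7071), i.e. (0, sin(pi/4), 0, cos(pi/4))
	 * @endcode
	 */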
	/**@brief Set the quaternion using euler angles
	 * @param yaw Angle around Z
	 * @param pitch Angle around Y
	 * @param roll Angle around X */
	void setEulerZYX(const btScalar& yaw, const btScalar& pitch, const btScalar& roll)
	{
		btScalar halfYaw = btScalar(yaw) * btScalar(0.5);
		btScalar halfPitch = btScalar(pitch) * btScalar(0.5);
		btScalar halfRoll = btScalar(roll) * btScalar(0.5);
		btScalar cosYaw = btCos(halfYaw);
		btScalar sinYaw = btSin(halfYaw);
		btScalar cosPitch = btCos(halfPitch);
		btScalar sinPitch = btSin(halfPitch);
		btScalar cosRoll = btCos(halfRoll);
		btScalar sinRoll = btSin(halfRoll);
		setValue(sinRoll * cosPitch * cosYaw - cosRoll * sinPitch * sinYaw,  // x
			cosRoll * sinPitch * cosYaw + sinRoll * cosPitch * sinYaw,   // y
			cosRoll * cosPitch * sinYaw - sinRoll * sinPitch * cosYaw,   // z
			cosRoll * cosPitch * cosYaw + sinRoll * sinPitch * sinYaw);  // w
	}
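	/* Usage sketch (illustrative): setEulerZYX assigns the angles to the opposite
	 * axes (yaw around Z, pitch around Y, roll around X):
	 * @code
	 *   btQuaternion q;
	 *   q.setEulerZYX(SIMD_HALF_PI, btScalar(0), btScalar(0)); // 90 degree yaw around Z
	 *   // q is now approximately (0, 0, 0.7071, 0.7071)
	 * @endcode
	 */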
	/**@brief Add two quaternions
	 * @param q The quaternion to add to this one */
	SIMD_FORCE_INLINE btQuaternion& operator+=(const btQuaternion& q)
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = _mm_add_ps(mVec128, q.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vaddq_f32(mVec128, q.mVec128);
#else
		m_floats[0] += q.x();
		m_floats[1] += q.y();
		m_floats[2] += q.z();
		m_floats[3] += q.m_floats[3];
#endif
		return *this;
	}
	/**@brief Subtract out a quaternion
	 * @param q The quaternion to subtract from this one */
	btQuaternion& operator-=(const btQuaternion& q)
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		mVec128 = _mm_sub_ps(mVec128, q.mVec128);
#elif defined(BT_USE_NEON)
		mVec128 = vsubq_f32(mVec128, q.mVec128);
#else
		m_floats[0] -= q.x();
		m_floats[1] -= q.y();
		m_floats[2] -= q.z();
		m_floats[3] -= q.m_floats[3];
#endif
		return *this;
	}
	/**@brief Scale this quaternion
	 * @param s The scalar to scale by */
	btQuaternion& operator*=(const btScalar& s)
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vs = _mm_load_ss(&s);	//	(S 0 0 0)
		vs = bt_pshufd_ps(vs, 0);	//	(S S S S)
		mVec128 = _mm_mul_ps(mVec128, vs);
#elif defined(BT_USE_NEON)
		mVec128 = vmulq_n_f32(mVec128, s);
#else
		m_floats[0] *= s;
		m_floats[1] *= s;
		m_floats[2] *= s;
		m_floats[3] *= s;
#endif
		return *this;
	}
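	/* Sketch (illustrative, assuming orientations a, b and a weight t in [0,1],
	 * all hypothetical names): the compound operators are plain component-wise
	 * arithmetic, which is enough for a normalized linear blend ("nlerp"):
	 * @code
	 *   btQuaternion blend = a;
	 *   blend *= btScalar(1) - t;   // weight a
	 *   btQuaternion qb = b;
	 *   qb *= t;                    // weight b
	 *   blend += qb;
	 *   blend.normalize();          // renormalize the component-wise sum
	 * @endcode
	 */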
	/**@brief Multiply this quaternion by q on the right
	 * @param q The other quaternion
	 * Equivalent to this = this * q */
	btQuaternion& operator*=(const btQuaternion& q)
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vQ2 = q.get128();

		__m128 A1 = bt_pshufd_ps(mVec128, BT_SHUFFLE(0,1,2,0));
		__m128 B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(3,3,3,0));

		A1 = A1 * B1;

		__m128 A2 = bt_pshufd_ps(mVec128, BT_SHUFFLE(1,2,0,1));
		__m128 B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1));

		A2 = A2 * B2;

		B1 = bt_pshufd_ps(mVec128, BT_SHUFFLE(2,0,1,2));
		B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2));

		B1 = B1 * B2;	//	A3 *= B3

		mVec128 = bt_splat_ps(mVec128, 3);	//	A0 = (w w w w)
		mVec128 = mVec128 * vQ2;	//	A0 * B0

		A1 = A1 + A2;	//	AB12
		mVec128 = mVec128 - B1;	//	AB03 = AB0 - AB3
		A1 = _mm_xor_ps(A1, vPPPM);	//	change sign of the last element
		mVec128 = mVec128 + A1;	//	AB03 + AB12
#elif defined(BT_USE_NEON)

		float32x4_t vQ1 = mVec128;
		float32x4_t vQ2 = q.get128();
		float32x4_t A0, A1, B1, A2, B2, A3, B3;
		float32x2_t vQ1zx, vQ2wx, vQ1yz, vQ2zx, vQ2yz, vQ2xz;

		{
		float32x2x2_t tmp;

		tmp = vtrn_f32( vget_high_f32(vQ1), vget_low_f32(vQ1) );	// {z x}, {w y}
		vQ1zx = tmp.val[0];

		tmp = vtrn_f32( vget_high_f32(vQ2), vget_low_f32(vQ2) );	// {z x}, {w y}
		vQ2zx = tmp.val[0];
		}
		vQ2wx = vext_f32(vget_high_f32(vQ2), vget_low_f32(vQ2), 1);

		vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);

		vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
		vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

		A1 = vcombine_f32(vget_low_f32(vQ1), vQ1zx);	// X Y  z x
		B1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ2), 1), vQ2wx);	// W W  W X

		A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
		B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));

		A3 = vcombine_f32(vQ1zx, vQ1yz);	// Z X  Y Z
		B3 = vcombine_f32(vQ2yz, vQ2xz);	// Y Z  x z

		A1 = vmulq_f32(A1, B1);
		A2 = vmulq_f32(A2, B2);
		A3 = vmulq_f32(A3, B3);	//	A3 *= B3
		A0 = vmulq_lane_f32(vQ2, vget_high_f32(vQ1), 1);	//	A0 * B0

		A1 = vaddq_f32(A1, A2);	//	AB12 = AB1 + AB2
		A0 = vsubq_f32(A0, A3);	//	AB03 = AB0 - AB3

		//	change the sign of the last element
		A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
		A0 = vaddq_f32(A0, A1);	//	AB03 + AB12

		mVec128 = A0;
#else
		setValue(
			m_floats[3] * q.x() + m_floats[0] * q.m_floats[3] + m_floats[1] * q.z() - m_floats[2] * q.y(),
			m_floats[3] * q.y() + m_floats[1] * q.m_floats[3] + m_floats[2] * q.x() - m_floats[0] * q.z(),
			m_floats[3] * q.z() + m_floats[2] * q.m_floats[3] + m_floats[0] * q.y() - m_floats[1] * q.x(),
			m_floats[3] * q.m_floats[3] - m_floats[0] * q.x() - m_floats[1] * q.y() - m_floats[2] * q.z());
#endif
		return *this;
	}
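	/* Usage sketch (illustrative): quaternion multiplication composes rotations;
	 * with the usual column-vector convention the right-hand factor acts first:
	 * @code
	 *   btQuaternion world(btVector3(0, 0, 1), SIMD_HALF_PI);     // 90 deg about Z
	 *   world *= btQuaternion(btVector3(1, 0, 0), SIMD_HALF_PI);  // X rotation applied first
	 * @endcode
	 */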
	/**@brief Return the dot product between this quaternion and another
	 * @param q The other quaternion */
	btScalar dot(const btQuaternion& q) const
	{
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vd = _mm_mul_ps(mVec128, q.mVec128);

		__m128 t = _mm_movehl_ps(vd, vd);
		vd = _mm_add_ps(vd, t);
		t = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, t);

		return _mm_cvtss_f32(vd);
#elif defined(BT_USE_NEON)
		float32x4_t vd = vmulq_f32(mVec128, q.mVec128);
		float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_high_f32(vd));
		x = vpadd_f32(x, x);
		return vget_lane_f32(x, 0);
#else
		return m_floats[0] * q.x() +
		       m_floats[1] * q.y() +
		       m_floats[2] * q.z() +
		       m_floats[3] * q.m_floats[3];
#endif
	}
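	/* Sketch (illustrative): for unit quaternions the dot product is the cosine
	 * of half the angle between the two rotations:
	 * @code
	 *   btScalar cosHalf = qa.dot(qb);                           // cos(theta / 2)
	 *   btScalar theta = btScalar(2) * btAcos(btFabs(cosHalf));  // shortest-path angle
	 * @endcode
	 */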
	/**@brief Normalize the quaternion
	 * Such that x^2 + y^2 + z^2 + w^2 = 1 */
	btQuaternion& normalize()
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vd = _mm_mul_ps(mVec128, mVec128);

		__m128 t = _mm_movehl_ps(vd, vd);
		vd = _mm_add_ps(vd, t);
		t = _mm_shuffle_ps(vd, vd, 0x55);
		vd = _mm_add_ss(vd, t);

		vd = _mm_sqrt_ss(vd);
		vd = _mm_div_ss(vOnes, vd);
		vd = bt_pshufd_ps(vd, 0);	// splat the reciprocal length
		mVec128 = _mm_mul_ps(mVec128, vd);

		return *this;
#else
		return *this /= length();
#endif
	}
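	/* Usage sketch (illustrative, hypothetical names): repeated multiplication
	 * lets a quaternion drift off unit length, so renormalize periodically:
	 * @code
	 *   orientation *= delta;     // integrate an incremental rotation
	 *   orientation.normalize();  // keep it a valid rotation
	 * @endcode
	 */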
	/**@brief Return a scaled version of this quaternion
	 * @param s The scale factor */
	SIMD_FORCE_INLINE btQuaternion operator*(const btScalar& s) const
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		__m128 vs = _mm_load_ss(&s);	//	(S 0 0 0)
		vs = bt_pshufd_ps(vs, 0x00);	//	(S S S S)

		return btQuaternion(_mm_mul_ps(mVec128, vs));
#elif defined(BT_USE_NEON)
		return btQuaternion(vmulq_n_f32(mVec128, s));
#else
		return btQuaternion(x() * s, y() * s, z() * s, m_floats[3] * s);
#endif
	}

	/**@brief Return the axis of the rotation represented by this quaternion */
	btVector3 getAxis() const
	{
		btScalar s_squared = 1.f - m_floats[3] * m_floats[3];

		if (s_squared < btScalar(10.) * SIMD_EPSILON)	// Check for divide by zero
			return btVector3(1.0, 0.0, 0.0);	// Arbitrary
		btScalar s = 1.f / btSqrt(s_squared);
		return btVector3(m_floats[0] * s, m_floats[1] * s, m_floats[2] * s);
	}
	/**@brief Return the inverse of this quaternion */
	btQuaternion inverse() const
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		return btQuaternion(_mm_xor_ps(mVec128, vQInv));
#elif defined(BT_USE_NEON)
		return btQuaternion((btSimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)vQInv));
#else
		return btQuaternion(-m_floats[0], -m_floats[1], -m_floats[2], m_floats[3]);
#endif
	}
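	/* Sketch (illustrative): for a unit quaternion the inverse equals the
	 * conjugate, which is why the single sign-mask XOR above is sufficient:
	 * @code
	 *   btQuaternion r = q * q.inverse(); // ~(0, 0, 0, 1) when q has unit length
	 * @endcode
	 */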
	/**@brief Return the sum of this quaternion and the other
	 * @param q2 The other quaternion */
	SIMD_FORCE_INLINE btQuaternion operator+(const btQuaternion& q2) const
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		return btQuaternion(_mm_add_ps(mVec128, q2.mVec128));
#elif defined(BT_USE_NEON)
		return btQuaternion(vaddq_f32(mVec128, q2.mVec128));
#else
		const btQuaternion& q1 = *this;
		return btQuaternion(q1.x() + q2.x(), q1.y() + q2.y(), q1.z() + q2.z(), q1.m_floats[3] + q2.m_floats[3]);
#endif
	}

	/**@brief Return the difference between this quaternion and the other
	 * @param q2 The other quaternion */
	SIMD_FORCE_INLINE btQuaternion operator-(const btQuaternion& q2) const
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		return btQuaternion(_mm_sub_ps(mVec128, q2.mVec128));
#elif defined(BT_USE_NEON)
		return btQuaternion(vsubq_f32(mVec128, q2.mVec128));
#else
		const btQuaternion& q1 = *this;
		return btQuaternion(q1.x() - q2.x(), q1.y() - q2.y(), q1.z() - q2.z(), q1.m_floats[3] - q2.m_floats[3]);
#endif
	}

	/**@brief Return the negative of this quaternion
	 * This simply negates each element */
	SIMD_FORCE_INLINE btQuaternion operator-() const
	{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
		return btQuaternion(_mm_xor_ps(mVec128, btvMzeroMask));
#elif defined(BT_USE_NEON)
		return btQuaternion((btSimdFloat4)veorq_s32((int32x4_t)mVec128, (int32x4_t)btvMzeroMask));
#else
		const btQuaternion& q2 = *this;
		return btQuaternion(-q2.x(), -q2.y(), -q2.z(), -q2.m_floats[3]);
#endif
	}
	/**@brief Return qd or -qd, whichever is farther from this quaternion
	 * (qd and -qd encode the same rotation) */
	SIMD_FORCE_INLINE btQuaternion farthest(const btQuaternion& qd) const
	{
		btQuaternion diff, sum;
		diff = *this - qd;
		sum = *this + qd;
		if (diff.dot(diff) > sum.dot(sum))
			return qd;
		return (-qd);
	}

	/**@brief Return qd or -qd, whichever is nearer to this quaternion */
	SIMD_FORCE_INLINE btQuaternion nearest(const btQuaternion& qd) const
	{
		btQuaternion diff, sum;
		diff = *this - qd;
		sum = *this + qd;
		if (diff.dot(diff) < sum.dot(sum))
			return qd;
		return (-qd);
	}
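	/* Sketch (illustrative): nearest() picks the sign of the goal closest to
	 * *this, which avoids interpolating "the long way around":
	 * @code
	 *   btQuaternion target = current.nearest(goal); // goal or -goal
	 * @endcode
	 */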
	// ... (remaining members elided: slerp, getIdentity, getW, serialization declarations) ...
};

/**@brief Return the product of two quaternions */
SIMD_FORCE_INLINE btQuaternion
operator*(const btQuaternion& q1, const btQuaternion& q2)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	__m128 vQ1 = q1.get128();
	__m128 vQ2 = q2.get128();
	__m128 A0, A1, B1, A2, B2;

	A1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(0,1,2,0));	// X Y  z x
	B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(3,3,3,0));	// W W  W X

	A1 = A1 * B1;

	A2 = bt_pshufd_ps(vQ1, BT_SHUFFLE(1,2,0,1));	// Y Z  X Y
	B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1));	// z x  Y Y

	A2 = A2 * B2;

	B1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(2,0,1,2));	// z x  Y Z
	B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2));	// Y Z  x z

	B1 = B1 * B2;	//	A3 *= B3

	A0 = bt_splat_ps(vQ1, 3);	//	A0 = (w w w w)
	A0 = A0 * vQ2;	//	A0 * B0

	A1 = A1 + A2;	//	AB12
	A0 = A0 - B1;	//	AB03 = AB0 - AB3

	A1 = _mm_xor_ps(A1, vPPPM);	//	change sign of the last element
	A0 = A0 + A1;	//	AB03 + AB12

	return btQuaternion(A0);

#elif defined(BT_USE_NEON)

	float32x4_t vQ1 = q1.get128();
	float32x4_t vQ2 = q2.get128();
	float32x4_t A0, A1, B1, A2, B2, A3, B3;
	float32x2_t vQ1zx, vQ2wx, vQ1yz, vQ2zx, vQ2yz, vQ2xz;

	{
	float32x2x2_t tmp;

	tmp = vtrn_f32( vget_high_f32(vQ1), vget_low_f32(vQ1) );	// {z x}, {w y}
	vQ1zx = tmp.val[0];

	tmp = vtrn_f32( vget_high_f32(vQ2), vget_low_f32(vQ2) );	// {z x}, {w y}
	vQ2zx = tmp.val[0];
	}
	vQ2wx = vext_f32(vget_high_f32(vQ2), vget_low_f32(vQ2), 1);

	vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);

	vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
	vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

	A1 = vcombine_f32(vget_low_f32(vQ1), vQ1zx);	// X Y  z x
	B1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ2), 1), vQ2wx);	// W W  W X

	A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
	B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));

	A3 = vcombine_f32(vQ1zx, vQ1yz);	// Z X  Y Z
	B3 = vcombine_f32(vQ2yz, vQ2xz);	// Y Z  x z

	A1 = vmulq_f32(A1, B1);
	A2 = vmulq_f32(A2, B2);
	A3 = vmulq_f32(A3, B3);	//	A3 *= B3
	A0 = vmulq_lane_f32(vQ2, vget_high_f32(vQ1), 1);	//	A0 * B0

	A1 = vaddq_f32(A1, A2);	//	AB12 = AB1 + AB2
	A0 = vsubq_f32(A0, A3);	//	AB03 = AB0 - AB3

	//	change the sign of the last element
	A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);
	A0 = vaddq_f32(A0, A1);	//	AB03 + AB12

	return btQuaternion(A0);

#else
	return btQuaternion(
		q1.w() * q2.x() + q1.x() * q2.w() + q1.y() * q2.z() - q1.z() * q2.y(),
		q1.w() * q2.y() + q1.y() * q2.w() + q1.z() * q2.x() - q1.x() * q2.z(),
		q1.w() * q2.z() + q1.z() * q2.w() + q1.x() * q2.y() - q1.y() * q2.x(),
		q1.w() * q2.w() - q1.x() * q2.x() - q1.y() * q2.y() - q1.z() * q2.z());
#endif
}
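/* Sketch (illustrative, not part of the original header): quaternion
 * multiplication is not commutative, so composition order matters:
 * @code
 *   btQuaternion rx(btVector3(1, 0, 0), SIMD_HALF_PI);
 *   btQuaternion rz(btVector3(0, 0, 1), SIMD_HALF_PI);
 *   btQuaternion a = rx * rz; // rotate by rz, then rx
 *   btQuaternion b = rz * rx; // a different rotation
 * @endcode
 */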
/**@brief Return the product of a quaternion and a vector */
SIMD_FORCE_INLINE btQuaternion
operator*(const btQuaternion& q, const btVector3& w)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	__m128 vQ1 = q.get128();
	__m128 vQ2 = w.get128();
	__m128 A1, B1, A2, B2, A3, B3;

	A1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(3,3,3,0));
	B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(0,1,2,0));

	A1 = A1 * B1;

	A2 = bt_pshufd_ps(vQ1, BT_SHUFFLE(1,2,0,1));
	B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1));

	A2 = A2 * B2;

	A3 = bt_pshufd_ps(vQ1, BT_SHUFFLE(2,0,1,2));
	B3 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2));

	A3 = A3 * B3;	//	A3 *= B3

	A1 = A1 + A2;	//	AB12
	A1 = _mm_xor_ps(A1, vPPPM);	//	change sign of the last element
	A1 = A1 - A3;	//	AB123 = AB12 - AB3

	return btQuaternion(A1);

#elif defined(BT_USE_NEON)

	float32x4_t vQ1 = q.get128();
	float32x4_t vQ2 = w.get128();
	float32x4_t A1, B1, A2, B2, A3, B3;
	float32x2_t vQ1wx, vQ2zx, vQ1yz, vQ2yz, vQ1zx, vQ2xz;

	vQ1wx = vext_f32(vget_high_f32(vQ1), vget_low_f32(vQ1), 1);
	{
	float32x2x2_t tmp;

	tmp = vtrn_f32( vget_high_f32(vQ2), vget_low_f32(vQ2) );	// {z x}, {w y}
	vQ2zx = tmp.val[0];

	tmp = vtrn_f32( vget_high_f32(vQ1), vget_low_f32(vQ1) );	// {z x}, {w y}
	vQ1zx = tmp.val[0];
	}

	vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);

	vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
	vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

	A1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ1), 1), vQ1wx);	// W W  W X
	B1 = vcombine_f32(vget_low_f32(vQ2), vQ2zx);	// X Y  z x

	A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
	B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));

	A3 = vcombine_f32(vQ1zx, vQ1yz);
	B3 = vcombine_f32(vQ2yz, vQ2xz);

	A1 = vmulq_f32(A1, B1);
	A2 = vmulq_f32(A2, B2);
	A3 = vmulq_f32(A3, B3);	//	A3 *= B3

	A1 = vaddq_f32(A1, A2);	//	AB12 = AB1 + AB2

	//	change the sign of the last element
	A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);

	A1 = vsubq_f32(A1, A3);	//	AB123 = AB12 - AB3

	return btQuaternion(A1);

#else
	return btQuaternion(
		 q.w() * w.x() + q.y() * w.z() - q.z() * w.y(),
		 q.w() * w.y() + q.z() * w.x() - q.x() * w.z(),
		 q.w() * w.z() + q.x() * w.y() - q.y() * w.x(),
		-q.x() * w.x() - q.y() * w.y() - q.z() * w.z());
#endif
}
/**@brief Return the product of a vector and a quaternion */
SIMD_FORCE_INLINE btQuaternion
operator*(const btVector3& w, const btQuaternion& q)
{
#if defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	__m128 vQ1 = w.get128();
	__m128 vQ2 = q.get128();
	__m128 A1, B1, A2, B2, A3, B3;

	A1 = bt_pshufd_ps(vQ1, BT_SHUFFLE(0,1,2,0));	// X Y  z x
	B1 = bt_pshufd_ps(vQ2, BT_SHUFFLE(3,3,3,0));	// W W  W X

	A1 = A1 * B1;

	A2 = bt_pshufd_ps(vQ1, BT_SHUFFLE(1,2,0,1));
	B2 = bt_pshufd_ps(vQ2, BT_SHUFFLE(2,0,1,1));

	A2 = A2 * B2;

	A3 = bt_pshufd_ps(vQ1, BT_SHUFFLE(2,0,1,2));
	B3 = bt_pshufd_ps(vQ2, BT_SHUFFLE(1,2,0,2));

	A3 = A3 * B3;	//	A3 *= B3

	A1 = A1 + A2;	//	AB12
	A1 = _mm_xor_ps(A1, vPPPM);	//	change sign of the last element
	A1 = A1 - A3;	//	AB123 = AB12 - AB3

	return btQuaternion(A1);

#elif defined(BT_USE_NEON)

	float32x4_t vQ1 = w.get128();
	float32x4_t vQ2 = q.get128();
	float32x4_t A1, B1, A2, B2, A3, B3;
	float32x2_t vQ1zx, vQ2wx, vQ1yz, vQ2zx, vQ2yz, vQ2xz;

	{
	float32x2x2_t tmp;

	tmp = vtrn_f32( vget_high_f32(vQ1), vget_low_f32(vQ1) );	// {z x}, {w y}
	vQ1zx = tmp.val[0];

	tmp = vtrn_f32( vget_high_f32(vQ2), vget_low_f32(vQ2) );	// {z x}, {w y}
	vQ2zx = tmp.val[0];
	}
	vQ2wx = vext_f32(vget_high_f32(vQ2), vget_low_f32(vQ2), 1);

	vQ1yz = vext_f32(vget_low_f32(vQ1), vget_high_f32(vQ1), 1);

	vQ2yz = vext_f32(vget_low_f32(vQ2), vget_high_f32(vQ2), 1);
	vQ2xz = vext_f32(vQ2zx, vQ2zx, 1);

	A1 = vcombine_f32(vget_low_f32(vQ1), vQ1zx);	// X Y  z x
	B1 = vcombine_f32(vdup_lane_f32(vget_high_f32(vQ2), 1), vQ2wx);	// W W  W X

	A2 = vcombine_f32(vQ1yz, vget_low_f32(vQ1));
	B2 = vcombine_f32(vQ2zx, vdup_lane_f32(vget_low_f32(vQ2), 1));

	A3 = vcombine_f32(vQ1zx, vQ1yz);	// z x  Y Z
	B3 = vcombine_f32(vQ2yz, vQ2xz);	// Y Z  x z

	A1 = vmulq_f32(A1, B1);
	A2 = vmulq_f32(A2, B2);
	A3 = vmulq_f32(A3, B3);	//	A3 *= B3

	A1 = vaddq_f32(A1, A2);	//	AB12 = AB1 + AB2

	//	change the sign of the last element
	A1 = (btSimdFloat4)veorq_s32((int32x4_t)A1, (int32x4_t)vPPPM);

	A1 = vsubq_f32(A1, A3);	//	AB123 = AB12 - AB3

	return btQuaternion(A1);

#else
	return btQuaternion(
		+w.x() * q.w() + w.y() * q.z() - w.z() * q.y(),
		+w.y() * q.w() + w.z() * q.x() - w.x() * q.z(),
		+w.z() * q.w() + w.x() * q.y() - w.y() * q.x(),
		-w.x() * q.x() - w.y() * q.y() - w.z() * q.z());
#endif
}
/**@brief Return the result of spherical linear interpolation between two quaternions
 * @param q1 The first quaternion
 * @param q2 The second quaternion
 * @param t The ratio between q1 and q2. t = 0 returns q1, t = 1 returns q2
 * Slerp assumes constant velocity between positions. */
SIMD_FORCE_INLINE btQuaternion
slerp(const btQuaternion& q1, const btQuaternion& q2, const btScalar& t)
{
	return q1.slerp(q2, t);
}
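/* Usage sketch (illustrative):
 * @code
 *   btQuaternion mid = slerp(from, to, btScalar(0.5)); // halfway along the arc
 * @endcode
 */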
SIMD_FORCE_INLINE btVector3
quatRotate(const btQuaternion& rotation, const btVector3& v)
{
	btQuaternion q = rotation * v;
	q *= rotation.inverse();
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
	return btVector3(_mm_and_ps(q.get128(), btvFFF0fMask));
#elif defined(BT_USE_NEON)
	return btVector3((float32x4_t)vandq_s32((int32x4_t)q.get128(), btvFFF0Mask));
#else
	return btVector3(q.getX(), q.getY(), q.getZ());
#endif
}
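/* Usage sketch (illustrative): quatRotate evaluates q * v * q^-1 via the two
 * mixed quaternion-vector products above, then masks off the w lane:
 * @code
 *   btQuaternion q(btVector3(0, 0, 1), SIMD_HALF_PI);  // 90 deg about Z
 *   btVector3 r = quatRotate(q, btVector3(1, 0, 0));   // approximately (0, 1, 0)
 * @endcode
 */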
SIMD_FORCE_INLINE void btQuaternion::serializeFloat(struct btQuaternionFloatData& dataOut) const
{
	/// could also do a memcpy, check if it is worth it
	for (int i = 0; i < 4; i++)
		dataOut.m_floats[i] = float(m_floats[i]);
}

SIMD_FORCE_INLINE void btQuaternion::deSerializeFloat(const struct btQuaternionFloatData& dataIn)
{
	for (int i = 0; i < 4; i++)
		m_floats[i] = btScalar(dataIn.m_floats[i]);
}

SIMD_FORCE_INLINE void btQuaternion::serializeDouble(struct btQuaternionDoubleData& dataOut) const
{
	for (int i = 0; i < 4; i++)
		dataOut.m_floats[i] = double(m_floats[i]);
}

SIMD_FORCE_INLINE void btQuaternion::deSerializeDouble(const struct btQuaternionDoubleData& dataIn)
{
	for (int i = 0; i < 4; i++)
		m_floats[i] = btScalar(dataIn.m_floats[i]);
}

SIMD_FORCE_INLINE void btQuaternion::serialize(struct btQuaternionData& dataOut) const
{
	for (int i = 0; i < 4; i++)
		dataOut.m_floats[i] = m_floats[i];
}

SIMD_FORCE_INLINE void btQuaternion::deSerialize(const struct btQuaternionData& dataIn)
{
	for (int i = 0; i < 4; i++)
		m_floats[i] = dataIn.m_floats[i];
}
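/* Usage sketch (illustrative): round-tripping through the serialization structs;
 * btQuaternionData aliases the float or double variant depending on
 * BT_USE_DOUBLE_PRECISION (see the defines at the top of this header):
 * @code
 *   btQuaternionData data;
 *   q.serialize(data);     // write the four components into the POD struct
 *   btQuaternion q2;
 *   q2.deSerialize(data);  // q2 now equals q
 * @endcode
 */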
#endif //BT_SIMD__QUATERNION_H_