2 Bullet Continuous Collision Detection and Physics Library
3 Copyright (c) 2003-2007 Erwin Coumans http://continuousphysics.com/Bullet/
5 This software is provided 'as-is', without any express or implied warranty.
6 In no event will the authors be held liable for any damages arising from the use of this software.
7 Permission is granted to anyone to use this software for any purpose,
8 including commercial applications, and to alter it and redistribute it freely,
9 subject to the following restrictions:
11 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
12 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
13 3. This notice may not be removed or altered from any source distribution.
15 ///btDbvt implementation by Nathanael Presson
17 #ifndef BT_DYNAMIC_BOUNDING_VOLUME_TREE_H
18 #define BT_DYNAMIC_BOUNDING_VOLUME_TREE_H
20 #include "LinearMath/btAlignedObjectArray.h"
21 #include "LinearMath/btVector3.h"
22 #include "LinearMath/btTransform.h"
23 #include "LinearMath/btAabbUtil2.h"
// NOTE(review): this excerpt is a mangled extraction — each line is prefixed with its
// original file line number and the matching #else/#endif lines are missing; the code
// below is kept byte-identical pending restoration from the canonical header.
26 // Compile time configuration
30 // Implementation profiles
// Two selectable implementations: scalar fallback vs SSE-accelerated paths.
31 #define DBVT_IMPL_GENERIC 0 // Generic implementation
32 #define DBVT_IMPL_SSE 1 // SSE
34 // Template implementation of ICollide
// Template policy dispatch only enabled on MSVC 2005+ (original had #else/#endif arms here).
36 #if (defined (_MSC_VER) && _MSC_VER >= 1400)
37 #define DBVT_USE_TEMPLATE 1
39 #define DBVT_USE_TEMPLATE 0
42 #define DBVT_USE_TEMPLATE 0
45 // Use only intrinsics instead of inline asm
46 #define DBVT_USE_INTRINSIC_SSE 1
48 // Using memmov for collideOCL
49 #define DBVT_USE_MEMMOVE 1
51 // Enable benchmarking code
52 #define DBVT_ENABLE_BENCHMARK 0
55 #define DBVT_INLINE SIMD_FORCE_INLINE
57 // Specific methods implementation
// Per-operation implementation selection: SSE on Win32 builds with BT_USE_SSE, generic otherwise.
59 //SSE gives errors on a MSVC 7.1
60 #if defined (BT_USE_SSE) && defined (_WIN32)
61 #define DBVT_SELECT_IMPL DBVT_IMPL_SSE
62 #define DBVT_MERGE_IMPL DBVT_IMPL_SSE
63 #define DBVT_INT0_IMPL DBVT_IMPL_SSE
65 #define DBVT_SELECT_IMPL DBVT_IMPL_GENERIC
66 #define DBVT_MERGE_IMPL DBVT_IMPL_GENERIC
67 #define DBVT_INT0_IMPL DBVT_IMPL_GENERIC
// Pull in SSE2 intrinsics when any SSE path is selected.
70 #if (DBVT_SELECT_IMPL==DBVT_IMPL_SSE)|| \
71 (DBVT_MERGE_IMPL==DBVT_IMPL_SSE)|| \
72 (DBVT_INT0_IMPL==DBVT_IMPL_SSE)
73 #include <emmintrin.h>
77 // Auto config and checks
// Template mode: policy passed as T&, compile-time check that T derives from ICollide.
// Non-template mode: policy passed as ICollide& with virtual dispatch.
82 #define DBVT_VIRTUAL_DTOR(a)
83 #define DBVT_PREFIX template <typename T>
84 #define DBVT_IPOLICY T& policy
85 #define DBVT_CHECKTYPE static const ICollide& typechecker=*(T*)1;(void)typechecker;
87 #define DBVT_VIRTUAL_DTOR(a) virtual ~a() {}
88 #define DBVT_VIRTUAL virtual
90 #define DBVT_IPOLICY ICollide& policy
91 #define DBVT_CHECKTYPE
95 #if !defined( __CELLOS_LV2__) && !defined(__MWERKS__)
// Sanity checks: fail the build loudly if any configuration macro is left undefined.
101 #ifndef DBVT_USE_TEMPLATE
102 #error "DBVT_USE_TEMPLATE undefined"
105 #ifndef DBVT_USE_MEMMOVE
106 #error "DBVT_USE_MEMMOVE undefined"
109 #ifndef DBVT_ENABLE_BENCHMARK
110 #error "DBVT_ENABLE_BENCHMARK undefined"
113 #ifndef DBVT_SELECT_IMPL
114 #error "DBVT_SELECT_IMPL undefined"
117 #ifndef DBVT_MERGE_IMPL
118 #error "DBVT_MERGE_IMPL undefined"
121 #ifndef DBVT_INT0_IMPL
122 #error "DBVT_INT0_IMPL undefined"
// Interior of struct btDbvtAabbMm (struct header outside this excerpt): an AABB stored
// as min/max corners (mi/mx), with accessors, factory functions and intersection helpers.
// NOTE(review): lines are kept byte-identical; extraction dropped surrounding braces.
132 DBVT_INLINE btVector3 Center() const { return((mi+mx)/2); } // midpoint of the box
133 DBVT_INLINE btVector3 Lengths() const { return(mx-mi); } // full edge lengths
134 DBVT_INLINE btVector3 Extents() const { return((mx-mi)/2); } // half extents
135 DBVT_INLINE const btVector3& Mins() const { return(mi); }
136 DBVT_INLINE const btVector3& Maxs() const { return(mx); }
// Factories: from center+extents, center+radius, min+max, and point clouds.
137 static inline btDbvtAabbMm FromCE(const btVector3& c,const btVector3& e);
138 static inline btDbvtAabbMm FromCR(const btVector3& c,btScalar r);
139 static inline btDbvtAabbMm FromMM(const btVector3& mi,const btVector3& mx);
140 static inline btDbvtAabbMm FromPoints(const btVector3* pts,int n);
141 static inline btDbvtAabbMm FromPoints(const btVector3** ppts,int n);
142 DBVT_INLINE void Expand(const btVector3& e);
143 DBVT_INLINE void SignedExpand(const btVector3& e);
144 DBVT_INLINE bool Contain(const btDbvtAabbMm& a) const;
145 DBVT_INLINE int Classify(const btVector3& n,btScalar o,int s) const;
146 DBVT_INLINE btScalar ProjectMinimum(const btVector3& v,unsigned signs) const;
// Friend helpers defined later in this header.
147 DBVT_INLINE friend bool Intersect( const btDbvtAabbMm& a,
148 const btDbvtAabbMm& b);
150 DBVT_INLINE friend bool Intersect( const btDbvtAabbMm& a,
153 DBVT_INLINE friend btScalar Proximity( const btDbvtAabbMm& a,
154 const btDbvtAabbMm& b);
155 DBVT_INLINE friend int Select( const btDbvtAabbMm& o,
156 const btDbvtAabbMm& a,
157 const btDbvtAabbMm& b);
158 DBVT_INLINE friend void Merge( const btDbvtAabbMm& a,
159 const btDbvtAabbMm& b,
161 DBVT_INLINE friend bool NotEqual( const btDbvtAabbMm& a,
162 const btDbvtAabbMm& b);
164 DBVT_INLINE void AddSpan(const btVector3& d,btScalar& smi,btScalar& smx) const;
// Trees use btDbvtVolume as an alias so the volume type can be swapped.
170 typedef btDbvtAabbMm btDbvtVolume;
// Interior of struct btDbvtNode: a leaf is encoded by childs[1]==0 (leaves store user
// data in the union that shares storage with childs — union not visible in this excerpt).
177 DBVT_INLINE bool isleaf() const { return(childs[1]==0); }
178 DBVT_INLINE bool isinternal() const { return(!isleaf()); }
181 btDbvtNode* childs[2];
187 ///The btDbvt class implements a fast dynamic bounding volume tree based on axis aligned bounding boxes (aabb tree).
188 ///This btDbvt is used for soft body collision detection and for the btDbvtBroadphase. It has a fast insert, remove and update of nodes.
189 ///Unlike the btQuantizedBvh, nodes can be dynamically moved around, which allows for change in topology of the underlying data structure.
// Stack element types used by the traversal routines below:
// sStkNN: node/node pair, sStkNP: node+plane mask, sStkNPS: node+mask+sort value,
// sStkCLN: node+parent for cloning. (Member declarations partially missing in excerpt.)
198 sStkNN(const btDbvtNode* na,const btDbvtNode* nb) : a(na),b(nb) {}
202 const btDbvtNode* node;
204 sStkNP(const btDbvtNode* n,unsigned m) : node(n),mask(m) {}
208 const btDbvtNode* node;
212 sStkNPS(const btDbvtNode* n,unsigned m,btScalar v) : node(n),mask(m),value(v) {}
216 const btDbvtNode* node;
218 sStkCLN(const btDbvtNode* n,btDbvtNode* p) : node(n),parent(p) {}
220 // Policies/Interfaces
// ICollide: callback policy for traversals; default implementations are no-ops that
// visit everything (Descent/AllLeaves return true).
225 DBVT_VIRTUAL_DTOR(ICollide)
226 DBVT_VIRTUAL void Process(const btDbvtNode*,const btDbvtNode*) {}
227 DBVT_VIRTUAL void Process(const btDbvtNode*) {}
228 DBVT_VIRTUAL void Process(const btDbvtNode* n,btScalar) { Process(n); }
229 DBVT_VIRTUAL bool Descent(const btDbvtNode*) { return(true); }
230 DBVT_VIRTUAL bool AllLeaves(const btDbvtNode*) { return(true); }
// IWriter: serialization sink for write(); IClone: per-leaf hook for clone().
235 virtual ~IWriter() {}
236 virtual void Prepare(const btDbvtNode* root,int numnodes)=0;
237 virtual void WriteNode(const btDbvtNode*,int index,int parent,int child0,int child1)=0;
238 virtual void WriteLeaf(const btDbvtNode*,int index,int parent)=0;
244 virtual void CloneLeaf(btDbvtNode*) {}
// Initial sizes for the traversal stacks (doubled on demand).
249 SIMPLE_STACKSIZE = 64,
250 DOUBLE_STACKSIZE = SIMPLE_STACKSIZE*2
// btDbvt members and method declarations. NOTE(review): many declaration lines
// (closing parens, DBVT_IPOLICY parameters) were dropped by the extraction.
261 btAlignedObjectArray<sStkNN> m_stkStack; // persistent stack reused by collideTTpersistentStack
268 bool empty() const { return(0==m_root); }
269 void optimizeBottomUp();
270 void optimizeTopDown(int bu_treshold=128);
271 void optimizeIncremental(int passes);
272 btDbvtNode* insert(const btDbvtVolume& box,void* data);
// update() overloads: reinsert a leaf, optionally with a new volume, velocity and/or margin.
273 void update(btDbvtNode* leaf,int lookahead=-1);
274 void update(btDbvtNode* leaf,btDbvtVolume& volume);
275 bool update(btDbvtNode* leaf,btDbvtVolume& volume,const btVector3& velocity,btScalar margin);
276 bool update(btDbvtNode* leaf,btDbvtVolume& volume,const btVector3& velocity);
277 bool update(btDbvtNode* leaf,btDbvtVolume& volume,btScalar margin);
278 void remove(btDbvtNode* leaf);
279 void write(IWriter* iwriter) const;
280 void clone(btDbvt& dest,IClone* iclone=0) const;
281 static int maxdepth(const btDbvtNode* node);
282 static int countLeaves(const btDbvtNode* node);
283 static void extractLeaves(const btDbvtNode* node,btAlignedObjectArray<const btDbvtNode*>& leaves);
284 #if DBVT_ENABLE_BENCHMARK
285 static void benchmark();
287 static void benchmark(){}
289 // DBVT_IPOLICY must support ICollide policy/interface
291 static void enumNodes( const btDbvtNode* root,
294 static void enumLeaves( const btDbvtNode* root,
297 void collideTT( const btDbvtNode* root0,
298 const btDbvtNode* root1,
302 void collideTTpersistentStack( const btDbvtNode* root0,
303 const btDbvtNode* root1,
307 void collideTT( const btDbvtNode* root0,
308 const btDbvtNode* root1,
309 const btTransform& xform,
312 void collideTT( const btDbvtNode* root0,
313 const btTransform& xform0,
314 const btDbvtNode* root1,
315 const btTransform& xform1,
320 void collideTV( const btDbvtNode* root,
321 const btDbvtVolume& volume,
323 ///rayTest is a re-entrant ray test, and can be called in parallel as long as the btAlignedAlloc is thread-safe (uses locking etc)
324 ///rayTest is slower than rayTestInternal, because it builds a local stack, using memory allocations, and it recomputes signs/rayDirectionInverses each time
326 static void rayTest( const btDbvtNode* root,
327 const btVector3& rayFrom,
328 const btVector3& rayTo,
330 ///rayTestInternal is faster than rayTest, because it uses a persistent stack (to reduce dynamic memory allocations to a minimum) and it uses precomputed signs/rayInverseDirections
331 ///rayTestInternal is used by btDbvtBroadphase to accelerate world ray casts
333 void rayTestInternal( const btDbvtNode* root,
334 const btVector3& rayFrom,
335 const btVector3& rayTo,
336 const btVector3& rayDirectionInverse,
337 unsigned int signs[3],
339 const btVector3& aabbMin,
340 const btVector3& aabbMax,
344 static void collideKDOP(const btDbvtNode* root,
345 const btVector3* normals,
346 const btScalar* offsets,
350 static void collideOCL( const btDbvtNode* root,
351 const btVector3* normals,
352 const btScalar* offsets,
353 const btVector3& sortaxis,
358 static void collideTU( const btDbvtNode* root,
// nearest(): binary search over the sorted index stack used by collideOCL
// (loop header and braces missing in excerpt).
361 static DBVT_INLINE int nearest(const int* i,const btDbvt::sStkNPS* a,btScalar v,int l,int h)
367 if(a[i[m]].value>=v) l=m+1; else h=m;
// allocate(): reuse a free slot in 'stock' if available, else append; returns the slot index.
371 static DBVT_INLINE int allocate( btAlignedObjectArray<int>& ifree,
372 btAlignedObjectArray<sStkNPS>& stock,
373 const sStkNPS& value)
377 { i=ifree[ifree.size()-1];ifree.pop_back();stock[i]=value; }
379 { i=stock.size();stock.push_back(value); }
// Copy construction intentionally disabled (private, empty body).
384 btDbvt(const btDbvt&) {}
392 inline btDbvtAabbMm btDbvtAabbMm::FromCE(const btVector3& c,const btVector3& e)
395 box.mi=c-e;box.mx=c+e;
400 inline btDbvtAabbMm btDbvtAabbMm::FromCR(const btVector3& c,btScalar r)
402 return(FromCE(c,btVector3(r,r,r)));
406 inline btDbvtAabbMm btDbvtAabbMm::FromMM(const btVector3& mi,const btVector3& mx)
414 inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3* pts,int n)
417 box.mi=box.mx=pts[0];
420 box.mi.setMin(pts[i]);
421 box.mx.setMax(pts[i]);
427 inline btDbvtAabbMm btDbvtAabbMm::FromPoints(const btVector3** ppts,int n)
430 box.mi=box.mx=*ppts[0];
433 box.mi.setMin(*ppts[i]);
434 box.mx.setMax(*ppts[i]);
440 DBVT_INLINE void btDbvtAabbMm::Expand(const btVector3& e)
446 DBVT_INLINE void btDbvtAabbMm::SignedExpand(const btVector3& e)
448 if(e.x()>0) mx.setX(mx.x()+e[0]); else mi.setX(mi.x()+e[0]);
449 if(e.y()>0) mx.setY(mx.y()+e[1]); else mi.setY(mi.y()+e[1]);
450 if(e.z()>0) mx.setZ(mx.z()+e[2]); else mi.setZ(mi.z()+e[2]);
454 DBVT_INLINE bool btDbvtAabbMm::Contain(const btDbvtAabbMm& a) const
456 return( (mi.x()<=a.mi.x())&&
465 DBVT_INLINE int btDbvtAabbMm::Classify(const btVector3& n,btScalar o,int s) const
470 case (0+0+0): px=btVector3(mi.x(),mi.y(),mi.z());
471 pi=btVector3(mx.x(),mx.y(),mx.z());break;
472 case (1+0+0): px=btVector3(mx.x(),mi.y(),mi.z());
473 pi=btVector3(mi.x(),mx.y(),mx.z());break;
474 case (0+2+0): px=btVector3(mi.x(),mx.y(),mi.z());
475 pi=btVector3(mx.x(),mi.y(),mx.z());break;
476 case (1+2+0): px=btVector3(mx.x(),mx.y(),mi.z());
477 pi=btVector3(mi.x(),mi.y(),mx.z());break;
478 case (0+0+4): px=btVector3(mi.x(),mi.y(),mx.z());
479 pi=btVector3(mx.x(),mx.y(),mi.z());break;
480 case (1+0+4): px=btVector3(mx.x(),mi.y(),mx.z());
481 pi=btVector3(mi.x(),mx.y(),mi.z());break;
482 case (0+2+4): px=btVector3(mi.x(),mx.y(),mx.z());
483 pi=btVector3(mx.x(),mi.y(),mi.z());break;
484 case (1+2+4): px=btVector3(mx.x(),mx.y(),mx.z());
485 pi=btVector3(mi.x(),mi.y(),mi.z());break;
487 if((btDot(n,px)+o)<0) return(-1);
488 if((btDot(n,pi)+o)>=0) return(+1);
493 DBVT_INLINE btScalar btDbvtAabbMm::ProjectMinimum(const btVector3& v,unsigned signs) const
495 const btVector3* b[]={&mx,&mi};
496 const btVector3 p( b[(signs>>0)&1]->x(),
497 b[(signs>>1)&1]->y(),
498 b[(signs>>2)&1]->z());
503 DBVT_INLINE void btDbvtAabbMm::AddSpan(const btVector3& d,btScalar& smi,btScalar& smx) const
508 { smi+=mx[i]*d[i];smx+=mi[i]*d[i]; }
510 { smi+=mi[i]*d[i];smx+=mx[i]*d[i]; }
515 DBVT_INLINE bool Intersect( const btDbvtAabbMm& a,
516 const btDbvtAabbMm& b)
518 #if DBVT_INT0_IMPL == DBVT_IMPL_SSE
519 const __m128 rt(_mm_or_ps( _mm_cmplt_ps(_mm_load_ps(b.mx),_mm_load_ps(a.mi)),
520 _mm_cmplt_ps(_mm_load_ps(a.mx),_mm_load_ps(b.mi))));
521 const __int32* pu((const __int32*)&rt);
522 return((pu[0]|pu[1]|pu[2])==0);
524 return( (a.mi.x()<=b.mx.x())&&
525 (a.mx.x()>=b.mi.x())&&
526 (a.mi.y()<=b.mx.y())&&
527 (a.mx.y()>=b.mi.y())&&
528 (a.mi.z()<=b.mx.z())&&
529 (a.mx.z()>=b.mi.z()));
536 DBVT_INLINE bool Intersect( const btDbvtAabbMm& a,
539 return( (b.x()>=a.mi.x())&&
551 //////////////////////////////////////
555 DBVT_INLINE btScalar Proximity( const btDbvtAabbMm& a,
556 const btDbvtAabbMm& b)
558 const btVector3 d=(a.mi+a.mx)-(b.mi+b.mx);
559 return(btFabs(d.x())+btFabs(d.y())+btFabs(d.z()));
// Select(): return 0 if box a is closer to o than b (by the Proximity metric), else 1.
// SSE path computes |(a.mi+a.mx)-(o.mi+o.mx)| component sums via abs-masking and
// horizontal adds, then compares the two scalar sums.
// NOTE(review): kept byte-identical — the btSSEUnion body, the non-intrinsic inline-asm
// alternative (#else DBVT_USE_INTRINSIC_SSE) and the closing #endif lines are missing
// from this excerpt, so a safe reconstruction is not possible here.
565 DBVT_INLINE int Select( const btDbvtAabbMm& o,
566 const btDbvtAabbMm& a,
567 const btDbvtAabbMm& b)
569 #if DBVT_SELECT_IMPL == DBVT_IMPL_SSE
// Mask clearing the float sign bit in all four lanes (absolute value via _mm_and_ps).
570 static ATTRIBUTE_ALIGNED16(const unsigned __int32) mask[]={0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff};
571 ///@todo: the intrinsic version is 11% slower
572 #if DBVT_USE_INTRINSIC_SSE
574 union btSSEUnion ///NOTE: if we use more intrinsics, move btSSEUnion into the LinearMath directory
581 __m128 omi(_mm_load_ps(o.mi));
582 omi=_mm_add_ps(omi,_mm_load_ps(o.mx));
583 __m128 ami(_mm_load_ps(a.mi));
584 ami=_mm_add_ps(ami,_mm_load_ps(a.mx));
585 ami=_mm_sub_ps(ami,omi);
586 ami=_mm_and_ps(ami,_mm_load_ps((const float*)mask));
587 __m128 bmi(_mm_load_ps(b.mi));
588 bmi=_mm_add_ps(bmi,_mm_load_ps(b.mx));
589 bmi=_mm_sub_ps(bmi,omi);
590 bmi=_mm_and_ps(bmi,_mm_load_ps((const float*)mask));
// Horizontal add of the three |d| components into lane 0 for each candidate.
591 __m128 t0(_mm_movehl_ps(ami,ami));
592 ami=_mm_add_ps(ami,t0);
593 ami=_mm_add_ss(ami,_mm_shuffle_ps(ami,ami,1));
594 __m128 t1(_mm_movehl_ps(bmi,bmi));
595 bmi=_mm_add_ps(bmi,t1);
596 bmi=_mm_add_ss(bmi,_mm_shuffle_ps(bmi,bmi,1));
599 tmp.ssereg = _mm_cmple_ss(bmi,ami);
600 return tmp.ints[0]&1;
603 ATTRIBUTE_ALIGNED16(__int32 r[1]);
// Generic fallback: straightforward proximity comparison.
634 return(Proximity(o,a)<Proximity(o,b)?0:1);
639 DBVT_INLINE void Merge( const btDbvtAabbMm& a,
640 const btDbvtAabbMm& b,
643 #if DBVT_MERGE_IMPL==DBVT_IMPL_SSE
644 __m128 ami(_mm_load_ps(a.mi));
645 __m128 amx(_mm_load_ps(a.mx));
646 __m128 bmi(_mm_load_ps(b.mi));
647 __m128 bmx(_mm_load_ps(b.mx));
648 ami=_mm_min_ps(ami,bmi);
649 amx=_mm_max_ps(amx,bmx);
650 _mm_store_ps(r.mi,ami);
651 _mm_store_ps(r.mx,amx);
655 if(a.mi[i]<b.mi[i]) r.mi[i]=a.mi[i]; else r.mi[i]=b.mi[i];
656 if(a.mx[i]>b.mx[i]) r.mx[i]=a.mx[i]; else r.mx[i]=b.mx[i];
662 DBVT_INLINE bool NotEqual( const btDbvtAabbMm& a,
663 const btDbvtAabbMm& b)
665 return( (a.mi.x()!=b.mi.x())||
666 (a.mi.y()!=b.mi.y())||
667 (a.mi.z()!=b.mi.z())||
668 (a.mx.x()!=b.mx.x())||
669 (a.mx.y()!=b.mx.y())||
670 (a.mx.z()!=b.mx.z()));
679 inline void btDbvt::enumNodes( const btDbvtNode* root,
683 policy.Process(root);
684 if(root->isinternal())
686 enumNodes(root->childs[0],policy);
687 enumNodes(root->childs[1],policy);
693 inline void btDbvt::enumLeaves( const btDbvtNode* root,
697 if(root->isinternal())
699 enumLeaves(root->childs[0],policy);
700 enumLeaves(root->childs[1],policy);
704 policy.Process(root);
// collideTT(): tree-vs-tree overlap traversal with an explicit local stack.
// Pushes child pairs of intersecting nodes; calls policy.Process(a,b) on leaf/leaf
// pairs. The self-collision case (root0==root1, presumably — the guard line is
// missing from this excerpt) expands one node against its own children.
// NOTE(review): kept byte-identical — function braces, the DBVT_IPOLICY parameter,
// the do/while header and several guard lines are missing from this excerpt.
710 inline void btDbvt::collideTT( const btDbvtNode* root0,
711 const btDbvtNode* root1,
// Stack grows by doubling when depth crosses 'treshold' (sic — original spelling).
718 int treshold=DOUBLE_STACKSIZE-4;
719 btAlignedObjectArray<sStkNN> stkStack;
720 stkStack.resize(DOUBLE_STACKSIZE);
721 stkStack[0]=sStkNN(root0,root1);
723 sStkNN p=stkStack[--depth];
726 stkStack.resize(stkStack.size()*2);
727 treshold=stkStack.size()-4;
// Self-pair expansion: schedule (c0,c0), (c1,c1) and the cross pair (c0,c1).
731 if(p.a->isinternal())
733 stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[0]);
734 stkStack[depth++]=sStkNN(p.a->childs[1],p.a->childs[1]);
735 stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[1]);
// Distinct pair: descend into whichever side is internal.
738 else if(Intersect(p.a->volume,p.b->volume))
740 if(p.a->isinternal())
742 if(p.b->isinternal())
744 stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[0]);
745 stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[0]);
746 stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[1]);
747 stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[1]);
751 stkStack[depth++]=sStkNN(p.a->childs[0],p.b);
752 stkStack[depth++]=sStkNN(p.a->childs[1],p.b);
757 if(p.b->isinternal())
759 stkStack[depth++]=sStkNN(p.a,p.b->childs[0]);
760 stkStack[depth++]=sStkNN(p.a,p.b->childs[1]);
// Leaf/leaf: report the overlapping pair to the policy.
764 policy.Process(p.a,p.b);
// collideTTpersistentStack(): identical traversal to collideTT() above, but reuses
// the member m_stkStack across calls to avoid per-call allocations (hence non-static,
// non-const, and not re-entrant on the same tree object).
// NOTE(review): kept byte-identical — braces, DBVT_IPOLICY parameter and loop headers
// are missing from this excerpt.
775 inline void btDbvt::collideTTpersistentStack( const btDbvtNode* root0,
776 const btDbvtNode* root1,
783 int treshold=DOUBLE_STACKSIZE-4;
785 m_stkStack.resize(DOUBLE_STACKSIZE);
786 m_stkStack[0]=sStkNN(root0,root1);
788 sStkNN p=m_stkStack[--depth];
// Double the persistent stack when nearly full.
791 m_stkStack.resize(m_stkStack.size()*2);
792 treshold=m_stkStack.size()-4;
// Self-pair expansion.
796 if(p.a->isinternal())
798 m_stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[0]);
799 m_stkStack[depth++]=sStkNN(p.a->childs[1],p.a->childs[1]);
800 m_stkStack[depth++]=sStkNN(p.a->childs[0],p.a->childs[1]);
803 else if(Intersect(p.a->volume,p.b->volume))
805 if(p.a->isinternal())
807 if(p.b->isinternal())
809 m_stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[0]);
810 m_stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[0]);
811 m_stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[1]);
812 m_stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[1]);
816 m_stkStack[depth++]=sStkNN(p.a->childs[0],p.b);
817 m_stkStack[depth++]=sStkNN(p.a->childs[1],p.b);
822 if(p.b->isinternal())
824 m_stkStack[depth++]=sStkNN(p.a,p.b->childs[0]);
825 m_stkStack[depth++]=sStkNN(p.a,p.b->childs[1]);
829 policy.Process(p.a,p.b);
// collideTT() with a relative transform: like the untransformed version but the
// volume overlap test takes xform (the transform of tree1 relative to tree0).
// No self-pair special case here — the two trees are distinct by construction.
// NOTE(review): kept byte-identical — braces, DBVT_IPOLICY parameter and loop headers
// are missing from this excerpt.
840 inline void btDbvt::collideTT( const btDbvtNode* root0,
841 const btDbvtNode* root1,
842 const btTransform& xform,
849 int treshold=DOUBLE_STACKSIZE-4;
850 btAlignedObjectArray<sStkNN> stkStack;
851 stkStack.resize(DOUBLE_STACKSIZE);
852 stkStack[0]=sStkNN(root0,root1);
854 sStkNN p=stkStack[--depth];
// Transform-aware overlap test (3-argument Intersect overload).
855 if(Intersect(p.a->volume,p.b->volume,xform))
859 stkStack.resize(stkStack.size()*2);
860 treshold=stkStack.size()-4;
862 if(p.a->isinternal())
864 if(p.b->isinternal())
866 stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[0]);
867 stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[0]);
868 stkStack[depth++]=sStkNN(p.a->childs[0],p.b->childs[1]);
869 stkStack[depth++]=sStkNN(p.a->childs[1],p.b->childs[1]);
873 stkStack[depth++]=sStkNN(p.a->childs[0],p.b);
874 stkStack[depth++]=sStkNN(p.a->childs[1],p.b);
879 if(p.b->isinternal())
881 stkStack[depth++]=sStkNN(p.a,p.b->childs[0]);
882 stkStack[depth++]=sStkNN(p.a,p.b->childs[1]);
886 policy.Process(p.a,p.b);
895 inline void btDbvt::collideTT( const btDbvtNode* root0,
896 const btTransform& xform0,
897 const btDbvtNode* root1,
898 const btTransform& xform1,
901 const btTransform xform=xform0.inverse()*xform1;
902 collideTT(root0,root1,xform,policy);
// collideTV(): collide the tree against a single query volume; policy.Process(leaf)
// is called for every leaf whose volume intersects 'vol'.
// NOTE(review): kept byte-identical — braces, DBVT_IPOLICY parameter, do-header,
// pop_back and the else/Process branching lines are missing from this excerpt.
908 inline void btDbvt::collideTV( const btDbvtNode* root,
909 const btDbvtVolume& vol,
// Aligned local copy — presumably so the SSE Intersect path can _mm_load_ps it; confirm.
915 ATTRIBUTE_ALIGNED16(btDbvtVolume) volume(vol);
916 btAlignedObjectArray<const btDbvtNode*> stack;
918 stack.reserve(SIMPLE_STACKSIZE);
919 stack.push_back(root);
921 const btDbvtNode* n=stack[stack.size()-1];
923 if(Intersect(n->volume,volume))
// Internal node overlapping the query: descend into both children.
927 stack.push_back(n->childs[0]);
928 stack.push_back(n->childs[1]);
935 } while(stack.size()>0);
// rayTestInternal(): fast ray traversal using caller-precomputed inverse direction
// and sign bits (see rayTest below for how those are derived). The node bounds are
// expanded by [aabbMin,aabbMax] (query object extents) before the slab test.
// Used by btDbvtBroadphase for world ray casts.
// NOTE(review): kept byte-identical — braces, the lambda_max parameter line, the
// do-header and the result1 test line are missing from this excerpt.
940 inline void btDbvt::rayTestInternal( const btDbvtNode* root,
941 const btVector3& rayFrom,
942 const btVector3& rayTo,
943 const btVector3& rayDirectionInverse,
944 unsigned int signs[3],
946 const btVector3& aabbMin,
947 const btVector3& aabbMax,
954 btVector3 resultNormal;
957 int treshold=DOUBLE_STACKSIZE-2;
958 btAlignedObjectArray<const btDbvtNode*> stack;
959 stack.resize(DOUBLE_STACKSIZE);
964 const btDbvtNode* node=stack[--depth];
// Inflate the node AABB by the query extents (Minkowski sum) before the ray/slab test.
965 bounds[0] = node->volume.Mins()-aabbMax;
966 bounds[1] = node->volume.Maxs()-aabbMin;
967 btScalar tmin=1.f,lambda_min=0.f;
968 unsigned int result1=false;
969 result1 = btRayAabb2(rayFrom,rayDirectionInverse,signs,bounds,tmin,lambda_min,lambda_max);
972 if(node->isinternal())
// Grow the traversal stack by doubling when nearly full.
976 stack.resize(stack.size()*2);
977 treshold=stack.size()-2;
979 stack[depth++]=node->childs[0];
980 stack[depth++]=node->childs[1];
984 policy.Process(node);
// rayTest(): re-entrant ray traversal. Recomputes the inverse direction and sign
// bits locally on every call (slower than rayTestInternal, but safe to run in
// parallel per the class-level comment).
// NOTE(review): kept byte-identical — braces, DBVT_IPOLICY parameter, do-header and
// several connective lines are missing from this excerpt.
993 inline void btDbvt::rayTest( const btDbvtNode* root,
994 const btVector3& rayFrom,
995 const btVector3& rayTo,
1001 btVector3 rayDir = (rayTo-rayFrom);
1002 rayDir.normalize ();
1004 ///what about division by zero? --> just set rayDirection[i] to INF/BT_LARGE_FLOAT
1005 btVector3 rayDirectionInverse;
1006 rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
1007 rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
1008 rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
// Per-axis direction sign bits consumed by btRayAabb2's slab test.
1009 unsigned int signs[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
1011 btScalar lambda_max = rayDir.dot(rayTo-rayFrom);
1013 btVector3 resultNormal;
1015 btAlignedObjectArray<const btDbvtNode*> stack;
1018 int treshold=DOUBLE_STACKSIZE-2;
1020 stack.resize(DOUBLE_STACKSIZE);
1022 btVector3 bounds[2];
1024 const btDbvtNode* node=stack[--depth];
1026 bounds[0] = node->volume.Mins();
1027 bounds[1] = node->volume.Maxs();
1029 btScalar tmin=1.f,lambda_min=0.f;
1030 unsigned int result1 = btRayAabb2(rayFrom,rayDirectionInverse,signs,bounds,tmin,lambda_min,lambda_max);
// Optional cross-check of the fast slab test against the reference btRayAabb.
1032 #ifdef COMPARE_BTRAY_AABB2
1034 bool result2 = btRayAabb(rayFrom,rayTo,node->volume.Mins(),node->volume.Maxs(),param,resultNormal);
1035 btAssert(result1 == result2);
1036 #endif //TEST_BTRAY_AABB2
1040 if(node->isinternal())
1044 stack.resize(stack.size()*2);
1045 treshold=stack.size()-2;
1047 stack[depth++]=node->childs[0];
1048 stack[depth++]=node->childs[1];
1052 policy.Process(node);
// collideKDOP(): cull the tree against a convex region described by 'count' planes
// (normals/offsets). Each stack entry carries a bitmask of planes the node is already
// fully inside, so children skip re-testing those planes.
// NOTE(review): kept byte-identical — braces, the count/DBVT_IPOLICY parameter lines,
// do-header, pop_back, the out/switch scaffolding and the inside-test lines are
// missing from this excerpt.
1062 inline void btDbvt::collideKDOP(const btDbvtNode* root,
1063 const btVector3* normals,
1064 const btScalar* offsets,
// All 'count' mask bits set == node fully inside the region.
1071 const int inside=(1<<count)-1;
1072 btAlignedObjectArray<sStkNP> stack;
1073 int signs[sizeof(unsigned)*8];
1074 btAssert(count<int (sizeof(signs)/sizeof(signs[0])));
// Precompute the per-plane corner-selection code consumed by Classify().
1075 for(int i=0;i<count;++i)
1077 signs[i]= ((normals[i].x()>=0)?1:0)+
1078 ((normals[i].y()>=0)?2:0)+
1079 ((normals[i].z()>=0)?4:0);
1081 stack.reserve(SIMPLE_STACKSIZE);
1082 stack.push_back(sStkNP(root,0));
1084 sStkNP se=stack[stack.size()-1];
// Test only planes whose mask bit isn't set yet; -1 ends the node (outside).
1087 for(int i=0,j=1;(!out)&&(i<count);++i,j<<=1)
1091 const int side=se.node->volume.Classify(normals[i],offsets[i],signs[i]);
1094 case -1: out=true;break;
1095 case +1: se.mask|=j;break;
// Partially-inside internal node: descend, propagating the accumulated mask.
1101 if((se.mask!=inside)&&(se.node->isinternal()))
1103 stack.push_back(sStkNP(se.node->childs[0],se.mask));
1104 stack.push_back(sStkNP(se.node->childs[1],se.mask));
// Fully inside (or a leaf): hand the whole subtree to the policy.
1108 if(policy.AllLeaves(se.node)) enumLeaves(se.node,policy);
1111 } while(stack.size());
// collideOCL(): occlusion-style culling — like collideKDOP but nodes are processed
// in front-to-back order along 'sortaxis'. The pending set is kept as index handles
// ('stack') into a slot pool ('stock' + free list 'ifree'), sorted by each node's
// minimum projection; policy.Descent() can prune, policy.Process(node,value) reports.
// NOTE(review): kept byte-identical — braces, the count/fullsort/DBVT_IPOLICY
// parameter lines, do-header, switch scaffolding and #else/#endif lines are missing
// from this excerpt.
1117 inline void btDbvt::collideOCL( const btDbvtNode* root,
1118 const btVector3* normals,
1119 const btScalar* offsets,
1120 const btVector3& sortaxis,
// Sign code of the sort axis, used to pick the nearest box corner for projection.
1128 const unsigned srtsgns=(sortaxis[0]>=0?1:0)+
1129 (sortaxis[1]>=0?2:0)+
1130 (sortaxis[2]>=0?4:0);
1131 const int inside=(1<<count)-1;
1132 btAlignedObjectArray<sStkNPS> stock;
1133 btAlignedObjectArray<int> ifree;
1134 btAlignedObjectArray<int> stack;
1135 int signs[sizeof(unsigned)*8];
1136 btAssert(count<int (sizeof(signs)/sizeof(signs[0])));
1137 for(int i=0;i<count;++i)
1139 signs[i]= ((normals[i].x()>=0)?1:0)+
1140 ((normals[i].y()>=0)?2:0)+
1141 ((normals[i].z()>=0)?4:0);
1143 stock.reserve(SIMPLE_STACKSIZE);
1144 stack.reserve(SIMPLE_STACKSIZE);
1145 ifree.reserve(SIMPLE_STACKSIZE);
1146 stack.push_back(allocate(ifree,stock,sStkNPS(root,0,root->volume.ProjectMinimum(sortaxis,srtsgns))));
// Pop the current nearest entry; recycle its pool slot.
1148 const int id=stack[stack.size()-1];
1149 sStkNPS se=stock[id];
1150 stack.pop_back();ifree.push_back(id);
// Plane-mask culling, identical in spirit to collideKDOP.
1154 for(int i=0,j=1;(!out)&&(i<count);++i,j<<=1)
1158 const int side=se.node->volume.Classify(normals[i],offsets[i],signs[i]);
1161 case -1: out=true;break;
1162 case +1: se.mask|=j;break;
1168 if(policy.Descent(se.node))
1170 if(se.node->isinternal())
// Push both children with their projection values, nearer child handled last (q).
1172 const btDbvtNode* pns[]={ se.node->childs[0],se.node->childs[1]};
1173 sStkNPS nes[]={ sStkNPS(pns[0],se.mask,pns[0]->volume.ProjectMinimum(sortaxis,srtsgns)),
1174 sStkNPS(pns[1],se.mask,pns[1]->volume.ProjectMinimum(sortaxis,srtsgns))};
1175 const int q=nes[0].value<nes[1].value?1:0;
// Sorted insert path: binary-search the slot, shift with memmove or a manual loop.
1180 j=nearest(&stack[0],&stock[0],nes[q].value,0,stack.size());
1182 #if DBVT_USE_MEMMOVE
1183 memmove(&stack[j+1],&stack[j],sizeof(int)*(stack.size()-j-1));
1185 for(int k=stack.size()-1;k>j;--k) stack[k]=stack[k-1];
1187 stack[j]=allocate(ifree,stock,nes[q]);
1189 j=nearest(&stack[0],&stock[0],nes[1-q].value,j,stack.size());
1191 #if DBVT_USE_MEMMOVE
1192 memmove(&stack[j+1],&stack[j],sizeof(int)*(stack.size()-j-1));
1194 for(int k=stack.size()-1;k>j;--k) stack[k]=stack[k-1];
1196 stack[j]=allocate(ifree,stock,nes[1-q]);
// Unsorted path (presumably when fullsort is off — parameter line missing; confirm).
1200 stack.push_back(allocate(ifree,stock,nes[q]));
1201 stack.push_back(allocate(ifree,stock,nes[1-q]));
// Leaf: report with its projection value.
1206 policy.Process(se.node,se.value);
1209 } while(stack.size());
1215 inline void btDbvt::collideTU( const btDbvtNode* root,
1221 btAlignedObjectArray<const btDbvtNode*> stack;
1222 stack.reserve(SIMPLE_STACKSIZE);
1223 stack.push_back(root);
1225 const btDbvtNode* n=stack[stack.size()-1];
1227 if(policy.Descent(n))
1230 { stack.push_back(n->childs[0]);stack.push_back(n->childs[1]); }
1232 { policy.Process(n); }
1234 } while(stack.size()>0);
// End-of-header cleanup: undefine the configuration macros so they don't leak into
// translation units that include this header.
1242 #undef DBVT_USE_MEMMOVE
1243 #undef DBVT_USE_TEMPLATE
1244 #undef DBVT_VIRTUAL_DTOR
1248 #undef DBVT_CHECKTYPE
1249 #undef DBVT_IMPL_GENERIC
1250 #undef DBVT_IMPL_SSE
1251 #undef DBVT_USE_INTRINSIC_SSE
1252 #undef DBVT_SELECT_IMPL
1253 #undef DBVT_MERGE_IMPL
1254 #undef DBVT_INT0_IMPL