rapier2d/geometry/broad_phase_bvh.rs
use crate::dynamics::{IntegrationParameters, RigidBodySet};
use crate::geometry::{Aabb, BroadPhasePairEvent, ColliderHandle, ColliderPair, ColliderSet};
use crate::math::Real;
use parry::partitioning::{Bvh, BvhWorkspace};
use parry::utils::hashmap::{Entry, HashMap};

/// The broad-phase collision detector that quickly filters out distant object pairs.
///
/// The broad-phase is the "first pass" of collision detection. It uses a hierarchical
/// bounding volume tree (BVH) to quickly identify which collider pairs are close enough
/// to potentially collide, avoiding expensive narrow-phase checks for distant objects.
///
/// Think of it as a "spatial index" that answers: "Which objects are near each other?"
///
/// You typically don't interact with this directly - it's managed by [`PhysicsPipeline`](crate::pipeline::PhysicsPipeline).
/// However, you can use it to create a [`QueryPipeline`](crate::pipeline::QueryPipeline) for spatial queries.
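///
/// A minimal construction sketch (it assumes `BroadPhaseBvh` is reachable through the usual
/// `rapier2d::prelude` re-exports):
///
/// ```ignore
/// use rapier2d::prelude::*;
///
/// // The broad-phase is normally owned alongside the rest of the physics state and
/// // passed to `PhysicsPipeline::step` every frame.
/// let mut broad_phase = BroadPhaseBvh::new();
/// ```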
#[derive(Default, Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct BroadPhaseBvh {
    pub(crate) tree: Bvh,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    workspace: BvhWorkspace,
    #[cfg_attr(
        feature = "serde-serialize",
        serde(
            serialize_with = "crate::utils::serde::serialize_to_vec_tuple",
            deserialize_with = "crate::utils::serde::deserialize_from_vec_tuple"
        )
    )]
    pairs: HashMap<(ColliderHandle, ColliderHandle), u32>,
    frame_index: u32,
    optimization_strategy: BvhOptimizationStrategy,
}

// TODO: would be interesting to try out:
// "Fast Insertion-Based Optimization of Bounding Volume Hierarchies"
// by Bittner et al.
/// Strategies for keeping the broad-phase BVH in a shape that remains efficient for
/// collision-detection and scene queries as the simulation evolves.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Default, PartialEq, Eq, Copy, Clone)]
pub enum BvhOptimizationStrategy {
    /// Different sub-trees of the BVH will be optimized at each frame.
    #[default]
    SubtreeOptimizer,
    /// Disables incremental BVH optimization (discouraged).
    ///
    /// This should not be used except for debugging purposes.
    None,
}

const ENABLE_TREE_VALIDITY_CHECK: bool = false;

impl BroadPhaseBvh {
    const CHANGE_DETECTION_ENABLED: bool = true;
    const CHANGE_DETECTION_FACTOR: Real = 1.0e-2;

    /// Initializes a new empty broad-phase.
    pub fn new() -> Self {
        Self::default()
    }

    /// Initializes a new empty broad-phase with the specified strategy for incremental
    /// BVH optimization.
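    ///
    /// A short usage sketch (it assumes `BroadPhaseBvh` and `BvhOptimizationStrategy` are
    /// reachable through `rapier2d::prelude`):
    ///
    /// ```ignore
    /// use rapier2d::prelude::*;
    ///
    /// // Keep incremental optimization enabled (this is also the default).
    /// let broad_phase =
    ///     BroadPhaseBvh::with_optimization_strategy(BvhOptimizationStrategy::SubtreeOptimizer);
    ///
    /// // Or disable it entirely, which is only recommended for debugging.
    /// let debug_broad_phase =
    ///     BroadPhaseBvh::with_optimization_strategy(BvhOptimizationStrategy::None);
    /// ```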
    pub fn with_optimization_strategy(optimization_strategy: BvhOptimizationStrategy) -> Self {
        Self {
            optimization_strategy,
            ..Default::default()
        }
    }

    /// Updates the broad-phase.
    ///
    /// The results are output through the `events` struct. The broad-phase algorithm is only
    /// required to generate new events (i.e. no need to re-send an `AddPair` event if it was already
    /// sent previously and no `RemovePair` happened since then). Sending redundant events is allowed
    /// but can result in a slight computational overhead.
    ///
    /// # Parameters
    /// - `params`: the integration parameters governing the simulation.
    /// - `colliders`: the set of colliders. Change detection with `collider.needs_broad_phase_update()`
    ///   can be relied on at this stage.
    /// - `modified_colliders`: colliders that are known to have been modified since the last update.
    /// - `removed_colliders`: colliders that were removed since the last update. Any associated data
    ///   in the broad-phase should be removed by this call to `update`.
    /// - `events`: the broad-phase’s output. They indicate what collision pairs need to be created
    ///   and what pairs need to be removed. It is OK to create pairs for colliders that don’t
    ///   actually collide (though this can increase computational overhead in the narrow-phase)
    ///   but it is important not to indicate removal of a collision pair if the underlying colliders
    ///   are still touching or closer than `prediction_distance`.
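    ///
    /// The sketch below illustrates the expected call pattern when driving the broad-phase
    /// manually (the [`PhysicsPipeline`](crate::pipeline::PhysicsPipeline) normally does this
    /// for you); it assumes the usual `rapier2d::prelude` re-exports:
    ///
    /// ```ignore
    /// use rapier2d::prelude::*;
    ///
    /// let params = IntegrationParameters::default();
    /// let bodies = RigidBodySet::new();
    /// let mut colliders = ColliderSet::new();
    /// let mut broad_phase = BroadPhaseBvh::new();
    ///
    /// // Insert a collider and report it as modified so the broad-phase picks it up.
    /// let handle = colliders.insert(ColliderBuilder::ball(0.5).build());
    ///
    /// let mut events = Vec::new();
    /// broad_phase.update(&params, &colliders, &bodies, &[handle], &[], &mut events);
    /// ```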
    pub fn update(
        &mut self,
        params: &IntegrationParameters,
        colliders: &ColliderSet,
        bodies: &RigidBodySet,
        modified_colliders: &[ColliderHandle],
        removed_colliders: &[ColliderHandle],
        events: &mut Vec<BroadPhasePairEvent>,
    ) {
        self.frame_index = self.frame_index.overflowing_add(1).0;

        // Removals must be handled first, in case another collider in
        // `modified_colliders` shares the same index.
        for handle in removed_colliders {
            self.tree.remove(handle.into_raw_parts().0);
        }

        // if modified_colliders.is_empty() {
        //     return;
        // }

        let first_pass = self.tree.is_empty();

        // let t0 = std::time::Instant::now();
        for modified in modified_colliders {
            if let Some(collider) = colliders.get(*modified) {
                if !collider.is_enabled() || !collider.changes.needs_broad_phase_update() {
                    continue;
                }

                let aabb = collider.compute_broad_phase_aabb(params, bodies);

                let change_detection_skin = if Self::CHANGE_DETECTION_ENABLED {
                    Self::CHANGE_DETECTION_FACTOR * params.length_unit
                } else {
                    0.0
                };

                self.tree.insert_or_update_partially(
                    aabb,
                    modified.into_raw_parts().0,
                    change_detection_skin,
                );
            }
        }

        if ENABLE_TREE_VALIDITY_CHECK {
            if first_pass {
                self.tree.assert_well_formed();
            }

            self.tree.assert_well_formed_topology_only();
        }

        // let t0 = std::time::Instant::now();
        match self.optimization_strategy {
            BvhOptimizationStrategy::SubtreeOptimizer => {
                self.tree.optimize_incremental(&mut self.workspace);
            }
            BvhOptimizationStrategy::None => {}
        };
        // println!(
        //     "Incremental optimization: {}",
        //     t0.elapsed().as_secs_f32() * 1000.0
        // );

        // NOTE: we run refit after optimization so we can skip updating internal nodes during
        // optimization, and so we can reorder the tree in memory (in depth-first order)
        // to make it more cache friendly after the rebuild has shuffled everything around.
        // let t0 = std::time::Instant::now();
        self.tree.refit(&mut self.workspace);

        if ENABLE_TREE_VALIDITY_CHECK {
            self.tree.assert_well_formed();
        }

        // println!("Refit: {}", t0.elapsed().as_secs_f32() * 1000.0);
        // println!(
        //     "leaf count: {}/{} (changed: {})",
        //     self.tree.leaf_count(),
        //     self.tree.reachable_leaf_count(0),
        //     self.tree.changed_leaf_count(0),
        // );
        // self.tree.assert_is_depth_first();
        // self.tree.assert_well_formed();
        // println!(
        //     "Is well formed. Tree height: {}",
        //     self.tree.subtree_height(0),
        // );
        // // println!("Tree quality: {}", self.tree.quality_metric());

        let mut pairs_collector = |co1: u32, co2: u32| {
            assert_ne!(co1, co2);

            let Some((_, mut handle1)) = colliders.get_unknown_gen(co1) else {
                return;
            };
            let Some((_, mut handle2)) = colliders.get_unknown_gen(co2) else {
                return;
            };

            if co1 > co2 {
                std::mem::swap(&mut handle1, &mut handle2);
            }

            match self.pairs.entry((handle1, handle2)) {
                Entry::Occupied(e) => *e.into_mut() = self.frame_index,
                Entry::Vacant(e) => {
                    e.insert(self.frame_index);
                    events.push(BroadPhasePairEvent::AddPair(ColliderPair::new(
                        handle1, handle2,
                    )));
                }
            }
        };

        // let t0 = std::time::Instant::now();
        self.tree
            .traverse_bvtt_single_tree::<{ Self::CHANGE_DETECTION_ENABLED }>(
                &mut self.workspace,
                &mut pairs_collector,
            );
        // println!("Detection: {}", t0.elapsed().as_secs_f32() * 1000.0);
        // println!(">>>>>> Num events: {}", events.iter().len());

        // Find outdated entries.
        // TODO PERF:
        // Currently, the narrow-phase isn’t capable of removing its own outdated
        // collision pairs. So we need to run a pass here to find AABBs that are
        // no longer overlapping. This, and the pair deduplication happening in
        // `pairs_collector`, is expensive and should be done more efficiently
        // by the narrow-phase itself (or islands) once we rework it.
        //
        // let t0 = std::time::Instant::now();
        self.pairs.retain(|(h0, h1), timestamp| {
            if *timestamp != self.frame_index {
                if !colliders.contains(*h0) || !colliders.contains(*h1) {
                    // At least one of the colliders no longer exists, don’t retain the pair.
                    return false;
                }

                let Some(node0) = self.tree.leaf_node(h0.into_raw_parts().0) else {
                    return false;
                };
                let Some(node1) = self.tree.leaf_node(h1.into_raw_parts().0) else {
                    return false;
                };

                if (!Self::CHANGE_DETECTION_ENABLED || node0.is_changed() || node1.is_changed())
                    && !node0.intersects(node1)
                {
                    events.push(BroadPhasePairEvent::DeletePair(ColliderPair::new(*h0, *h1)));
                    false
                } else {
                    true
                }
            } else {
                // If the timestamps match, we already saw this pair during traversal.
                // There can be rare occurrences where the timestamp will be equal
                // even though we didn’t see the pair during traversal. This happens
                // if the frame index overflowed. But this is fine, we’ll catch it
                // in another frame.
                true
            }
        });

        // println!(
        //     "Post-filtering: {} (added pairs: {}, removed pairs: {})",
        //     t0.elapsed().as_secs_f32() * 1000.0,
        //     added_pairs,
        //     removed_pairs
        // );
    }

    /// Sets the AABB associated with the given collider.
    ///
    /// The AABB change will be immediately applied and propagated through the underlying BVH.
    /// Change detection will automatically take it into account during the next broad-phase update.
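    ///
    /// A minimal sketch (`broad_phase`, `params`, and `collider_handle` are assumed to already
    /// exist, and the `Aabb` and `point!` re-exports from `rapier2d::prelude` are assumed):
    ///
    /// ```ignore
    /// use rapier2d::prelude::*;
    ///
    /// // Overwrite the collider's broad-phase AABB, e.g. after teleporting it.
    /// let aabb = Aabb::new(point![0.0, 0.0], point![1.0, 1.0]);
    /// broad_phase.set_aabb(&params, collider_handle, aabb);
    /// ```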
    pub fn set_aabb(&mut self, params: &IntegrationParameters, handle: ColliderHandle, aabb: Aabb) {
        let change_detection_skin = if Self::CHANGE_DETECTION_ENABLED {
            Self::CHANGE_DETECTION_FACTOR * params.length_unit
        } else {
            0.0
        };
        self.tree.insert_with_change_detection(
            aabb,
            handle.into_raw_parts().0,
            change_detection_skin,
        );
    }
281}