Refactor and rework of BVH.

The BVH building code is now largely split out into a separate
type, BVHBase.  The intent is that this will also be used by
the BVH4 when I get around to it.

The BVH itself now uses references instead of indexes, allocating
and pointing directly into the MemArena.  This allows the nodes
to all be right next to their bounding boxes in memory.
This commit is contained in:
Nathan Vegdahl 2017-04-10 23:41:38 -07:00
parent db3e4ad129
commit d9564bc005
6 changed files with 271 additions and 184 deletions

1
Cargo.lock generated
View File

@@ -4,6 +4,7 @@ version = "0.1.0"
dependencies = [
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"docopt 0.6.86 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lodepng 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"mem_arena 0.1.0",
"nom 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)",

View File

@@ -22,6 +22,7 @@ scoped_threadpool = "0.1"
crossbeam = "0.2"
num_cpus = "1.0"
lodepng = "0.8"
lazy_static = "0.2"
simd = { version = "0.1.1", optional = true }
# Github dependencies

View File

@@ -2,35 +2,31 @@
use mem_arena::MemArena;
use algorithm::{partition, merge_slices_append};
use algorithm::partition;
use bbox::BBox;
use boundable::Boundable;
use lerp::lerp_slice;
use math::log2_64;
use ray::AccelRay;
use super::objects_split::{sah_split, median_split};
use super::bvh_base::{BVHBase, BVHBaseNode, BVH_MAX_DEPTH};
const BVH_MAX_DEPTH: usize = 64;
#[derive(Copy, Clone, Debug)]
pub struct BVH<'a> {
nodes: &'a [BVHNode],
bounds: &'a [BBox],
root: Option<&'a BVHNode<'a>>,
depth: usize,
}
#[derive(Copy, Clone, Debug)]
enum BVHNode {
enum BVHNode<'a> {
Internal {
bounds_range: (usize, usize),
second_child_index: usize,
bounds: &'a [BBox],
children: (&'a BVHNode<'a>, &'a BVHNode<'a>),
split_axis: u8,
},
Leaf {
bounds_range: (usize, usize),
bounds: &'a [BBox],
object_range: (usize, usize),
},
}
@@ -43,14 +39,18 @@ impl<'a> BVH<'a> {
-> BVH<'a>
where F: 'b + Fn(&T) -> &'b [BBox]
{
let mut builder = BVHBuilder::new();
if objects.len() == 0 {
BVH {
root: None,
depth: 0,
}
} else {
let base = BVHBase::from_objects(objects, objects_per_leaf, bounder);
builder.recursive_build(0, 0, objects_per_leaf, objects, &bounder);
BVH {
nodes: arena.copy_slice(&builder.nodes),
bounds: arena.copy_slice(&builder.bounds),
depth: builder.depth,
BVH {
root: Some(BVH::construct_from_base(arena, &base, base.root_node_index())),
depth: base.depth,
}
}
}
@@ -61,189 +61,104 @@ impl<'a> BVH<'a> {
pub fn traverse<T, F>(&self, rays: &mut [AccelRay], objects: &[T], mut obj_ray_test: F)
where F: FnMut(&T, &mut [AccelRay])
{
if self.nodes.len() == 0 {
return;
}
match self.root {
None => {}
// +2 of max depth for root and last child
let mut i_stack = [0; BVH_MAX_DEPTH + 2];
let mut ray_i_stack = [rays.len(); BVH_MAX_DEPTH + 2];
let mut stack_ptr = 1;
Some(root) => {
// +2 of max depth for root and last child
let mut node_stack = [root; BVH_MAX_DEPTH + 2];
let mut ray_i_stack = [rays.len(); BVH_MAX_DEPTH + 2];
let mut stack_ptr = 1;
while stack_ptr > 0 {
match self.nodes[i_stack[stack_ptr]] {
BVHNode::Internal { bounds_range: br, second_child_index, split_axis } => {
let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
(!r.is_done()) &&
lerp_slice(&self.bounds[br.0..br.1], r.time).intersect_accel_ray(r)
});
if part > 0 {
i_stack[stack_ptr] += 1;
i_stack[stack_ptr + 1] = second_child_index;
ray_i_stack[stack_ptr] = part;
ray_i_stack[stack_ptr + 1] = part;
if rays[0].dir_inv.get_n(split_axis as usize).is_sign_positive() {
i_stack.swap(stack_ptr, stack_ptr + 1);
while stack_ptr > 0 {
match node_stack[stack_ptr] {
&BVHNode::Internal { bounds, children, split_axis } => {
let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
(!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r)
});
if part > 0 {
node_stack[stack_ptr] = children.0;
node_stack[stack_ptr + 1] = children.1;
ray_i_stack[stack_ptr] = part;
ray_i_stack[stack_ptr + 1] = part;
if rays[0].dir_inv.get_n(split_axis as usize).is_sign_positive() {
node_stack.swap(stack_ptr, stack_ptr + 1);
}
stack_ptr += 1;
} else {
stack_ptr -= 1;
}
}
stack_ptr += 1;
} else {
stack_ptr -= 1;
}
}
BVHNode::Leaf { bounds_range: br, object_range } => {
let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
(!r.is_done()) &&
lerp_slice(&self.bounds[br.0..br.1], r.time).intersect_accel_ray(r)
});
if part > 0 {
for obj in &objects[object_range.0..object_range.1] {
obj_ray_test(obj, &mut rays[..part]);
&BVHNode::Leaf { bounds, object_range } => {
let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
(!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r)
});
if part > 0 {
for obj in &objects[object_range.0..object_range.1] {
obj_ray_test(obj, &mut rays[..part]);
}
}
stack_ptr -= 1;
}
}
stack_ptr -= 1;
}
}
}
}
/// Recursively copies the subtree rooted at `base.nodes[node_index]` into
/// `arena`, converting the index-based `BVHBaseNode`s into pointer-linked
/// `BVHNode`s.  Each node's bounds slice is also copied into the arena, so
/// nodes end up adjacent to their bounding boxes in memory.
///
/// Panics (via slice indexing) if `node_index` is out of range for
/// `base.nodes`.  Recursion depth matches the tree depth, which the builder
/// caps at BVH_MAX_DEPTH.
fn construct_from_base(arena: &'a MemArena,
                       base: &BVHBase,
                       node_index: usize)
                       -> &'a mut BVHNode<'a> {
    match &base.nodes[node_index] {
        &BVHBaseNode::Internal { bounds_range, children_indices, split_axis } => {
            // The uninitialized allocation is fully overwritten below before
            // it is ever read; BVHNode derives Copy, so the assignment runs
            // no destructor on the stale bytes.
            let mut node = unsafe { arena.alloc_uninitialized::<BVHNode>() };
            // Copy this node's slice of time-sampled bounds into the arena.
            let bounds = arena.copy_slice(&base.bounds[bounds_range.0..bounds_range.1]);
            // Recursively convert both children first, then link them in.
            let child1 = BVH::construct_from_base(arena, base, children_indices.0);
            let child2 = BVH::construct_from_base(arena, base, children_indices.1);
            *node = BVHNode::Internal {
                bounds: bounds,
                children: (child1, child2),
                split_axis: split_axis,
            };
            return node;
        }
        &BVHBaseNode::Leaf { bounds_range, object_range } => {
            // Same uninitialized-then-overwrite pattern as the Internal arm.
            let mut node = unsafe { arena.alloc_uninitialized::<BVHNode>() };
            let bounds = arena.copy_slice(&base.bounds[bounds_range.0..bounds_range.1]);
            *node = BVHNode::Leaf {
                bounds: bounds,
                object_range: object_range,
            };
            return node;
        }
    }
}
}
lazy_static! {
static ref DEGENERATE_BOUNDS: [BBox; 1] = [BBox::new()];
}
impl<'a> Boundable for BVH<'a> {
fn bounds<'b>(&'b self) -> &'b [BBox] {
match self.nodes[0] {
BVHNode::Internal { bounds_range, .. } => &self.bounds[bounds_range.0..bounds_range.1],
match self.root {
None => &DEGENERATE_BOUNDS[..],
Some(root) => {
match root {
&BVHNode::Internal { bounds, .. } => bounds,
BVHNode::Leaf { bounds_range, .. } => &self.bounds[bounds_range.0..bounds_range.1],
}
}
}
#[derive(Debug)]
struct BVHBuilder {
nodes: Vec<BVHNode>,
bounds: Vec<BBox>,
depth: usize,
bounds_cache: Vec<BBox>,
}
impl BVHBuilder {
fn new() -> BVHBuilder {
BVHBuilder {
nodes: Vec::new(),
bounds: Vec::new(),
depth: 0,
bounds_cache: Vec::new(),
}
}
fn acc_bounds<'a, T, F>(&mut self, objects: &mut [T], bounder: &F)
where F: 'a + Fn(&T) -> &'a [BBox]
{
// TODO: do all of this without the temporary cache
let max_len = objects.iter().map(|obj| bounder(obj).len()).max().unwrap();
self.bounds_cache.clear();
self.bounds_cache.resize(max_len, BBox::new());
for obj in objects.iter() {
let bounds = bounder(obj);
debug_assert!(bounds.len() > 0);
if bounds.len() == max_len {
for i in 0..bounds.len() {
self.bounds_cache[i] |= bounds[i];
}
} else {
let s = (max_len - 1) as f32;
for (i, bbc) in self.bounds_cache.iter_mut().enumerate() {
*bbc |= lerp_slice(bounds, i as f32 / s);
&BVHNode::Leaf { bounds, .. } => bounds,
}
}
}
}
fn recursive_build<'a, T, F>(&mut self,
offset: usize,
depth: usize,
objects_per_leaf: usize,
objects: &mut [T],
bounder: &F)
-> (usize, (usize, usize))
where F: 'a + Fn(&T) -> &'a [BBox]
{
let me = self.nodes.len();
if objects.len() == 0 {
return (0, (0, 0));
} else if objects.len() <= objects_per_leaf {
// Leaf node
self.acc_bounds(objects, bounder);
let bi = self.bounds.len();
for b in self.bounds_cache.iter() {
self.bounds.push(*b);
}
self.nodes.push(BVHNode::Leaf {
bounds_range: (bi, self.bounds.len()),
object_range: (offset, offset + objects.len()),
});
if self.depth < depth {
self.depth = depth;
}
return (me, (bi, self.bounds.len()));
} else {
// Not a leaf node
self.nodes.push(BVHNode::Internal {
bounds_range: (0, 0),
second_child_index: 0,
split_axis: 0,
});
// Partition objects.
// If we're too near the max depth, we do balanced building to
// avoid exceeding max depth.
// Otherwise we do SAH splitting to build better trees.
let (split_index, split_axis) = if (log2_64(objects.len() as u64) as usize) <
(BVH_MAX_DEPTH - depth) {
// SAH splitting, when we have room to play
sah_split(objects, &bounder)
} else {
// Balanced splitting, when we don't have room to play
median_split(objects, &bounder)
};
// Create child nodes
let (_, c1_bounds) = self.recursive_build(offset,
depth + 1,
objects_per_leaf,
&mut objects[..split_index],
bounder);
let (c2_index, c2_bounds) = self.recursive_build(offset + split_index,
depth + 1,
objects_per_leaf,
&mut objects[split_index..],
bounder);
// Determine bounds
// TODO: do merging without the temporary vec.
let bi = self.bounds.len();
let mut merged = Vec::new();
merge_slices_append(&self.bounds[c1_bounds.0..c1_bounds.1],
&self.bounds[c2_bounds.0..c2_bounds.1],
&mut merged,
|b1, b2| *b1 | *b2);
self.bounds.extend(merged.drain(0..));
// Set node
self.nodes[me] = BVHNode::Internal {
bounds_range: (bi, self.bounds.len()),
second_child_index: c2_index,
split_axis: split_axis as u8,
};
return (me, (bi, self.bounds.len()));
}
}
}

166
src/accel/bvh_base.rs Normal file
View File

@@ -0,0 +1,166 @@
#![allow(dead_code)]
use algorithm::merge_slices_append;
use bbox::BBox;
use lerp::lerp_slice;
use math::log2_64;
use super::objects_split::{sah_split, median_split};
/// Maximum depth a built BVH may reach.  Traversal stacks elsewhere are
/// sized from this constant, and recursive_build switches to balanced
/// (median) splitting when it gets close to it.
pub const BVH_MAX_DEPTH: usize = 64;

/// An intermediary structure for creating a BVH.
#[derive(Debug)]
pub struct BVHBase {
    // Flat node storage; the root is always index 0 (see root_node_index).
    pub nodes: Vec<BVHBaseNode>,
    // Backing storage for all nodes' bounding boxes, referenced by
    // (start, end) index ranges.  Multiple boxes per node are lerped over
    // time by consumers — presumably for motion blur; confirm at call sites.
    pub bounds: Vec<BBox>,
    // Deepest tree level reached while building.
    pub depth: usize,
    // Scratch buffer reused across acc_bounds() calls to avoid reallocation.
    bounds_cache: Vec<BBox>,
}
/// A node of the intermediary BVH.  Uses indices rather than references so
/// nodes and bounds can live in plain growable Vecs during construction.
#[derive(Copy, Clone, Debug)]
pub enum BVHBaseNode {
    Internal {
        // (start, end) range into BVHBase::bounds for this node's boxes.
        bounds_range: (usize, usize),
        // Indices of the two children within BVHBase::nodes.
        children_indices: (usize, usize),
        // Axis the objects were partitioned on (as returned by the split
        // functions; presumably 0/1/2 = x/y/z — confirm in objects_split).
        split_axis: u8,
    },
    Leaf {
        // (start, end) range into BVHBase::bounds for this node's boxes.
        bounds_range: (usize, usize),
        // (start, end) range of this leaf's objects in the caller's slice.
        object_range: (usize, usize),
    },
}
impl BVHBase {
    /// Creates an empty BVHBase with all storage unallocated.
    fn new() -> BVHBase {
        BVHBase {
            nodes: Vec::new(),
            bounds: Vec::new(),
            depth: 0,
            bounds_cache: Vec::new(),
        }
    }

    /// Builds a BVHBase over `objects`, placing at most `objects_per_leaf`
    /// objects in each leaf.  `bounder` maps an object to its slice of
    /// (possibly multiple, time-sampled) bounding boxes.
    ///
    /// NOTE: `objects` is reordered in place by the split partitioning.
    pub fn from_objects<'b, T, F>(objects: &mut [T], objects_per_leaf: usize, bounder: F) -> BVHBase
        where F: 'b + Fn(&T) -> &'b [BBox]
    {
        let mut bvh = BVHBase::new();
        bvh.recursive_build(0, 0, objects_per_leaf, objects, &bounder);
        bvh
    }

    /// Index of the root node within `nodes`.  The root is the first node
    /// pushed by recursive_build, so this is always 0.
    pub fn root_node_index(&self) -> usize {
        0
    }

    /// Accumulates the union of all `objects`' bounds into `bounds_cache`.
    ///
    /// The cache is sized to the longest bounds list among the objects;
    /// objects with fewer samples are resampled via lerp_slice so every
    /// sample index gets a contribution.  Panics if `objects` is empty
    /// (the max() unwrap); callers only invoke this for non-empty leaves.
    fn acc_bounds<'a, T, F>(&mut self, objects: &mut [T], bounder: &F)
        where F: 'a + Fn(&T) -> &'a [BBox]
    {
        // TODO: do all of this without the temporary cache
        let max_len = objects.iter().map(|obj| bounder(obj).len()).max().unwrap();

        self.bounds_cache.clear();
        self.bounds_cache.resize(max_len, BBox::new());

        for obj in objects.iter() {
            let bounds = bounder(obj);
            debug_assert!(bounds.len() > 0);
            if bounds.len() == max_len {
                // Sample counts match: union sample-by-sample.
                for i in 0..bounds.len() {
                    self.bounds_cache[i] |= bounds[i];
                }
            } else {
                // Fewer samples than the cache: lerp this object's bounds
                // at each cache sample's normalized time.  This branch only
                // runs when max_len > 1 (bounds.len() >= 1 and < max_len),
                // so the divisor `s` is never zero.
                let s = (max_len - 1) as f32;
                for (i, bbc) in self.bounds_cache.iter_mut().enumerate() {
                    *bbc |= lerp_slice(bounds, i as f32 / s);
                }
            }
        }
    }

    /// Recursively builds the subtree for `objects` (which start at index
    /// `offset` within the full object slice) at the given `depth`.
    ///
    /// Returns `(node_index, (bounds_start, bounds_end))` for the created
    /// node.  An empty `objects` slice returns the sentinel `(0, (0, 0))`
    /// without creating a node; the public BVH wrapper handles the empty
    /// case before ever calling this.
    fn recursive_build<'a, T, F>(&mut self,
                                 offset: usize,
                                 depth: usize,
                                 objects_per_leaf: usize,
                                 objects: &mut [T],
                                 bounder: &F)
                                 -> (usize, (usize, usize))
        where F: 'a + Fn(&T) -> &'a [BBox]
    {
        // Index this call's node will occupy once pushed.
        let me = self.nodes.len();

        if objects.len() == 0 {
            return (0, (0, 0));
        } else if objects.len() <= objects_per_leaf {
            // Leaf node
            self.acc_bounds(objects, bounder);
            let bi = self.bounds.len();
            for b in self.bounds_cache.iter() {
                self.bounds.push(*b);
            }
            self.nodes.push(BVHBaseNode::Leaf {
                bounds_range: (bi, self.bounds.len()),
                object_range: (offset, offset + objects.len()),
            });

            // Track the deepest level reached.
            if self.depth < depth {
                self.depth = depth;
            }

            return (me, (bi, self.bounds.len()));
        } else {
            // Not a leaf node: push a placeholder now so `me` is reserved,
            // then patch in the real data after the children are built.
            self.nodes.push(BVHBaseNode::Internal {
                bounds_range: (0, 0),
                children_indices: (0, 0),
                split_axis: 0,
            });

            // Partition objects.
            // If we're too near the max depth, we do balanced building to
            // avoid exceeding max depth.
            // Otherwise we do SAH splitting to build better trees.
            let (split_index, split_axis) = if (log2_64(objects.len() as u64) as usize) <
                                               (BVH_MAX_DEPTH - depth) {
                // SAH splitting, when we have room to play
                sah_split(objects, &bounder)
            } else {
                // Balanced splitting, when we don't have room to play
                median_split(objects, &bounder)
            };

            // Create child nodes
            let (c1_index, c1_bounds) = self.recursive_build(offset,
                                                             depth + 1,
                                                             objects_per_leaf,
                                                             &mut objects[..split_index],
                                                             bounder);
            let (c2_index, c2_bounds) = self.recursive_build(offset + split_index,
                                                             depth + 1,
                                                             objects_per_leaf,
                                                             &mut objects[split_index..],
                                                             bounder);

            // Determine bounds: per-sample union of the two children's
            // bounds ranges, appended as this node's own range.
            // TODO: do merging without the temporary vec.
            let bi = self.bounds.len();
            let mut merged = Vec::new();
            merge_slices_append(&self.bounds[c1_bounds.0..c1_bounds.1],
                                &self.bounds[c2_bounds.0..c2_bounds.1],
                                &mut merged,
                                |b1, b2| *b1 | *b2);
            self.bounds.extend(merged.drain(0..));

            // Set node: overwrite the placeholder pushed above.
            self.nodes[me] = BVHBaseNode::Internal {
                bounds_range: (bi, self.bounds.len()),
                children_indices: (c1_index, c2_index),
                split_axis: split_axis as u8,
            };

            return (me, (bi, self.bounds.len()));
        }
    }
}

View File

@@ -1,3 +1,4 @@
mod bvh_base;
mod bvh;
mod light_array;
mod light_tree;

View File

@@ -12,6 +12,9 @@ extern crate time;
#[macro_use]
extern crate nom;
#[macro_use]
extern crate lazy_static;
#[cfg(feature = "simd_perf")]
extern crate simd;