Reformat code with latest rustfmt and custom config.

Nathan Vegdahl 2017-05-14 13:43:51 -07:00
parent 993ba719d7
commit 922e33ec3f
42 changed files with 21399 additions and 2189 deletions

rustfmt.toml (new file)

@@ -0,0 +1,6 @@
max_width = 1024
error_on_line_overflow = false
array_layout = "Block"
chain_indent = "Block"
fn_args_layout = "Block"
fn_call_style = "Block"
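These options target the 2017-era rustfmt this commit was run with (current rustfmt spells several of them differently). The very large max_width combined with error_on_line_overflow = false effectively disables line-length enforcement, which is why many reformatted signatures in the diffs below collapse onto one long line, while the four "Block" settings move array elements, chained calls, function parameters, and call arguments onto their own indented lines. As a rough illustration only, the standalone sketch below shows the kind of before/after change the call-style setting produces; the function and constants are invented for the example and do not appear in this commit.

// Hypothetical, minimal example of the "Block" call style selected above.
// Names here (render_tile, SAMPLES_PER_PIXEL, MAX_BOUNCES) are invented
// for illustration and are not part of this codebase.

const SAMPLES_PER_PIXEL: u32 = 16;
const MAX_BOUNCES: u32 = 4;

fn render_tile(tile_x: u32, tile_y: u32, samples_per_pixel: u32, max_bounces: u32) -> u64 {
    // Stand-in body so the example compiles and runs.
    (tile_x + tile_y + samples_per_pixel + max_bounces) as u64
}

fn main() {
    // The older "Visual" style wraps a long call by aligning the remaining
    // arguments under the opening parenthesis:
    //
    //     let n = render_tile(0,
    //                         0,
    //                         SAMPLES_PER_PIXEL,
    //                         MAX_BOUNCES);
    //
    // With fn_call_style = "Block", each argument gets its own indented
    // line, a trailing comma is added, and the closing parenthesis drops
    // to its own line:
    let n = render_tile(
        0,
        0,
        SAMPLES_PER_PIXEL,
        MAX_BOUNCES,
    );
    println!("samples touched: {}", n);
}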

View File

@@ -38,11 +38,7 @@ pub enum BVHNode<'a> {
 }
 
 impl<'a> BVH<'a> {
-    pub fn from_objects<'b, T, F>(arena: &'a MemArena,
-                                  objects: &mut [T],
-                                  objects_per_leaf: usize,
-                                  bounder: F)
-                                  -> BVH<'a>
+    pub fn from_objects<'b, T, F>(arena: &'a MemArena, objects: &mut [T], objects_per_leaf: usize, bounder: F) -> BVH<'a>
         where F: 'b + Fn(&T) -> &'b [BBox]
     {
         if objects.len() == 0 {
@@ -74,8 +70,11 @@ impl<'a> BVH<'a> {
         let mut timer = Timer::new();
         let mut trav_time: f64 = 0.0;
 
-        let ray_sign =
-            [rays[0].dir_inv.x() >= 0.0, rays[0].dir_inv.y() >= 0.0, rays[0].dir_inv.z() >= 0.0];
+        let ray_sign = [
+            rays[0].dir_inv.x() >= 0.0,
+            rays[0].dir_inv.y() >= 0.0,
+            rays[0].dir_inv.z() >= 0.0,
+        ];
 
         // +2 of max depth for root and last child
         let mut node_stack = [self.root.unwrap(); BVH_MAX_DEPTH + 2];
@@ -84,12 +83,17 @@ impl<'a> BVH<'a> {
         while stack_ptr > 0 {
             match node_stack[stack_ptr] {
-                &BVHNode::Internal { children, bounds_start, bounds_len, split_axis } => {
-                    let bounds =
-                        unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
-                    let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
-                        (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r)
-                    });
+                &BVHNode::Internal {
+                    children,
+                    bounds_start,
+                    bounds_len,
+                    split_axis,
+                } => {
+                    let bounds = unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
+                    let part = partition(
+                        &mut rays[..ray_i_stack[stack_ptr]],
+                        |r| (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r),
+                    );
 
                     if part > 0 {
                         ray_i_stack[stack_ptr] = part;
                         ray_i_stack[stack_ptr + 1] = part;
@@ -106,12 +110,16 @@ impl<'a> BVH<'a> {
                     }
                 }
 
-                &BVHNode::Leaf { object_range, bounds_start, bounds_len } => {
-                    let bounds =
-                        unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
-                    let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
-                        (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r)
-                    });
+                &BVHNode::Leaf {
+                    object_range,
+                    bounds_start,
+                    bounds_len,
+                } => {
+                    let bounds = unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
+                    let part = partition(
+                        &mut rays[..ray_i_stack[stack_ptr]],
+                        |r| (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r),
+                    );
 
                     trav_time += timer.tick() as f64;
@@ -129,23 +137,24 @@ impl<'a> BVH<'a> {
         }
 
         trav_time += timer.tick() as f64;
-        ACCEL_TRAV_TIME.with(|att| {
+        ACCEL_TRAV_TIME.with(
+            |att| {
                 let v = att.get();
                 att.set(v + trav_time);
-        });
+            }
+        );
     }
 
-    fn construct_from_base(arena: &'a MemArena,
-                           base: &BVHBase,
-                           node_index: usize)
-                           -> &'a mut BVHNode<'a> {
+    fn construct_from_base(arena: &'a MemArena, base: &BVHBase, node_index: usize) -> &'a mut BVHNode<'a> {
         match &base.nodes[node_index] {
-            &BVHBaseNode::Internal { bounds_range, children_indices, split_axis } => {
+            &BVHBaseNode::Internal {
+                bounds_range,
+                children_indices,
+                split_axis,
+            } => {
                 let mut node = unsafe { arena.alloc_uninitialized_with_alignment::<BVHNode>(32) };
-                let bounds =
-                    arena.copy_slice_with_alignment(&base.bounds[bounds_range.0..bounds_range.1],
-                                                    32);
+                let bounds = arena.copy_slice_with_alignment(&base.bounds[bounds_range.0..bounds_range.1], 32);
 
                 let child1 = BVH::construct_from_base(arena, base, children_indices.0);
                 let child2 = BVH::construct_from_base(arena, base, children_indices.1);
@@ -159,7 +168,10 @@ impl<'a> BVH<'a> {
                 return node;
             }
 
-            &BVHBaseNode::Leaf { bounds_range, object_range } => {
+            &BVHBaseNode::Leaf {
+                bounds_range,
+                object_range,
+            } => {
                 let mut node = unsafe { arena.alloc_uninitialized::<BVHNode>() };
                 let bounds = arena.copy_slice(&base.bounds[bounds_range.0..bounds_range.1]);

@@ -185,13 +197,17 @@ impl<'a> Boundable for BVH<'a> {
             None => &DEGENERATE_BOUNDS[..],
             Some(root) => {
                 match root {
-                    &BVHNode::Internal { bounds_start, bounds_len, .. } => {
-                        unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) }
-                    }
+                    &BVHNode::Internal {
+                        bounds_start,
+                        bounds_len,
+                        ..
+                    } => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
 
-                    &BVHNode::Leaf { bounds_start, bounds_len, .. } => {
-                        unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) }
-                    }
+                    &BVHNode::Leaf {
+                        bounds_start,
+                        bounds_len,
+                        ..
+                    } => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
                 }
             }
         }

View File

@ -94,13 +94,7 @@ impl BVHBase {
} }
} }
fn recursive_build<'a, T, F>(&mut self, fn recursive_build<'a, T, F>(&mut self, offset: usize, depth: usize, objects_per_leaf: usize, objects: &mut [T], bounder: &F) -> (usize, (usize, usize))
offset: usize,
depth: usize,
objects_per_leaf: usize,
objects: &mut [T],
bounder: &F)
-> (usize, (usize, usize))
where F: 'a + Fn(&T) -> &'a [BBox] where F: 'a + Fn(&T) -> &'a [BBox]
{ {
let me = self.nodes.len(); let me = self.nodes.len();
@ -115,10 +109,12 @@ impl BVHBase {
// We make sure that it's worth having multiple time samples, and if not // We make sure that it's worth having multiple time samples, and if not
// we reduce to the union of the time samples. // we reduce to the union of the time samples.
self.acc_bounds(objects, bounder); self.acc_bounds(objects, bounder);
let union_bounds = self.bounds_cache.iter().fold(BBox::new(), |b1, b2| (b1 | *b2)); let union_bounds = self.bounds_cache
let average_area = .iter()
self.bounds_cache.iter().fold(0.0, |area, bb| area + bb.surface_area()) / .fold(BBox::new(), |b1, b2| (b1 | *b2));
self.bounds_cache.len() as f32; let average_area = self.bounds_cache
.iter()
.fold(0.0, |area, bb| area + bb.surface_area()) / self.bounds_cache.len() as f32;
if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) { if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) {
self.bounds.push(union_bounds); self.bounds.push(union_bounds);
} else { } else {
@ -127,10 +123,13 @@ impl BVHBase {
} }
// Create node // Create node
self.nodes.push(BVHBaseNode::Leaf { self.nodes
.push(
BVHBaseNode::Leaf {
bounds_range: (bi, self.bounds.len()), bounds_range: (bi, self.bounds.len()),
object_range: (offset, offset + objects.len()), object_range: (offset, offset + objects.len()),
}); }
);
if self.depth < depth { if self.depth < depth {
self.depth = depth; self.depth = depth;
@ -139,18 +138,20 @@ impl BVHBase {
return (me, (bi, self.bounds.len())); return (me, (bi, self.bounds.len()));
} else { } else {
// Not a leaf node // Not a leaf node
self.nodes.push(BVHBaseNode::Internal { self.nodes
.push(
BVHBaseNode::Internal {
bounds_range: (0, 0), bounds_range: (0, 0),
children_indices: (0, 0), children_indices: (0, 0),
split_axis: 0, split_axis: 0,
}); }
);
// Partition objects. // Partition objects.
// If we're too near the max depth, we do balanced building to // If we're too near the max depth, we do balanced building to
// avoid exceeding max depth. // avoid exceeding max depth.
// Otherwise we do SAH splitting to build better trees. // Otherwise we do SAH splitting to build better trees.
let (split_index, split_axis) = if (log2_64(objects.len() as u64) as usize) < let (split_index, split_axis) = if (log2_64(objects.len() as u64) as usize) < (BVH_MAX_DEPTH - depth) {
(BVH_MAX_DEPTH - depth) {
// SAH splitting, when we have room to play // SAH splitting, when we have room to play
sah_split(objects, &bounder) sah_split(objects, &bounder)
} else { } else {
@ -159,31 +160,36 @@ impl BVHBase {
}; };
// Create child nodes // Create child nodes
let (c1_index, c1_bounds) = self.recursive_build(offset, let (c1_index, c1_bounds) = self.recursive_build(
offset,
depth + 1, depth + 1,
objects_per_leaf, objects_per_leaf,
&mut objects[..split_index], &mut objects[..split_index],
bounder); bounder,
let (c2_index, c2_bounds) = self.recursive_build(offset + split_index, );
let (c2_index, c2_bounds) = self.recursive_build(
offset + split_index,
depth + 1, depth + 1,
objects_per_leaf, objects_per_leaf,
&mut objects[split_index..], &mut objects[split_index..],
bounder); bounder,
);
// Determine bounds // Determine bounds
// TODO: do merging without the temporary vec. // TODO: do merging without the temporary vec.
let bi = self.bounds.len(); let bi = self.bounds.len();
{ {
let mut merged = Vec::new(); let mut merged = Vec::new();
merge_slices_append(&self.bounds[c1_bounds.0..c1_bounds.1], merge_slices_append(
&self.bounds[c1_bounds.0..c1_bounds.1],
&self.bounds[c2_bounds.0..c2_bounds.1], &self.bounds[c2_bounds.0..c2_bounds.1],
&mut merged, &mut merged,
|b1, b2| *b1 | *b2); |b1, b2| *b1 | *b2,
);
// We make sure that it's worth having multiple time samples, and if not // We make sure that it's worth having multiple time samples, and if not
// we reduce to the union of the time samples. // we reduce to the union of the time samples.
let union_bounds = merged.iter().fold(BBox::new(), |b1, b2| (b1 | *b2)); let union_bounds = merged.iter().fold(BBox::new(), |b1, b2| (b1 | *b2));
let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area()) / let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area()) / merged.len() as f32;
merged.len() as f32;
if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) { if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) {
self.bounds.push(union_bounds); self.bounds.push(union_bounds);
} else { } else {

View File

@@ -34,14 +34,7 @@ impl LightArray {
 }
 
 impl LightAccel for LightArray {
-    fn select(&self,
-              inc: Vector,
-              pos: Point,
-              nor: Normal,
-              sc: &SurfaceClosure,
-              time: f32,
-              n: f32)
-              -> Option<(usize, f32, f32)> {
+    fn select(&self, inc: Vector, pos: Point, nor: Normal, sc: &SurfaceClosure, time: f32, n: f32) -> Option<(usize, f32, f32)> {
         let _ = (inc, pos, nor, sc, time); // Not using these, silence warnings
 
         assert!(n >= 0.0 && n <= 1.0);

View File

@ -26,10 +26,7 @@ struct Node {
} }
impl<'a> LightTree<'a> { impl<'a> LightTree<'a> {
pub fn from_objects<'b, T, F>(arena: &'a MemArena, pub fn from_objects<'b, T, F>(arena: &'a MemArena, objects: &mut [T], info_getter: F) -> LightTree<'a>
objects: &mut [T],
info_getter: F)
-> LightTree<'a>
where F: 'b + Fn(&T) -> (&'b [BBox], f32) where F: 'b + Fn(&T) -> (&'b [BBox], f32)
{ {
let mut builder = LightTreeBuilder::new(); let mut builder = LightTreeBuilder::new();
@ -45,14 +42,7 @@ impl<'a> LightTree<'a> {
impl<'a> LightAccel for LightTree<'a> { impl<'a> LightAccel for LightTree<'a> {
fn select(&self, fn select(&self, inc: Vector, pos: Point, nor: Normal, sc: &SurfaceClosure, time: f32, n: f32) -> Option<(usize, f32, f32)> {
inc: Vector,
pos: Point,
nor: Normal,
sc: &SurfaceClosure,
time: f32,
n: f32)
-> Option<(usize, f32, f32)> {
if self.nodes.len() == 0 { if self.nodes.len() == 0 {
return None; return None;
} }
@ -151,12 +141,7 @@ impl LightTreeBuilder {
} }
} }
fn recursive_build<'a, T, F>(&mut self, fn recursive_build<'a, T, F>(&mut self, offset: usize, depth: usize, objects: &mut [T], info_getter: &F) -> (usize, (usize, usize))
offset: usize,
depth: usize,
objects: &mut [T],
info_getter: &F)
-> (usize, (usize, usize))
where F: 'a + Fn(&T) -> (&'a [BBox], f32) where F: 'a + Fn(&T) -> (&'a [BBox], f32)
{ {
let me_index = self.nodes.len(); let me_index = self.nodes.len();
@ -168,12 +153,15 @@ impl LightTreeBuilder {
let bi = self.bounds.len(); let bi = self.bounds.len();
let (obj_bounds, energy) = info_getter(&objects[0]); let (obj_bounds, energy) = info_getter(&objects[0]);
self.bounds.extend(obj_bounds); self.bounds.extend(obj_bounds);
self.nodes.push(Node { self.nodes
.push(
Node {
is_leaf: true, is_leaf: true,
bounds_range: (bi, self.bounds.len()), bounds_range: (bi, self.bounds.len()),
energy: energy, energy: energy,
child_index: offset, child_index: offset,
}); }
);
if self.depth < depth { if self.depth < depth {
self.depth = depth; self.depth = depth;
@ -182,32 +170,38 @@ impl LightTreeBuilder {
return (me_index, (bi, self.bounds.len())); return (me_index, (bi, self.bounds.len()));
} else { } else {
// Not a leaf node // Not a leaf node
self.nodes.push(Node { self.nodes
.push(
Node {
is_leaf: false, is_leaf: false,
bounds_range: (0, 0), bounds_range: (0, 0),
energy: 0.0, energy: 0.0,
child_index: 0, child_index: 0,
}); }
);
// Partition objects. // Partition objects.
let (split_index, _) = sah_split(objects, &|obj_ref| info_getter(obj_ref).0); let (split_index, _) = sah_split(objects, &|obj_ref| info_getter(obj_ref).0);
// Create child nodes // Create child nodes
let (_, c1_bounds) = let (_, c1_bounds) = self.recursive_build(offset, depth + 1, &mut objects[..split_index], info_getter);
self.recursive_build(offset, depth + 1, &mut objects[..split_index], info_getter); let (c2_index, c2_bounds) = self.recursive_build(
let (c2_index, c2_bounds) = self.recursive_build(offset + split_index, offset + split_index,
depth + 1, depth + 1,
&mut objects[split_index..], &mut objects[split_index..],
info_getter); info_getter,
);
// Determine bounds // Determine bounds
// TODO: do merging without the temporary vec. // TODO: do merging without the temporary vec.
let bi = self.bounds.len(); let bi = self.bounds.len();
let mut merged = Vec::new(); let mut merged = Vec::new();
merge_slices_append(&self.bounds[c1_bounds.0..c1_bounds.1], merge_slices_append(
&self.bounds[c1_bounds.0..c1_bounds.1],
&self.bounds[c2_bounds.0..c2_bounds.1], &self.bounds[c2_bounds.0..c2_bounds.1],
&mut merged, &mut merged,
|b1, b2| *b1 | *b2); |b1, b2| *b1 | *b2,
);
self.bounds.extend(merged.drain(0..)); self.bounds.extend(merged.drain(0..));
// Set node // Set node

View File

@@ -19,14 +19,7 @@ thread_local! {
 pub trait LightAccel {
     /// Returns (index_of_light, selection_pdf, whittled_n)
-    fn select(&self,
-              inc: Vector,
-              pos: Point,
-              nor: Normal,
-              sc: &SurfaceClosure,
-              time: f32,
-              n: f32)
-              -> Option<(usize, f32, f32)>;
+    fn select(&self, inc: Vector, pos: Point, nor: Normal, sc: &SurfaceClosure, time: f32, n: f32) -> Option<(usize, f32, f32)>;
 
     fn approximate_energy(&self) -> f32;
 }

View File

@@ -65,8 +65,7 @@ pub fn free_sah_split<'a, T, F>(seed: u32, objects: &mut [T], bounder: &F) -> (u
 
     // Build SAH bins
     let sah_bins = {
-        let mut sah_bins = [[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1];
-                            SPLIT_PLANE_COUNT];
+        let mut sah_bins = [[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1]; SPLIT_PLANE_COUNT];
         for obj in objects.iter() {
             let tb = lerp_slice(bounder(obj), 0.5);
             let centroid = tb.center().into_vector();
@ -132,11 +131,13 @@ pub fn free_sah_split<'a, T, F>(seed: u32, objects: &mut [T], bounder: &F) -> (u
}; };
// Partition // Partition
let mut split_i = partition(&mut objects[..], |obj| { let mut split_i = partition(
&mut objects[..], |obj| {
let centroid = lerp_slice(bounder(obj), 0.5).center().into_vector(); let centroid = lerp_slice(bounder(obj), 0.5).center().into_vector();
let dist = dot(centroid, plane); let dist = dot(centroid, plane);
dist < div dist < div
}); }
);
if split_i < 1 { if split_i < 1 {
split_i = 1; split_i = 1;
@ -223,11 +224,13 @@ pub fn sah_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize)
}; };
// Partition // Partition
let mut split_i = partition(&mut objects[..], |obj| { let mut split_i = partition(
&mut objects[..], |obj| {
let tb = lerp_slice(bounder(obj), 0.5); let tb = lerp_slice(bounder(obj), 0.5);
let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5; let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5;
centroid < div centroid < div
}); }
);
if split_i < 1 { if split_i < 1 {
split_i = 1; split_i = 1;
} else if split_i >= objects.len() { } else if split_i >= objects.len() {
@ -269,11 +272,13 @@ pub fn bounds_mean_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, us
let div = (bounds.min.get_n(split_axis) + bounds.max.get_n(split_axis)) * 0.5; let div = (bounds.min.get_n(split_axis) + bounds.max.get_n(split_axis)) * 0.5;
// Partition // Partition
let mut split_i = partition(&mut objects[..], |obj| { let mut split_i = partition(
&mut objects[..], |obj| {
let tb = lerp_slice(bounder(obj), 0.5); let tb = lerp_slice(bounder(obj), 0.5);
let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5; let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5;
centroid < div centroid < div
}); }
);
if split_i < 1 { if split_i < 1 {
split_i = 1; split_i = 1;
} else if split_i >= objects.len() { } else if split_i >= objects.len() {
@ -317,7 +322,8 @@ pub fn median_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize)
let place = objects.len() / 2; let place = objects.len() / 2;
if place > 0 { place } else { 1 } if place > 0 { place } else { 1 }
}; };
quick_select(objects, place, |a, b| { quick_select(
objects, place, |a, b| {
let tb_a = lerp_slice(bounder(a), 0.5); let tb_a = lerp_slice(bounder(a), 0.5);
let tb_b = lerp_slice(bounder(b), 0.5); let tb_b = lerp_slice(bounder(b), 0.5);
let centroid_a = (tb_a.min.get_n(split_axis) + tb_a.max.get_n(split_axis)) * 0.5; let centroid_a = (tb_a.min.get_n(split_axis) + tb_a.max.get_n(split_axis)) * 0.5;
@ -330,7 +336,8 @@ pub fn median_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize)
} else { } else {
Ordering::Greater Ordering::Greater
} }
}); }
);
(place, split_axis) (place, split_axis)
} }

View File

@ -159,9 +159,11 @@ pub fn partition_pair<A, B, F>(slc1: &mut [A], slc2: &mut [B], mut pred: F) -> u
if a1 == b1 { if a1 == b1 {
return ((a1 as usize) - start) / std::mem::size_of::<A>(); return ((a1 as usize) - start) / std::mem::size_of::<A>();
} }
if !pred(((a1 as usize) - start) / std::mem::size_of::<A>(), if !pred(
((a1 as usize) - start) / std::mem::size_of::<A>(),
&mut *a1, &mut *a1,
&mut *a2) { &mut *a2,
) {
break; break;
} }
a1 = a1.offset(1); a1 = a1.offset(1);
@ -174,9 +176,11 @@ pub fn partition_pair<A, B, F>(slc1: &mut [A], slc2: &mut [B], mut pred: F) -> u
if a1 == b1 { if a1 == b1 {
return ((a1 as usize) - start) / std::mem::size_of::<A>(); return ((a1 as usize) - start) / std::mem::size_of::<A>();
} }
if pred(((b1 as usize) - start) / std::mem::size_of::<A>(), if pred(
((b1 as usize) - start) / std::mem::size_of::<A>(),
&mut *b1, &mut *b1,
&mut *b2) { &mut *b2,
) {
break; break;
} }
} }
@ -223,10 +227,7 @@ pub fn quick_select<T, F>(slc: &mut [T], n: usize, mut order: F)
} }
/// Merges two slices of things, appending the result to vec_out /// Merges two slices of things, appending the result to vec_out
pub fn merge_slices_append<T: Lerp + Copy, F>(slice1: &[T], pub fn merge_slices_append<T: Lerp + Copy, F>(slice1: &[T], slice2: &[T], vec_out: &mut Vec<T>, merge: F)
slice2: &[T],
vec_out: &mut Vec<T>,
merge: F)
where F: Fn(&T, &T) -> T where F: Fn(&T, &T) -> T
{ {
// Transform the bounding boxes // Transform the bounding boxes
@ -253,10 +254,7 @@ pub fn merge_slices_append<T: Lerp + Copy, F>(slice1: &[T],
/// Merges two slices of things, storing the result in slice_out. /// Merges two slices of things, storing the result in slice_out.
/// Panics if slice_out is not the right size. /// Panics if slice_out is not the right size.
pub fn merge_slices_to<T: Lerp + Copy, F>(slice1: &[T], pub fn merge_slices_to<T: Lerp + Copy, F>(slice1: &[T], slice2: &[T], slice_out: &mut [T], merge: F)
slice2: &[T],
slice_out: &mut [T],
merge: F)
where F: Fn(&T, &T) -> T where F: Fn(&T, &T) -> T
{ {
assert!(slice_out.len() == cmp::max(slice1.len(), slice2.len())); assert!(slice_out.len() == cmp::max(slice1.len(), slice2.len()));
@ -266,8 +264,10 @@ pub fn merge_slices_to<T: Lerp + Copy, F>(slice1: &[T],
return; return;
} else if slice1.len() == slice2.len() { } else if slice1.len() == slice2.len() {
for (xfo, (xf1, xf2)) in for (xfo, (xf1, xf2)) in
Iterator::zip(slice_out.iter_mut(), Iterator::zip(
Iterator::zip(slice1.iter(), slice2.iter())) { slice_out.iter_mut(),
Iterator::zip(slice1.iter(), slice2.iter()),
) {
*xfo = merge(xf1, xf2); *xfo = merge(xf1, xf2);
} }
} else if slice1.len() > slice2.len() { } else if slice1.len() > slice2.len() {
@ -291,13 +291,15 @@ mod tests {
use super::*; use super::*;
fn quick_select_ints(list: &mut [i32], i: usize) { fn quick_select_ints(list: &mut [i32], i: usize) {
quick_select(list, i, |a, b| if a < b { quick_select(
list, i, |a, b| if a < b {
Ordering::Less Ordering::Less
} else if a == b { } else if a == b {
Ordering::Equal Ordering::Equal
} else { } else {
Ordering::Greater Ordering::Greater
}); }
);
} }
#[test] #[test]

View File

@ -23,19 +23,18 @@ impl BBox {
pub fn new() -> BBox { pub fn new() -> BBox {
BBox { BBox {
min: Point::new(std::f32::INFINITY, std::f32::INFINITY, std::f32::INFINITY), min: Point::new(std::f32::INFINITY, std::f32::INFINITY, std::f32::INFINITY),
max: Point::new(std::f32::NEG_INFINITY, max: Point::new(
std::f32::NEG_INFINITY, std::f32::NEG_INFINITY,
std::f32::NEG_INFINITY), std::f32::NEG_INFINITY,
std::f32::NEG_INFINITY,
),
} }
} }
/// Creates a BBox with min as the minimum extent and max as the maximum /// Creates a BBox with min as the minimum extent and max as the maximum
/// extent. /// extent.
pub fn from_points(min: Point, max: Point) -> BBox { pub fn from_points(min: Point, max: Point) -> BBox {
BBox { BBox { min: min, max: max }
min: min,
max: max,
}
} }
// Returns whether the given ray intersects with the bbox. // Returns whether the given ray intersects with the bbox.
@ -59,14 +58,16 @@ impl BBox {
// Creates a new BBox transformed into a different space. // Creates a new BBox transformed into a different space.
pub fn transformed(&self, xform: Matrix4x4) -> BBox { pub fn transformed(&self, xform: Matrix4x4) -> BBox {
// BBox corners // BBox corners
let vs = [Point::new(self.min.x(), self.min.y(), self.min.z()), let vs = [
Point::new(self.min.x(), self.min.y(), self.min.z()),
Point::new(self.min.x(), self.min.y(), self.max.z()), Point::new(self.min.x(), self.min.y(), self.max.z()),
Point::new(self.min.x(), self.max.y(), self.min.z()), Point::new(self.min.x(), self.max.y(), self.min.z()),
Point::new(self.min.x(), self.max.y(), self.max.z()), Point::new(self.min.x(), self.max.y(), self.max.z()),
Point::new(self.max.x(), self.min.y(), self.min.z()), Point::new(self.max.x(), self.min.y(), self.min.z()),
Point::new(self.max.x(), self.min.y(), self.max.z()), Point::new(self.max.x(), self.min.y(), self.max.z()),
Point::new(self.max.x(), self.max.y(), self.min.z()), Point::new(self.max.x(), self.max.y(), self.min.z()),
Point::new(self.max.x(), self.max.y(), self.max.z())]; Point::new(self.max.x(), self.max.y(), self.max.z()),
];
// Transform BBox corners and make new bbox // Transform BBox corners and make new bbox
let mut b = BBox::new(); let mut b = BBox::new();
@ -99,8 +100,10 @@ impl BitOr for BBox {
type Output = BBox; type Output = BBox;
fn bitor(self, rhs: BBox) -> BBox { fn bitor(self, rhs: BBox) -> BBox {
BBox::from_points(Point { co: self.min.co.v_min(rhs.min.co) }, BBox::from_points(
Point { co: self.max.co.v_max(rhs.max.co) }) Point { co: self.min.co.v_min(rhs.min.co) },
Point { co: self.max.co.v_max(rhs.max.co) },
)
} }
} }
@ -115,8 +118,10 @@ impl BitOr<Point> for BBox {
type Output = BBox; type Output = BBox;
fn bitor(self, rhs: Point) -> BBox { fn bitor(self, rhs: Point) -> BBox {
BBox::from_points(Point { co: self.min.co.v_min(rhs.co) }, BBox::from_points(
Point { co: self.max.co.v_max(rhs.co) }) Point { co: self.min.co.v_min(rhs.co) },
Point { co: self.max.co.v_max(rhs.co) },
)
} }
} }

View File

@ -18,12 +18,7 @@ pub struct Camera<'a> {
} }
impl<'a> Camera<'a> { impl<'a> Camera<'a> {
pub fn new(arena: &'a MemArena, pub fn new(arena: &'a MemArena, transforms: Vec<Matrix4x4>, fovs: Vec<f32>, mut aperture_radii: Vec<f32>, mut focus_distances: Vec<f32>) -> Camera<'a> {
transforms: Vec<Matrix4x4>,
fovs: Vec<f32>,
mut aperture_radii: Vec<f32>,
mut focus_distances: Vec<f32>)
-> Camera<'a> {
assert!(transforms.len() != 0, "Camera has no transform(s)!"); assert!(transforms.len() != 0, "Camera has no transform(s)!");
assert!(fovs.len() != 0, "Camera has no fov(s)!"); assert!(fovs.len() != 0, "Camera has no fov(s)!");
@ -33,11 +28,15 @@ impl<'a> Camera<'a> {
focus_distances = vec![1.0]; focus_distances = vec![1.0];
if aperture_radii.len() == 0 && focus_distances.len() != 0 { if aperture_radii.len() == 0 && focus_distances.len() != 0 {
println!("WARNING: camera has aperture radius but no focus distance. Disabling \ println!(
focal blur."); "WARNING: camera has aperture radius but no focus distance. Disabling \
focal blur."
);
} else if aperture_radii.len() != 0 && focus_distances.len() == 0 { } else if aperture_radii.len() != 0 && focus_distances.len() == 0 {
println!("WARNING: camera has focus distance but no aperture radius. Disabling \ println!(
focal blur."); "WARNING: camera has focus distance but no aperture radius. Disabling \
focal blur."
);
} }
} }
@ -51,7 +50,9 @@ impl<'a> Camera<'a> {
} }
// Convert angle fov into linear fov. // Convert angle fov into linear fov.
let tfovs: Vec<f32> = fovs.iter().map(|n| (n / 2.0).sin() / (n / 2.0).cos()).collect(); let tfovs: Vec<f32> = fovs.iter()
.map(|n| (n / 2.0).sin() / (n / 2.0).cos())
.collect();
Camera { Camera {
transforms: arena.copy_slice(&transforms), transforms: arena.copy_slice(&transforms),
@ -76,9 +77,11 @@ impl<'a> Camera<'a> {
}; };
// Ray direction // Ray direction
let dir = Vector::new((x * tfov) - (orig.x() / focus_distance), let dir = Vector::new(
(x * tfov) - (orig.x() / focus_distance),
(y * tfov) - (orig.y() / focus_distance), (y * tfov) - (orig.y() / focus_distance),
1.0) 1.0,
)
.normalized(); .normalized();
Ray::new(orig * transform, dir * transform, time, false) Ray::new(orig * transform, dir * transform, time, false)

View File

@ -22,10 +22,12 @@ pub trait Color {
fn to_spectral_sample(&self, hero_wavelength: f32) -> SpectralSample { fn to_spectral_sample(&self, hero_wavelength: f32) -> SpectralSample {
SpectralSample { SpectralSample {
e: Float4::new(self.sample_spectrum(nth_wavelength(hero_wavelength, 0)), e: Float4::new(
self.sample_spectrum(nth_wavelength(hero_wavelength, 0)),
self.sample_spectrum(nth_wavelength(hero_wavelength, 1)), self.sample_spectrum(nth_wavelength(hero_wavelength, 1)),
self.sample_spectrum(nth_wavelength(hero_wavelength, 2)), self.sample_spectrum(nth_wavelength(hero_wavelength, 2)),
self.sample_spectrum(nth_wavelength(hero_wavelength, 3))), self.sample_spectrum(nth_wavelength(hero_wavelength, 3)),
),
hero_wavelength: hero_wavelength, hero_wavelength: hero_wavelength,
} }
@ -260,17 +262,13 @@ impl DivAssign<f32> for XYZ {
/// colorspace cannot represent all colors in the XYZ colorspace. /// colorspace cannot represent all colors in the XYZ colorspace.
#[allow(dead_code)] #[allow(dead_code)]
pub fn xyz_to_rec709(xyz: (f32, f32, f32)) -> (f32, f32, f32) { pub fn xyz_to_rec709(xyz: (f32, f32, f32)) -> (f32, f32, f32) {
((xyz.0 * 3.2404542) + (xyz.1 * -1.5371385) + (xyz.2 * -0.4985314), ((xyz.0 * 3.2404542) + (xyz.1 * -1.5371385) + (xyz.2 * -0.4985314), (xyz.0 * -0.9692660) + (xyz.1 * 1.8760108) + (xyz.2 * 0.0415560), (xyz.0 * 0.0556434) + (xyz.1 * -0.2040259) + (xyz.2 * 1.0572252))
(xyz.0 * -0.9692660) + (xyz.1 * 1.8760108) + (xyz.2 * 0.0415560),
(xyz.0 * 0.0556434) + (xyz.1 * -0.2040259) + (xyz.2 * 1.0572252))
} }
/// Converts a color in Rec.709 colorspace to XYZ colorspace. /// Converts a color in Rec.709 colorspace to XYZ colorspace.
#[allow(dead_code)] #[allow(dead_code)]
pub fn rec709_to_xyz(rec: (f32, f32, f32)) -> (f32, f32, f32) { pub fn rec709_to_xyz(rec: (f32, f32, f32)) -> (f32, f32, f32) {
((rec.0 * 0.4124564) + (rec.1 * 0.3575761) + (rec.2 * 0.1804375), ((rec.0 * 0.4124564) + (rec.1 * 0.3575761) + (rec.2 * 0.1804375), (rec.0 * 0.2126729) + (rec.1 * 0.7151522) + (rec.2 * 0.0721750), (rec.0 * 0.0193339) + (rec.1 * 0.1191920) + (rec.2 * 0.9503041))
(rec.0 * 0.2126729) + (rec.1 * 0.7151522) + (rec.2 * 0.0721750),
(rec.0 * 0.0193339) + (rec.1 * 0.1191920) + (rec.2 * 0.9503041))
} }
/// Converts a color in XYZ colorspace to an adjusted Rec.709 colorspace /// Converts a color in XYZ colorspace to an adjusted Rec.709 colorspace
@ -278,18 +276,14 @@ pub fn rec709_to_xyz(rec: (f32, f32, f32)) -> (f32, f32, f32) {
/// Note: this is lossy, as negative resulting values are clamped to zero. /// Note: this is lossy, as negative resulting values are clamped to zero.
#[allow(dead_code)] #[allow(dead_code)]
pub fn xyz_to_rec709e(xyz: (f32, f32, f32)) -> (f32, f32, f32) { pub fn xyz_to_rec709e(xyz: (f32, f32, f32)) -> (f32, f32, f32) {
((xyz.0 * 3.0799600) + (xyz.1 * -1.5371400) + (xyz.2 * -0.5428160), ((xyz.0 * 3.0799600) + (xyz.1 * -1.5371400) + (xyz.2 * -0.5428160), (xyz.0 * -0.9212590) + (xyz.1 * 1.8760100) + (xyz.2 * 0.0452475), (xyz.0 * 0.0528874) + (xyz.1 * -0.2040260) + (xyz.2 * 1.1511400))
(xyz.0 * -0.9212590) + (xyz.1 * 1.8760100) + (xyz.2 * 0.0452475),
(xyz.0 * 0.0528874) + (xyz.1 * -0.2040260) + (xyz.2 * 1.1511400))
} }
/// Converts a color in an adjusted Rec.709 colorspace with whitepoint E to /// Converts a color in an adjusted Rec.709 colorspace with whitepoint E to
/// XYZ colorspace. /// XYZ colorspace.
#[allow(dead_code)] #[allow(dead_code)]
pub fn rec709e_to_xyz(rec: (f32, f32, f32)) -> (f32, f32, f32) { pub fn rec709e_to_xyz(rec: (f32, f32, f32)) -> (f32, f32, f32) {
((rec.0 * 0.4339499) + (rec.1 * 0.3762098) + (rec.2 * 0.1898403), ((rec.0 * 0.4339499) + (rec.1 * 0.3762098) + (rec.2 * 0.1898403), (rec.0 * 0.2126729) + (rec.1 * 0.7151522) + (rec.2 * 0.0721750), (rec.0 * 0.0177566) + (rec.1 * 0.1094680) + (rec.2 * 0.8727755))
(rec.0 * 0.2126729) + (rec.1 * 0.7151522) + (rec.2 * 0.0721750),
(rec.0 * 0.0177566) + (rec.1 * 0.1094680) + (rec.2 * 0.8727755))
} }
@ -309,8 +303,7 @@ fn x_1931(wavelength: f32) -> f32 {
let t1 = (wavelength - 442.0) * (if wavelength < 442.0 { 0.0624 } else { 0.0374 }); let t1 = (wavelength - 442.0) * (if wavelength < 442.0 { 0.0624 } else { 0.0374 });
let t2 = (wavelength - 599.8) * (if wavelength < 599.8 { 0.0264 } else { 0.0323 }); let t2 = (wavelength - 599.8) * (if wavelength < 599.8 { 0.0264 } else { 0.0323 });
let t3 = (wavelength - 501.1) * (if wavelength < 501.1 { 0.0490 } else { 0.0382 }); let t3 = (wavelength - 501.1) * (if wavelength < 501.1 { 0.0490 } else { 0.0382 });
(0.362 * faster_exp(-0.5 * t1 * t1)) + (1.056 * faster_exp(-0.5 * t2 * t2)) - (0.362 * faster_exp(-0.5 * t1 * t1)) + (1.056 * faster_exp(-0.5 * t2 * t2)) - (0.065 * faster_exp(-0.5 * t3 * t3))
(0.065 * faster_exp(-0.5 * t3 * t3))
} }
#[allow(dead_code)] #[allow(dead_code)]

View File

@ -142,12 +142,14 @@ impl Image {
} }
// Write file // Write file
if let Err(_) = lodepng::encode_file(path, if let Err(_) = lodepng::encode_file(
path,
&image, &image,
self.res.0, self.res.0,
self.res.1, self.res.1,
lodepng::ColorType::LCT_RGB, lodepng::ColorType::LCT_RGB,
8) { 8,
) {
panic!("Couldn't write PNG file."); panic!("Couldn't write PNG file.");
} }
@ -222,10 +224,9 @@ impl<'a> Drop for Bucket<'a> {
let mut bucket_list = tmp.borrow_mut(); let mut bucket_list = tmp.borrow_mut();
// Find matching bucket and remove it // Find matching bucket and remove it
let i = bucket_list.iter().position(|bucket| { let i = bucket_list
(bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 && .iter()
(bucket.1).0 == self.max.0 && (bucket.1).1 == self.max.1 .position(|bucket| (bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 && (bucket.1).0 == self.max.0 && (bucket.1).1 == self.max.1);
});
bucket_list.swap_remove(i.unwrap()); bucket_list.swap_remove(i.unwrap());
} }
} }

View File

@ -21,7 +21,9 @@ pub fn lerp<T: Lerp>(a: T, b: T, alpha: f32) -> T {
/// Interpolates a slice of data as if each adjecent pair of elements /// Interpolates a slice of data as if each adjecent pair of elements
/// represent a linear segment. /// represent a linear segment.
pub fn lerp_slice<T: Lerp + Copy>(s: &[T], alpha: f32) -> T { pub fn lerp_slice<T: Lerp + Copy>(s: &[T], alpha: f32) -> T {
debug_assert!(s.len() > 0); debug_assert!(
s.len() > 0,
);
debug_assert!(alpha >= 0.0); debug_assert!(alpha >= 0.0);
debug_assert!(alpha <= 1.0); debug_assert!(alpha <= 1.0);
@ -41,7 +43,9 @@ pub fn lerp_slice_with<T, F>(s: &[T], alpha: f32, f: F) -> T
where T: Copy, where T: Copy,
F: Fn(T, T, f32) -> T F: Fn(T, T, f32) -> T
{ {
debug_assert!(s.len() > 0); debug_assert!(
s.len() > 0,
);
debug_assert!(alpha >= 0.0); debug_assert!(alpha >= 0.0);
debug_assert!(alpha <= 1.0); debug_assert!(alpha <= 1.0);
@ -86,10 +90,12 @@ impl Lerp for Matrix4x4 {
fn lerp(self, other: Matrix4x4, alpha: f32) -> Matrix4x4 { fn lerp(self, other: Matrix4x4, alpha: f32) -> Matrix4x4 {
let alpha_minus = 1.0 - alpha; let alpha_minus = 1.0 - alpha;
Matrix4x4 { Matrix4x4 {
values: [(self[0] * alpha_minus) + (other[0] * alpha), values: [
(self[0] * alpha_minus) + (other[0] * alpha),
(self[1] * alpha_minus) + (other[1] * alpha), (self[1] * alpha_minus) + (other[1] * alpha),
(self[2] * alpha_minus) + (other[2] * alpha), (self[2] * alpha_minus) + (other[2] * alpha),
(self[3] * alpha_minus) + (other[3] * alpha)], (self[3] * alpha_minus) + (other[3] * alpha),
],
} }
} }
} }
@ -198,7 +204,8 @@ mod tests {
#[test] #[test]
fn lerp_matrix() { fn lerp_matrix() {
let a = Matrix4x4::new_from_values(0.0, let a = Matrix4x4::new_from_values(
0.0,
2.0, 2.0,
2.0, 2.0,
3.0, 3.0,
@ -213,8 +220,10 @@ mod tests {
12.0, 12.0,
13.0, 13.0,
14.0, 14.0,
15.0); 15.0,
let b = Matrix4x4::new_from_values(-1.0, );
let b = Matrix4x4::new_from_values(
-1.0,
1.0, 1.0,
3.0, 3.0,
4.0, 4.0,
@ -229,9 +238,11 @@ mod tests {
13.0, 13.0,
14.0, 14.0,
15.0, 15.0,
16.0); 16.0,
);
let c1 = Matrix4x4::new_from_values(-0.25, let c1 = Matrix4x4::new_from_values(
-0.25,
1.75, 1.75,
2.25, 2.25,
3.25, 3.25,
@ -246,8 +257,10 @@ mod tests {
12.25, 12.25,
13.25, 13.25,
14.25, 14.25,
15.25); 15.25,
let c2 = Matrix4x4::new_from_values(-0.5, );
let c2 = Matrix4x4::new_from_values(
-0.5,
1.5, 1.5,
2.5, 2.5,
3.5, 3.5,
@ -262,8 +275,10 @@ mod tests {
12.5, 12.5,
13.5, 13.5,
14.5, 14.5,
15.5); 15.5,
let c3 = Matrix4x4::new_from_values(-0.75, );
let c3 = Matrix4x4::new_from_values(
-0.75,
1.25, 1.25,
2.75, 2.75,
3.75, 3.75,
@ -278,7 +293,8 @@ mod tests {
12.75, 12.75,
13.75, 13.75,
14.75, 14.75,
15.75); 15.75,
);
assert_eq!(a.lerp(b, 0.0), a); assert_eq!(a.lerp(b, 0.0), a);
assert_eq!(a.lerp(b, 0.25), c1); assert_eq!(a.lerp(b, 0.25), c1);

View File

@@ -19,11 +19,7 @@ pub struct DistantDiskLight<'a> {
 }
 
 impl<'a> DistantDiskLight<'a> {
-    pub fn new(arena: &'a MemArena,
-               radii: Vec<f32>,
-               directions: Vec<Vector>,
-               colors: Vec<XYZ>)
-               -> DistantDiskLight<'a> {
+    pub fn new(arena: &'a MemArena, radii: Vec<f32>, directions: Vec<Vector>, colors: Vec<XYZ>) -> DistantDiskLight<'a> {
         DistantDiskLight {
             radii: arena.copy_slice(&radii),
             directions: arena.copy_slice(&directions),

@@ -79,8 +75,9 @@ impl<'a> WorldLightSource for DistantDiskLight<'a> {
     }
 
     fn approximate_energy(&self) -> f32 {
-        let color: XYZ = self.colors.iter().fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) /
-                         self.colors.len() as f32;
+        let color: XYZ = self.colors
+            .iter()
+            .fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / self.colors.len() as f32;
         color.y
     }
 }

View File

@@ -26,14 +26,7 @@ pub trait LightSource: Boundable + Debug + Sync {
     ///
     /// Returns: The light arriving at the point arr, the vector to use for
     /// shadow testing, and the pdf of the sample.
-    fn sample(&self,
-              space: &Matrix4x4,
-              arr: Point,
-              u: f32,
-              v: f32,
-              wavelength: f32,
-              time: f32)
-              -> (SpectralSample, Vector, f32);
+    fn sample(&self, space: &Matrix4x4, arr: Point, u: f32, v: f32, wavelength: f32, time: f32) -> (SpectralSample, Vector, f32);
 
 
     /// Calculates the pdf of sampling the given

@@ -44,15 +37,7 @@ pub trait LightSource: Boundable + Debug + Sync {
     /// are a valid sample for the light source (i.e. hits/lies on the light
     /// source). No guarantees are made about the correctness of the return
     /// value if they are not valid.
-    fn sample_pdf(&self,
-                  space: &Matrix4x4,
-                  arr: Point,
-                  sample_dir: Vector,
-                  sample_u: f32,
-                  sample_v: f32,
-                  wavelength: f32,
-                  time: f32)
-                  -> f32;
+    fn sample_pdf(&self, space: &Matrix4x4, arr: Point, sample_dir: Vector, sample_u: f32, sample_v: f32, wavelength: f32, time: f32) -> f32;
 
 
     /// Returns the color emitted in the given direction from the

@@ -63,14 +48,7 @@ pub trait LightSource: Boundable + Debug + Sync {
     /// - v: Random parameter V.
     /// - wavelength: The hero wavelength of light to sample at.
     /// - time: The time to sample at.
-    fn outgoing(&self,
-                space: &Matrix4x4,
-                dir: Vector,
-                u: f32,
-                v: f32,
-                wavelength: f32,
-                time: f32)
-                -> SpectralSample;
+    fn outgoing(&self, space: &Matrix4x4, dir: Vector, u: f32, v: f32, wavelength: f32, time: f32) -> SpectralSample;
 
 
     /// Returns whether the light has a delta distribution.

View File

@ -18,17 +18,17 @@ pub struct RectangleLight<'a> {
} }
impl<'a> RectangleLight<'a> { impl<'a> RectangleLight<'a> {
pub fn new<'b>(arena: &'b MemArena, pub fn new<'b>(arena: &'b MemArena, dimensions: Vec<(f32, f32)>, colors: Vec<XYZ>) -> RectangleLight<'b> {
dimensions: Vec<(f32, f32)>, let bbs: Vec<_> = dimensions
colors: Vec<XYZ>) .iter()
-> RectangleLight<'b> { .map(
let bbs: Vec<_> = dimensions.iter() |d| {
.map(|d| {
BBox { BBox {
min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0), min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0),
max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0), max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0),
} }
}) }
)
.collect(); .collect();
RectangleLight { RectangleLight {
dimensions: arena.copy_slice(&dimensions), dimensions: arena.copy_slice(&dimensions),
@ -39,14 +39,7 @@ impl<'a> RectangleLight<'a> {
} }
impl<'a> LightSource for RectangleLight<'a> { impl<'a> LightSource for RectangleLight<'a> {
fn sample(&self, fn sample(&self, space: &Matrix4x4, arr: Point, u: f32, v: f32, wavelength: f32, time: f32) -> (SpectralSample, Vector, f32) {
space: &Matrix4x4,
arr: Point,
u: f32,
v: f32,
wavelength: f32,
time: f32)
-> (SpectralSample, Vector, f32) {
// Calculate time interpolated values // Calculate time interpolated values
let dim = lerp_slice(&self.dimensions, time); let dim = lerp_slice(&self.dimensions, time);
let col = lerp_slice(&self.colors, time); let col = lerp_slice(&self.colors, time);
@ -105,15 +98,7 @@ impl<'a> LightSource for RectangleLight<'a> {
return (spectral_sample, shadow_vec, pdf as f32); return (spectral_sample, shadow_vec, pdf as f32);
} }
fn sample_pdf(&self, fn sample_pdf(&self, space: &Matrix4x4, arr: Point, sample_dir: Vector, sample_u: f32, sample_v: f32, wavelength: f32, time: f32) -> f32 {
space: &Matrix4x4,
arr: Point,
sample_dir: Vector,
sample_u: f32,
sample_v: f32,
wavelength: f32,
time: f32)
-> f32 {
// We're not using these, silence warnings // We're not using these, silence warnings
let _ = (sample_dir, sample_u, sample_v, wavelength); let _ = (sample_dir, sample_u, sample_v, wavelength);
@ -140,14 +125,7 @@ impl<'a> LightSource for RectangleLight<'a> {
1.0 / (area_1 + area_2) 1.0 / (area_1 + area_2)
} }
fn outgoing(&self, fn outgoing(&self, space: &Matrix4x4, dir: Vector, u: f32, v: f32, wavelength: f32, time: f32) -> SpectralSample {
space: &Matrix4x4,
dir: Vector,
u: f32,
v: f32,
wavelength: f32,
time: f32)
-> SpectralSample {
// We're not using these, silence warnings // We're not using these, silence warnings
let _ = (space, dir, u, v); let _ = (space, dir, u, v);
@ -165,8 +143,9 @@ impl<'a> LightSource for RectangleLight<'a> {
} }
fn approximate_energy(&self) -> f32 { fn approximate_energy(&self) -> f32 {
let color: XYZ = self.colors.iter().fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / let color: XYZ = self.colors
self.colors.len() as f32; .iter()
.fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / self.colors.len() as f32;
color.y color.y
} }
} }

View File

@ -22,13 +22,16 @@ pub struct SphereLight<'a> {
impl<'a> SphereLight<'a> { impl<'a> SphereLight<'a> {
pub fn new<'b>(arena: &'b MemArena, radii: Vec<f32>, colors: Vec<XYZ>) -> SphereLight<'b> { pub fn new<'b>(arena: &'b MemArena, radii: Vec<f32>, colors: Vec<XYZ>) -> SphereLight<'b> {
let bbs: Vec<_> = radii.iter() let bbs: Vec<_> = radii
.map(|r| { .iter()
.map(
|r| {
BBox { BBox {
min: Point::new(-*r, -*r, -*r), min: Point::new(-*r, -*r, -*r),
max: Point::new(*r, *r, *r), max: Point::new(*r, *r, *r),
} }
}) }
)
.collect(); .collect();
SphereLight { SphereLight {
radii: arena.copy_slice(&radii), radii: arena.copy_slice(&radii),
@ -39,14 +42,7 @@ impl<'a> SphereLight<'a> {
} }
impl<'a> LightSource for SphereLight<'a> { impl<'a> LightSource for SphereLight<'a> {
fn sample(&self, fn sample(&self, space: &Matrix4x4, arr: Point, u: f32, v: f32, wavelength: f32, time: f32) -> (SpectralSample, Vector, f32) {
space: &Matrix4x4,
arr: Point,
u: f32,
v: f32,
wavelength: f32,
time: f32)
-> (SpectralSample, Vector, f32) {
// TODO: track fp error due to transforms // TODO: track fp error due to transforms
let arr = arr * *space; let arr = arr * *space;
let pos = Point::new(0.0, 0.0, 0.0); let pos = Point::new(0.0, 0.0, 0.0);
@ -93,13 +89,14 @@ impl<'a> LightSource for SphereLight<'a> {
}; };
let sin_a = ((1.0 - (cos_a * cos_a)).max(0.0)).sqrt(); let sin_a = ((1.0 - (cos_a * cos_a)).max(0.0)).sqrt();
let phi = v as f64 * 2.0 * PI_64; let phi = v as f64 * 2.0 * PI_64;
let sample = Vector::new((phi.cos() * sin_a * radius) as f32, let sample = Vector::new(
(phi.cos() * sin_a * radius) as f32,
(phi.sin() * sin_a * radius) as f32, (phi.sin() * sin_a * radius) as f32,
(d - (cos_a * radius)) as f32); (d - (cos_a * radius)) as f32,
);
// Calculate the final values and return everything. // Calculate the final values and return everything.
let shadow_vec = ((x * sample.x()) + (y * sample.y()) + (z * sample.z())) * let shadow_vec = ((x * sample.x()) + (y * sample.y()) + (z * sample.z())) * space.inverse();
space.inverse();
let pdf = uniform_sample_cone_pdf(cos_theta_max); let pdf = uniform_sample_cone_pdf(cos_theta_max);
let spectral_sample = (col * surface_area_inv as f32).to_spectral_sample(wavelength); let spectral_sample = (col * surface_area_inv as f32).to_spectral_sample(wavelength);
return (spectral_sample, shadow_vec, pdf as f32); return (spectral_sample, shadow_vec, pdf as f32);
@ -112,15 +109,7 @@ impl<'a> LightSource for SphereLight<'a> {
} }
} }
fn sample_pdf(&self, fn sample_pdf(&self, space: &Matrix4x4, arr: Point, sample_dir: Vector, sample_u: f32, sample_v: f32, wavelength: f32, time: f32) -> f32 {
space: &Matrix4x4,
arr: Point,
sample_dir: Vector,
sample_u: f32,
sample_v: f32,
wavelength: f32,
time: f32)
-> f32 {
// We're not using these, silence warnings // We're not using these, silence warnings
let _ = (sample_dir, sample_u, sample_v, wavelength); let _ = (sample_dir, sample_u, sample_v, wavelength);
@ -143,14 +132,7 @@ impl<'a> LightSource for SphereLight<'a> {
} }
} }
fn outgoing(&self, fn outgoing(&self, space: &Matrix4x4, dir: Vector, u: f32, v: f32, wavelength: f32, time: f32) -> SpectralSample {
space: &Matrix4x4,
dir: Vector,
u: f32,
v: f32,
wavelength: f32,
time: f32)
-> SpectralSample {
// We're not using these, silence warnings // We're not using these, silence warnings
let _ = (space, dir, u, v); let _ = (space, dir, u, v);
@ -166,8 +148,9 @@ impl<'a> LightSource for SphereLight<'a> {
} }
fn approximate_energy(&self) -> f32 { fn approximate_energy(&self) -> f32 {
let color: XYZ = self.colors.iter().fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / let color: XYZ = self.colors
self.colors.len() as f32; .iter()
.fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / self.colors.len() as f32;
color.y color.y
} }
} }

View File

@ -70,59 +70,86 @@ fn main() {
let mut t = Timer::new(); let mut t = Timer::new();
// Parse command line arguments. // Parse command line arguments.
let args = let args = App::new("Psychopath")
App::new("Psychopath")
.version(VERSION) .version(VERSION)
.about("A slightly psychotic path tracer") .about("A slightly psychotic path tracer")
.arg(Arg::with_name("input") .arg(
Arg::with_name("input")
.short("i") .short("i")
.long("input") .long("input")
.value_name("FILE") .value_name("FILE")
.help("Input .psy file") .help("Input .psy file")
.takes_value(true) .takes_value(true)
.required_unless("dev")) .required_unless("dev")
.arg(Arg::with_name("spp") )
.arg(
Arg::with_name("spp")
.short("s") .short("s")
.long("spp") .long("spp")
.value_name("N") .value_name("N")
.help("Number of samples per pixel") .help("Number of samples per pixel")
.takes_value(true) .takes_value(true)
.validator(|s| { .validator(
usize::from_str(&s).and(Ok(())).or(Err("must be an integer".to_string())) |s| {
})) usize::from_str(&s)
.arg(Arg::with_name("max_bucket_samples") .and(Ok(()))
.or(Err("must be an integer".to_string()))
}
)
)
.arg(
Arg::with_name("max_bucket_samples")
.short("b") .short("b")
.long("spb") .long("spb")
.value_name("N") .value_name("N")
.help("Target number of samples per bucket (determines bucket size)") .help("Target number of samples per bucket (determines bucket size)")
.takes_value(true) .takes_value(true)
.validator(|s| { .validator(
usize::from_str(&s).and(Ok(())).or(Err("must be an integer".to_string())) |s| {
})) usize::from_str(&s)
.arg(Arg::with_name("threads") .and(Ok(()))
.or(Err("must be an integer".to_string()))
}
)
)
.arg(
Arg::with_name("threads")
.short("t") .short("t")
.long("threads") .long("threads")
.value_name("N") .value_name("N")
.help("Number of threads to render with. Defaults to the number of logical \ .help(
cores on the system.") "Number of threads to render with. Defaults to the number of logical \
cores on the system."
)
.takes_value(true) .takes_value(true)
.validator(|s| { .validator(
usize::from_str(&s).and(Ok(())).or(Err("must be an integer".to_string())) |s| {
})) usize::from_str(&s)
.arg(Arg::with_name("stats") .and(Ok(()))
.or(Err("must be an integer".to_string()))
}
)
)
.arg(
Arg::with_name("stats")
.long("stats") .long("stats")
.help("Print additional statistics about rendering")) .help("Print additional statistics about rendering")
.arg(Arg::with_name("dev") )
.arg(
Arg::with_name("dev")
.long("dev") .long("dev")
.help("Show useful dev/debug info.")) .help("Show useful dev/debug info.")
)
.get_matches(); .get_matches();
// Print some misc useful dev info. // Print some misc useful dev info.
if args.is_present("dev") { if args.is_present("dev") {
println!("Ray size: {} bytes", mem::size_of::<Ray>()); println!("Ray size: {} bytes", mem::size_of::<Ray>());
println!("AccelRay size: {} bytes", mem::size_of::<AccelRay>()); println!("AccelRay size: {} bytes", mem::size_of::<AccelRay>());
println!("SurfaceIntersection size: {} bytes", println!(
mem::size_of::<SurfaceIntersection>()); "SurfaceIntersection size: {} bytes",
mem::size_of::<SurfaceIntersection>()
);
println!("LightPath size: {} bytes", mem::size_of::<LightPath>()); println!("LightPath size: {} bytes", mem::size_of::<LightPath>());
println!("BBox size: {} bytes", mem::size_of::<BBox>()); println!("BBox size: {} bytes", mem::size_of::<BBox>());
println!("BVHNode size: {} bytes", mem::size_of::<BVHNode>()); println!("BVHNode size: {} bytes", mem::size_of::<BVHNode>());
@@ -130,7 +157,9 @@ fn main() {
     }
 
     // Parse data tree of scene file
-    println!("Parsing scene file...");
+    println!(
+        "Parsing scene file...",
+    );
     t.tick();
     let mut psy_contents = String::new();
     let dt = {
@ -150,18 +179,19 @@ fn main() {
println!("Building scene..."); println!("Building scene...");
let arena = MemArena::with_min_block_size((1 << 20) * 4); let arena = MemArena::with_min_block_size((1 << 20) * 4);
let mut r = parse_scene(&arena, child).unwrap_or_else(|e| { let mut r = parse_scene(&arena, child).unwrap_or_else(
|e| {
e.print(&psy_contents); e.print(&psy_contents);
panic!("Parse error."); panic!("Parse error.");
}); }
);
if let Some(spp) = args.value_of("spp") { if let Some(spp) = args.value_of("spp") {
println!("\tOverriding scene spp: {}", spp); println!("\tOverriding scene spp: {}", spp);
r.spp = usize::from_str(&spp).unwrap(); r.spp = usize::from_str(&spp).unwrap();
} }
let max_samples_per_bucket = if let Some(max_samples_per_bucket) = let max_samples_per_bucket = if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") {
args.value_of("max_bucket_samples") {
u32::from_str(&max_samples_per_bucket).unwrap() u32::from_str(&max_samples_per_bucket).unwrap()
} else { } else {
4096 4096
@ -182,16 +212,26 @@ fn main() {
let rtime = t.tick(); let rtime = t.tick();
let ntime = rtime as f64 / rstats.total_time; let ntime = rtime as f64 / rstats.total_time;
println!("\tRendered scene in {:.3}s", rtime); println!("\tRendered scene in {:.3}s", rtime);
println!("\t\tTrace: {:.3}s", println!(
ntime * rstats.trace_time); "\t\tTrace: {:.3}s",
println!("\t\t\tTraversal: {:.3}s", ntime * rstats.trace_time
ntime * rstats.accel_traversal_time); );
println!("\t\tInitial ray generation: {:.3}s", println!(
ntime * rstats.initial_ray_generation_time); "\t\t\tTraversal: {:.3}s",
println!("\t\tRay generation: {:.3}s", ntime * rstats.accel_traversal_time
ntime * rstats.ray_generation_time); );
println!("\t\tSample writing: {:.3}s", println!(
ntime * rstats.sample_writing_time); "\t\tInitial ray generation: {:.3}s",
ntime * rstats.initial_ray_generation_time
);
println!(
"\t\tRay generation: {:.3}s",
ntime * rstats.ray_generation_time
);
println!(
"\t\tSample writing: {:.3}s",
ntime * rstats.sample_writing_time
);
} }
println!("Writing image to disk..."); println!("Writing image to disk...");

View File

@ -29,9 +29,7 @@ pub fn fast_pow2(p: f32) -> f32 {
let w: i32 = clipp as i32; let w: i32 = clipp as i32;
let z: f32 = clipp - w as f32 + offset; let z: f32 = clipp - w as f32 + offset;
let i: u32 = ((1 << 23) as f32 * let i: u32 = ((1 << 23) as f32 * (clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z)) as u32;
(clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z)) as
u32;
unsafe { transmute_copy::<u32, f32>(&i) } unsafe { transmute_copy::<u32, f32>(&i) }
} }
@ -77,10 +75,72 @@ pub fn upper_power_of_two(mut v: u32) -> u32 {
/// Gets the log base 2 of the given integer /// Gets the log base 2 of the given integer
pub fn log2_64(value: u64) -> u64 { pub fn log2_64(value: u64) -> u64 {
const TAB64: [u64; 64] = [63, 0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 42, 3, 61, 51, const TAB64: [u64; 64] = [
37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, 62, 57, 46, 63,
52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, 56, 45, 25, 31, 0,
35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5]; 58,
1,
59,
47,
53,
2,
60,
39,
48,
27,
54,
33,
42,
3,
61,
51,
37,
40,
49,
18,
28,
20,
55,
30,
34,
11,
43,
14,
22,
4,
62,
57,
46,
52,
38,
26,
32,
41,
50,
36,
17,
19,
29,
10,
13,
21,
56,
45,
25,
31,
35,
16,
9,
12,
44,
24,
15,
8,
23,
7,
6,
5,
];
let value = value | value.wrapping_shr(1); let value = value | value.wrapping_shr(1);
let value = value | value.wrapping_shr(2); let value = value | value.wrapping_shr(2);
@ -89,8 +149,7 @@ pub fn log2_64(value: u64) -> u64 {
let value = value | value.wrapping_shr(16); let value = value | value.wrapping_shr(16);
let value = value | value.wrapping_shr(32); let value = value | value.wrapping_shr(32);
TAB64[((value.wrapping_sub(value.wrapping_shr(1)) as u64).wrapping_mul(0x07EDD5E59A4E28C2)) TAB64[((value.wrapping_sub(value.wrapping_shr(1)) as u64).wrapping_mul(0x07EDD5E59A4E28C2)).wrapping_shr(58) as usize]
.wrapping_shr(58) as usize]
} }

View File

@ -35,12 +35,14 @@ impl<'a> DataTree<'a> {
remaining_text = skip_ws_and_comments(remaining_text); remaining_text = skip_ws_and_comments(remaining_text);
if remaining_text.1.len() == 0 { if remaining_text.1.len() == 0 {
return Ok(DataTree::Internal { return Ok(
DataTree::Internal {
type_name: "ROOT", type_name: "ROOT",
ident: None, ident: None,
children: items, children: items,
byte_offset: 0, byte_offset: 0,
}); }
);
} else { } else {
// If the whole text wasn't parsed, something went wrong. // If the whole text wasn't parsed, something went wrong.
return Err(ParseError::Other((0, "Failed to parse the entire string."))); return Err(ParseError::Other((0, "Failed to parse the entire string.")));
@ -104,9 +106,7 @@ impl<'a> DataTree<'a> {
} }
} }
pub fn iter_internal_children_with_type(&'a self, pub fn iter_internal_children_with_type(&'a self, type_name: &'static str) -> DataTreeFilterInternalIter<'a> {
type_name: &'static str)
-> DataTreeFilterInternalIter<'a> {
if let &DataTree::Internal { ref children, .. } = self { if let &DataTree::Internal { ref children, .. } = self {
DataTreeFilterInternalIter { DataTreeFilterInternalIter {
type_name: type_name, type_name: type_name,
@ -120,9 +120,7 @@ impl<'a> DataTree<'a> {
} }
} }
pub fn iter_leaf_children_with_type(&'a self, pub fn iter_leaf_children_with_type(&'a self, type_name: &'static str) -> DataTreeFilterLeafIter<'a> {
type_name: &'static str)
-> DataTreeFilterLeafIter<'a> {
if let &DataTree::Internal { ref children, .. } = self { if let &DataTree::Internal { ref children, .. } = self {
DataTreeFilterLeafIter { DataTreeFilterLeafIter {
type_name: type_name, type_name: type_name,
@ -138,14 +136,23 @@ impl<'a> DataTree<'a> {
// For unit tests // For unit tests
fn internal_data_or_panic(&'a self) -> (&'a str, Option<&'a str>, &'a Vec<DataTree<'a>>) { fn internal_data_or_panic(&'a self) -> (&'a str, Option<&'a str>, &'a Vec<DataTree<'a>>) {
if let DataTree::Internal { type_name, ident, ref children, byte_offset: _ } = *self { if let DataTree::Internal {
type_name,
ident,
ref children,
byte_offset: _,
} = *self {
(type_name, ident, children) (type_name, ident, children)
} else { } else {
panic!("Expected DataTree::Internal, found DataTree::Leaf") panic!("Expected DataTree::Internal, found DataTree::Leaf")
} }
} }
fn leaf_data_or_panic(&'a self) -> (&'a str, &'a str) { fn leaf_data_or_panic(&'a self) -> (&'a str, &'a str) {
if let DataTree::Leaf { type_name, contents, byte_offset: _ } = *self { if let DataTree::Leaf {
type_name,
contents,
byte_offset: _,
} = *self {
(type_name, contents) (type_name, contents)
} else { } else {
panic!("Expected DataTree::Leaf, found DataTree::Internal") panic!("Expected DataTree::Leaf, found DataTree::Internal")
@ -194,7 +201,12 @@ impl<'a> Iterator for DataTreeFilterInternalIter<'a> {
fn next(&mut self) -> Option<(&'a str, Option<&'a str>, &'a Vec<DataTree<'a>>, usize)> { fn next(&mut self) -> Option<(&'a str, Option<&'a str>, &'a Vec<DataTree<'a>>, usize)> {
loop { loop {
match self.iter.next() { match self.iter.next() {
Some(&DataTree::Internal { type_name, ident, ref children, byte_offset }) => { Some(&DataTree::Internal {
type_name,
ident,
ref children,
byte_offset,
}) => {
if type_name == self.type_name { if type_name == self.type_name {
return Some((type_name, ident, children, byte_offset)); return Some((type_name, ident, children, byte_offset));
} else { } else {
@ -233,7 +245,11 @@ impl<'a> Iterator for DataTreeFilterLeafIter<'a> {
continue; continue;
} }
Some(&DataTree::Leaf { type_name, contents, byte_offset }) => { Some(&DataTree::Leaf {
type_name,
contents,
byte_offset,
}) => {
if type_name == self.type_name { if type_name == self.type_name {
return Some((type_name, contents, byte_offset)); return Some((type_name, contents, byte_offset));
} else { } else {
@ -296,13 +312,17 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> {
children.push(node); children.push(node);
} }
if let (Token::CloseInner, text4) = next_token(text_remaining) { if let (Token::CloseInner, text4) = next_token(text_remaining) {
return Ok(Some((DataTree::Internal { return Ok(
Some(
(DataTree::Internal {
type_name: type_name, type_name: type_name,
ident: Some(n), ident: Some(n),
children: children, children: children,
byte_offset: text1.0, byte_offset: text1.0,
}, },
text4))); text4)
)
);
} else { } else {
return Err(ParseError::MissingCloseInternal(text_remaining.0)); return Err(ParseError::MissingCloseInternal(text_remaining.0));
} }
@ -321,13 +341,17 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> {
} }
if let (Token::CloseInner, text3) = next_token(text_remaining) { if let (Token::CloseInner, text3) = next_token(text_remaining) {
return Ok(Some((DataTree::Internal { return Ok(
Some(
(DataTree::Internal {
type_name: type_name, type_name: type_name,
ident: None, ident: None,
children: children, children: children,
byte_offset: text1.0, byte_offset: text1.0,
}, },
text3))); text3)
)
);
} else { } else {
return Err(ParseError::MissingCloseInternal(text_remaining.0)); return Err(ParseError::MissingCloseInternal(text_remaining.0));
} }
@ -337,12 +361,16 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> {
(Token::OpenLeaf, text2) => { (Token::OpenLeaf, text2) => {
let (contents, text3) = parse_leaf_content(text2); let (contents, text3) = parse_leaf_content(text2);
if let (Token::CloseLeaf, text4) = next_token(text3) { if let (Token::CloseLeaf, text4) = next_token(text3) {
return Ok(Some((DataTree::Leaf { return Ok(
Some(
(DataTree::Leaf {
type_name: type_name, type_name: type_name,
contents: contents, contents: contents,
byte_offset: text1.0, byte_offset: text1.0,
}, },
text4))); text4)
)
);
} else { } else {
return Err(ParseError::MissingCloseLeaf(text3.0)); return Err(ParseError::MissingCloseLeaf(text3.0));
} }
@ -586,8 +614,10 @@ mod tests {
fn tokenize_5() { fn tokenize_5() {
let input = (0, " $hi\\ t\\#he\\[re "); let input = (0, " $hi\\ t\\#he\\[re ");
assert_eq!(next_token(input), assert_eq!(
(Token::Ident("$hi\\ t\\#he\\[re"), (15, " "))); next_token(input),
(Token::Ident("$hi\\ t\\#he\\[re"), (15, " "))
);
} }
#[test] #[test]
@ -618,14 +648,22 @@ mod tests {
let (token7, input8) = next_token(input7); let (token7, input8) = next_token(input7);
let (token8, input9) = next_token(input8); let (token8, input9) = next_token(input8);
assert_eq!((token1, input2), assert_eq!(
(Token::TypeName("Thing"), (5, " $yar { # A comment\n\tThing2 []\n}"))); (token1, input2),
assert_eq!((token2, input3), (Token::TypeName("Thing"), (5, " $yar { # A comment\n\tThing2 []\n}"))
(Token::Ident("$yar"), (10, " { # A comment\n\tThing2 []\n}"))); );
assert_eq!((token3, input4), assert_eq!(
(Token::OpenInner, (12, " # A comment\n\tThing2 []\n}"))); (token2, input3),
assert_eq!((token4, input5), (Token::Ident("$yar"), (10, " { # A comment\n\tThing2 []\n}"))
(Token::TypeName("Thing2"), (32, " []\n}"))); );
assert_eq!(
(token3, input4),
(Token::OpenInner, (12, " # A comment\n\tThing2 []\n}"))
);
assert_eq!(
(token4, input5),
(Token::TypeName("Thing2"), (32, " []\n}"))
);
assert_eq!((token5, input6), (Token::OpenLeaf, (34, "]\n}"))); assert_eq!((token5, input6), (Token::OpenLeaf, (34, "]\n}")));
assert_eq!((token6, input7), (Token::CloseLeaf, (35, "\n}"))); assert_eq!((token6, input7), (Token::CloseLeaf, (35, "\n}")));
assert_eq!((token7, input8), (Token::CloseInner, (37, ""))); assert_eq!((token7, input8), (Token::CloseInner, (37, "")));
@ -655,13 +693,15 @@ mod tests {
#[test] #[test]
fn iter_1() { fn iter_1() {
let dt = DataTree::from_str(r#" let dt = DataTree::from_str(
r#"
A {} A {}
B {} B {}
A [] A []
A {} A {}
B {} B {}
"#) "#
)
.unwrap(); .unwrap();
let i = dt.iter_children_with_type("A"); let i = dt.iter_children_with_type("A");
@ -670,13 +710,15 @@ mod tests {
#[test] #[test]
fn iter_2() { fn iter_2() {
let dt = DataTree::from_str(r#" let dt = DataTree::from_str(
r#"
A {} A {}
B {} B {}
A [] A []
A {} A {}
B {} B {}
"#) "#
)
.unwrap(); .unwrap();
let i = dt.iter_internal_children_with_type("A"); let i = dt.iter_internal_children_with_type("A");
@ -685,13 +727,15 @@ mod tests {
#[test] #[test]
fn iter_3() { fn iter_3() {
let dt = DataTree::from_str(r#" let dt = DataTree::from_str(
r#"
A [] A []
B {} B {}
A {} A {}
A [] A []
B {} B {}
"#) "#
)
.unwrap(); .unwrap();
let i = dt.iter_leaf_children_with_type("A"); let i = dt.iter_leaf_children_with_type("A");
@ -40,9 +40,11 @@ impl PsyParseError {
match self { match self {
&PsyParseError::UnknownError(offset) => { &PsyParseError::UnknownError(offset) => {
let line = line_count_to_byte_offset(psy_content, offset); let line = line_count_to_byte_offset(psy_content, offset);
println!("Line {}: Unknown parse error. If you get this message, please report \ println!(
"Line {}: Unknown parse error. If you get this message, please report \
it to the developers so they can improve the error messages.", it to the developers so they can improve the error messages.",
line); line
);
} }
&PsyParseError::UnknownVariant(offset, error) => { &PsyParseError::UnknownVariant(offset, error) => {
@ -89,65 +91,87 @@ fn line_count_to_byte_offset(text: &str, offset: usize) -> usize {
/// Takes in a DataTree representing a Scene node and returns /// Takes in a DataTree representing a Scene node and returns
pub fn parse_scene<'a>(arena: &'a MemArena, pub fn parse_scene<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Renderer<'a>, PsyParseError> {
tree: &'a DataTree)
-> Result<Renderer<'a>, PsyParseError> {
// Verify we have the right number of each section // Verify we have the right number of each section
if tree.iter_children_with_type("Output").count() != 1 { if tree.iter_children_with_type("Output").count() != 1 {
let count = tree.iter_children_with_type("Output").count(); let count = tree.iter_children_with_type("Output").count();
return Err(PsyParseError::WrongNodeCount(tree.byte_offset(), return Err(
PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one Output \ "Scene should have precisely one Output \
section.", section.",
count)); count,
)
);
} }
if tree.iter_children_with_type("RenderSettings").count() != 1 { if tree.iter_children_with_type("RenderSettings").count() != 1 {
let count = tree.iter_children_with_type("RenderSettings").count(); let count = tree.iter_children_with_type("RenderSettings").count();
return Err(PsyParseError::WrongNodeCount(tree.byte_offset(), return Err(
PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one \ "Scene should have precisely one \
RenderSettings section.", RenderSettings section.",
count)); count,
)
);
} }
if tree.iter_children_with_type("Camera").count() != 1 { if tree.iter_children_with_type("Camera").count() != 1 {
let count = tree.iter_children_with_type("Camera").count(); let count = tree.iter_children_with_type("Camera").count();
return Err(PsyParseError::WrongNodeCount(tree.byte_offset(), return Err(
PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one Camera \ "Scene should have precisely one Camera \
section.", section.",
count)); count,
)
);
} }
if tree.iter_children_with_type("World").count() != 1 { if tree.iter_children_with_type("World").count() != 1 {
let count = tree.iter_children_with_type("World").count(); let count = tree.iter_children_with_type("World").count();
return Err(PsyParseError::WrongNodeCount(tree.byte_offset(), return Err(
PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one World section.", "Scene should have precisely one World section.",
count)); count,
)
);
} }
if tree.iter_children_with_type("Assembly").count() != 1 { if tree.iter_children_with_type("Assembly").count() != 1 {
let count = tree.iter_children_with_type("Assembly").count(); let count = tree.iter_children_with_type("Assembly").count();
return Err(PsyParseError::WrongNodeCount(tree.byte_offset(), return Err(
PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one Root Assembly \ "Scene should have precisely one Root Assembly \
section.", section.",
count)); count,
)
);
} }
// Parse output info // Parse output info
let output_info = parse_output_info(tree.iter_children_with_type("Output") let output_info = parse_output_info(tree.iter_children_with_type("Output").nth(0).unwrap())?;
.nth(0)
.unwrap())?;
// Parse render settings // Parse render settings
let render_settings = parse_render_settings(tree.iter_children_with_type("RenderSettings") let render_settings = parse_render_settings(
tree.iter_children_with_type("RenderSettings")
.nth(0) .nth(0)
.unwrap())?; .unwrap()
)?;
// Parse camera // Parse camera
let camera = parse_camera(arena, let camera = parse_camera(
tree.iter_children_with_type("Camera").nth(0).unwrap())?; arena,
tree.iter_children_with_type("Camera").nth(0).unwrap(),
)?;
// Parse world // Parse world
let world = parse_world(arena, tree.iter_children_with_type("World").nth(0).unwrap())?; let world = parse_world(arena, tree.iter_children_with_type("World").nth(0).unwrap())?;
// Parse root scene assembly // Parse root scene assembly
let assembly = parse_assembly(arena, let assembly = parse_assembly(
tree.iter_children_with_type("Assembly").nth(0).unwrap())?; arena,
tree.iter_children_with_type("Assembly").nth(0).unwrap(),
)?;
// Put scene together // Put scene together
let scene_name = if let &DataTree::Internal { ident, .. } = tree { let scene_name = if let &DataTree::Internal { ident, .. } = tree {
@ -188,18 +212,30 @@ fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
for child in children { for child in children {
match child { match child {
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Path" => { &DataTree::Leaf {
type_name,
contents,
byte_offset,
} if type_name == "Path" => {
// Trim and validate // Trim and validate
let tc = contents.trim(); let tc = contents.trim();
if tc.chars().count() < 2 { if tc.chars().count() < 2 {
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"File path format is \ "File path format is \
incorrect.")); incorrect.",
)
);
} }
if tc.chars().nth(0).unwrap() != '"' || tc.chars().last().unwrap() != '"' { if tc.chars().nth(0).unwrap() != '"' || tc.chars().last().unwrap() != '"' {
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"File paths must be \ "File paths must be \
surrounded by quotes.")); surrounded by quotes.",
)
);
} }
let len = tc.len(); let len = tc.len();
let tc = &tc[1..len - 1]; let tc = &tc[1..len - 1];
@ -217,13 +253,16 @@ fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
if found_path { if found_path {
return Ok((path)); return Ok((path));
} else { } else {
return Err(PsyParseError::MissingNode(tree.byte_offset(), return Err(PsyParseError::MissingNode(tree.byte_offset(), "Output section must contain a Path."));
"Output section must contain a Path."));
} }
} else { } else {
return Err(PsyParseError::ExpectedInternalNode(tree.byte_offset(), return Err(
PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"Output section should be an internal \ "Output section should be an internal \
node, containing at least a Path.")); node, containing at least a Path.",
)
);
}; };
} }
@ -241,45 +280,66 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
for child in children { for child in children {
match child { match child {
// Resolution // Resolution
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == &DataTree::Leaf {
"Resolution" => { type_name,
if let IResult::Done(_, (w, h)) = closure!(terminated!(tuple!(ws_u32, ws_u32), contents,
nom::eof))(contents.as_bytes()) { byte_offset,
} if type_name == "Resolution" => {
if let IResult::Done(_, (w, h)) = closure!(terminated!(tuple!(ws_u32, ws_u32), nom::eof))(contents.as_bytes()) {
found_res = true; found_res = true;
res = (w, h); res = (w, h);
} else { } else {
// Found Resolution, but its contents is not in the right format // Found Resolution, but its contents is not in the right format
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"Resolution should be specified with two \ "Resolution should be specified with two \
integers in the form '[width height]'.")); integers in the form '[width height]'.",
)
);
} }
} }
// SamplesPerPixel // SamplesPerPixel
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == &DataTree::Leaf {
"SamplesPerPixel" => { type_name,
contents,
byte_offset,
} if type_name == "SamplesPerPixel" => {
if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) { if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) {
found_spp = true; found_spp = true;
spp = n; spp = n;
} else { } else {
// Found SamplesPerPixel, but its contents is not in the right format // Found SamplesPerPixel, but its contents is not in the right format
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"SamplesPerPixel should be \ "SamplesPerPixel should be \
an integer specified in \ an integer specified in \
the form '[samples]'.")); the form '[samples]'.",
)
);
} }
} }
// Seed // Seed
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Seed" => { &DataTree::Leaf {
type_name,
contents,
byte_offset,
} if type_name == "Seed" => {
if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) { if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) {
seed = n; seed = n;
} else { } else {
// Found Seed, but its contents is not in the right format // Found Seed, but its contents is not in the right format
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"Seed should be an integer \ "Seed should be an integer \
specified in the form \ specified in the form \
'[samples]'.")); '[samples]'.",
)
);
} }
} }
@ -290,15 +350,23 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
if found_res && found_spp { if found_res && found_spp {
return Ok((res, spp, seed)); return Ok((res, spp, seed));
} else { } else {
return Err(PsyParseError::MissingNode(tree.byte_offset(), return Err(
PsyParseError::MissingNode(
tree.byte_offset(),
"RenderSettings must have both Resolution and \ "RenderSettings must have both Resolution and \
SamplesPerPixel specified.")); SamplesPerPixel specified.",
)
);
} }
} else { } else {
return Err(PsyParseError::ExpectedInternalNode(tree.byte_offset(), return Err(
PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"RenderSettings section should be an \ "RenderSettings section should be an \
internal node, containing at least \ internal node, containing at least \
Resolution and SamplesPerPixel.")); Resolution and SamplesPerPixel.",
)
);
}; };
} }
@ -316,49 +384,74 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
for child in children.iter() { for child in children.iter() {
match child { match child {
// Fov // Fov
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Fov" => { &DataTree::Leaf {
type_name,
contents,
byte_offset,
} if type_name == "Fov" => {
if let IResult::Done(_, fov) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, fov) = ws_f32(contents.as_bytes()) {
fovs.push(fov * (3.1415926536 / 180.0)); fovs.push(fov * (3.1415926536 / 180.0));
} else { } else {
// Found Fov, but its contents is not in the right format // Found Fov, but its contents is not in the right format
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"Fov should be a decimal \ "Fov should be a decimal \
number specified in the \ number specified in the \
form '[fov]'.")); form '[fov]'.",
)
);
} }
} }
// FocalDistance // FocalDistance
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == &DataTree::Leaf {
"FocalDistance" => { type_name,
contents,
byte_offset,
} if type_name == "FocalDistance" => {
if let IResult::Done(_, fd) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, fd) = ws_f32(contents.as_bytes()) {
focus_distances.push(fd); focus_distances.push(fd);
} else { } else {
// Found FocalDistance, but its contents is not in the right format // Found FocalDistance, but its contents is not in the right format
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"FocalDistance should be a \ "FocalDistance should be a \
decimal number specified \ decimal number specified \
in the form '[fov]'.")); in the form '[fov]'.",
)
);
} }
} }
// ApertureRadius // ApertureRadius
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == &DataTree::Leaf {
"ApertureRadius" => { type_name,
contents,
byte_offset,
} if type_name == "ApertureRadius" => {
if let IResult::Done(_, ar) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, ar) = ws_f32(contents.as_bytes()) {
aperture_radii.push(ar); aperture_radii.push(ar);
} else { } else {
// Found ApertureRadius, but its contents is not in the right format // Found ApertureRadius, but its contents is not in the right format
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"ApertureRadius should be a \ "ApertureRadius should be a \
decimal number specified \ decimal number specified \
in the form '[fov]'.")); in the form '[fov]'.",
)
);
} }
} }
// Transform // Transform
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == &DataTree::Leaf {
"Transform" => { type_name,
contents,
byte_offset,
} if type_name == "Transform" => {
if let Ok(mat) = parse_matrix(contents) { if let Ok(mat) = parse_matrix(contents) {
mats.push(mat); mats.push(mat);
} else { } else {
@ -373,10 +466,14 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Ok(Camera::new(arena, mats, fovs, aperture_radii, focus_distances)); return Ok(Camera::new(arena, mats, fovs, aperture_radii, focus_distances));
} else { } else {
return Err(PsyParseError::ExpectedInternalNode(tree.byte_offset(), return Err(
PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"Camera section should be an internal \ "Camera section should be an internal \
node, containing at least Fov and \ node, containing at least Fov and \
Transform.")); Transform.",
)
);
} }
} }
@ -391,59 +488,81 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
// Parse background shader // Parse background shader
let bgs = { let bgs = {
if tree.iter_children_with_type("BackgroundShader").count() != 1 { if tree.iter_children_with_type("BackgroundShader").count() != 1 {
return Err(PsyParseError::WrongNodeCount(tree.byte_offset(), return Err(
PsyParseError::WrongNodeCount(
tree.byte_offset(),
"World should have precisely one BackgroundShader section.", "World should have precisely one BackgroundShader section.",
tree.iter_children_with_type("BackgroundShader").count())); tree.iter_children_with_type("BackgroundShader").count(),
)
);
} }
tree.iter_children_with_type("BackgroundShader").nth(0).unwrap() tree.iter_children_with_type("BackgroundShader")
.nth(0)
.unwrap()
}; };
let bgs_type = { let bgs_type = {
if bgs.iter_children_with_type("Type").count() != 1 { if bgs.iter_children_with_type("Type").count() != 1 {
return Err(PsyParseError::WrongNodeCount(bgs.byte_offset(), return Err(
PsyParseError::WrongNodeCount(
bgs.byte_offset(),
"BackgroundShader should have \ "BackgroundShader should have \
precisely one Type specified.", precisely one Type specified.",
bgs.iter_children_with_type("Type") bgs.iter_children_with_type("Type").count(),
.count())); )
);
} }
if let &DataTree::Leaf { contents, .. } = if let &DataTree::Leaf { contents, .. } = bgs.iter_children_with_type("Type").nth(0).unwrap() {
bgs.iter_children_with_type("Type")
.nth(0)
.unwrap() {
contents.trim() contents.trim()
} else { } else {
return Err(PsyParseError::ExpectedLeafNode(bgs.byte_offset(), return Err(
PsyParseError::ExpectedLeafNode(
bgs.byte_offset(),
"BackgroundShader's Type should be a \ "BackgroundShader's Type should be a \
leaf node.")); leaf node.",
)
);
} }
}; };
match bgs_type { match bgs_type {
"Color" => { "Color" => {
if let Some(&DataTree::Leaf { contents, byte_offset, .. }) = if let Some(&DataTree::Leaf {
bgs.iter_children_with_type("Color") contents,
.nth(0) { byte_offset,
if let IResult::Done(_, color) = ..
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.trim() }) = bgs.iter_children_with_type("Color").nth(0) {
.as_bytes()) { if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.trim().as_bytes()) {
// TODO: proper color space management, not just assuming // TODO: proper color space management, not just assuming
// rec.709. // rec.709.
background_color = XYZ::from_tuple(rec709e_to_xyz(color)); background_color = XYZ::from_tuple(rec709e_to_xyz(color));
} else { } else {
return Err(PsyParseError::IncorrectLeafData(byte_offset, return Err(
PsyParseError::IncorrectLeafData(
byte_offset,
"Color should be specified \ "Color should be specified \
with three decimal numbers \ with three decimal numbers \
in the form '[R G B]'.")); in the form '[R G B]'.",
)
);
} }
} else { } else {
return Err(PsyParseError::MissingNode(bgs.byte_offset(), return Err(
PsyParseError::MissingNode(
bgs.byte_offset(),
"BackgroundShader's Type is Color, \ "BackgroundShader's Type is Color, \
but no Color is specified.")); but no Color is specified.",
)
);
} }
} }
_ => { _ => {
return Err(PsyParseError::UnknownVariant(bgs.byte_offset(), return Err(
PsyParseError::UnknownVariant(
bgs.byte_offset(),
"The specified BackgroundShader Type \ "The specified BackgroundShader Type \
isn't a recognized type.")) isn't a recognized type.",
)
)
} }
} }
@ -459,15 +578,21 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
} }
// Build and return the world // Build and return the world
return Ok(World { return Ok(
World {
background_color: background_color, background_color: background_color,
lights: arena.copy_slice(&lights), lights: arena.copy_slice(&lights),
}); }
);
} else { } else {
return Err(PsyParseError::ExpectedInternalNode(tree.byte_offset(), return Err(
PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"World section should be an internal \ "World section should be an internal \
node, containing at least a \ node, containing at least a \
BackgroundShader.")); BackgroundShader.",
)
);
} }
} }
@ -476,7 +601,9 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
pub fn parse_matrix(contents: &str) -> Result<Matrix4x4, PsyParseError> { pub fn parse_matrix(contents: &str) -> Result<Matrix4x4, PsyParseError> {
if let IResult::Done(_, ns) = if let IResult::Done(_, ns) =
closure!(terminated!(tuple!(ws_f32, closure!(
terminated!(
tuple!(
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
@ -491,9 +618,15 @@ pub fn parse_matrix(contents: &str) -> Result<Matrix4x4, PsyParseError> {
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32), ws_f32,
nom::eof))(contents.as_bytes()) { ws_f32
return Ok(Matrix4x4::new_from_values(ns.0, ),
nom::eof
)
)(contents.as_bytes()) {
return Ok(
Matrix4x4::new_from_values(
ns.0,
ns.4, ns.4,
ns.8, ns.8,
ns.12, ns.12,
@ -508,14 +641,18 @@ pub fn parse_matrix(contents: &str) -> Result<Matrix4x4, PsyParseError> {
ns.3, ns.3,
ns.7, ns.7,
ns.11, ns.11,
ns.15)); ns.15,
)
);
} else { } else {
return Err(PsyParseError::UnknownError(0)); return Err(PsyParseError::UnknownError(0));
} }
} }
pub fn make_transform_format_error(byte_offset: usize) -> PsyParseError { pub fn make_transform_format_error(byte_offset: usize) -> PsyParseError {
return PsyParseError::IncorrectLeafData(byte_offset, return PsyParseError::IncorrectLeafData(
byte_offset,
"Transform should be sixteen integers specified in \ "Transform should be sixteen integers specified in \
the form '[# # # # # # # # # # # # # # # #]'."); the form '[# # # # # # # # # # # # # # # #]'.",
);
} }
@ -12,9 +12,7 @@ use super::psy_mesh_surface::parse_mesh_surface;
use super::psy::{parse_matrix, PsyParseError}; use super::psy::{parse_matrix, PsyParseError};
pub fn parse_assembly<'a>(arena: &'a MemArena, pub fn parse_assembly<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Assembly<'a>, PsyParseError> {
tree: &'a DataTree)
-> Result<Assembly<'a>, PsyParseError> {
let mut builder = AssemblyBuilder::new(arena); let mut builder = AssemblyBuilder::new(arena);
if tree.is_internal() { if tree.is_internal() {
@ -54,38 +52,43 @@ pub fn parse_assembly<'a>(arena: &'a MemArena,
if builder.name_exists(name) { if builder.name_exists(name) {
builder.add_instance(name, Some(&xforms)); builder.add_instance(name, Some(&xforms));
} else { } else {
return Err(PsyParseError::InstancedMissingData( return Err(
PsyParseError::InstancedMissingData(
child.iter_leaf_children_with_type("Data").nth(0).unwrap().2, child.iter_leaf_children_with_type("Data").nth(0).unwrap().2,
"Attempted to add \ "Attempted to add \
instance for data with \ instance for data with \
a name that doesn't \ a name that doesn't \
exist.", exist.",
name.to_string())); name.to_string(),
)
);
} }
} }
// MeshSurface // MeshSurface
"MeshSurface" => { "MeshSurface" => {
if let &DataTree::Internal { ident: Some(ident), .. } = child { if let &DataTree::Internal { ident: Some(ident), .. } = child {
builder.add_object(ident, builder.add_object(
Object::Surface(arena.alloc( ident,
parse_mesh_surface(arena, &child)? Object::Surface(arena.alloc(parse_mesh_surface(arena, &child)?)),
))); );
} else { } else {
// TODO: error condition of some kind, because no ident // TODO: error condition of some kind, because no ident
panic!("MeshSurface encountered that was a leaf, but MeshSurfaces cannot \ panic!(
"MeshSurface encountered that was a leaf, but MeshSurfaces cannot \
be a leaf: {}", be a leaf: {}",
child.byte_offset()); child.byte_offset()
);
} }
} }
// Sphere Light // Sphere Light
"SphereLight" => { "SphereLight" => {
if let &DataTree::Internal { ident: Some(ident), .. } = child { if let &DataTree::Internal { ident: Some(ident), .. } = child {
builder.add_object(ident, builder.add_object(
Object::Light(arena.alloc( ident,
parse_sphere_light(arena, &child)? Object::Light(arena.alloc(parse_sphere_light(arena, &child)?)),
))); );
} else { } else {
// No ident // No ident
return Err(PsyParseError::UnknownError(child.byte_offset())); return Err(PsyParseError::UnknownError(child.byte_offset()));
@ -95,10 +98,10 @@ pub fn parse_assembly<'a>(arena: &'a MemArena,
// Rectangle Light // Rectangle Light
"RectangleLight" => { "RectangleLight" => {
if let &DataTree::Internal { ident: Some(ident), .. } = child { if let &DataTree::Internal { ident: Some(ident), .. } = child {
builder.add_object(ident, builder.add_object(
Object::Light(arena.alloc( ident,
parse_rectangle_light(arena, &child)? Object::Light(arena.alloc(parse_rectangle_light(arena, &child)?)),
))); );
} else { } else {
// No ident // No ident
return Err(PsyParseError::UnknownError(child.byte_offset())); return Err(PsyParseError::UnknownError(child.byte_offset()));
@ -15,9 +15,7 @@ use super::DataTree;
use super::psy::PsyParseError; use super::psy::PsyParseError;
pub fn parse_distant_disk_light<'a>(arena: &'a MemArena, pub fn parse_distant_disk_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<DistantDiskLight<'a>, PsyParseError> {
tree: &'a DataTree)
-> Result<DistantDiskLight<'a>, PsyParseError> {
if let &DataTree::Internal { ref children, .. } = tree { if let &DataTree::Internal { ref children, .. } = tree {
let mut radii = Vec::new(); let mut radii = Vec::new();
let mut directions = Vec::new(); let mut directions = Vec::new();
@ -27,7 +25,11 @@ pub fn parse_distant_disk_light<'a>(arena: &'a MemArena,
for child in children.iter() { for child in children.iter() {
match child { match child {
// Radius // Radius
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Radius" => { &DataTree::Leaf {
type_name,
contents,
byte_offset,
} if type_name == "Radius" => {
if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) {
radii.push(radius); radii.push(radius);
} else { } else {
@ -37,10 +39,12 @@ pub fn parse_distant_disk_light<'a>(arena: &'a MemArena,
} }
// Direction // Direction
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == &DataTree::Leaf {
"Direction" => { type_name,
if let IResult::Done(_, direction) = contents,
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { byte_offset,
} if type_name == "Direction" => {
if let IResult::Done(_, direction) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) {
directions.push(Vector::new(direction.0, direction.1, direction.2)); directions.push(Vector::new(direction.0, direction.1, direction.2));
} else { } else {
// Found direction, but its contents is not in the right format // Found direction, but its contents is not in the right format
@ -49,9 +53,12 @@ pub fn parse_distant_disk_light<'a>(arena: &'a MemArena,
} }
// Color // Color
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Color" => { &DataTree::Leaf {
if let IResult::Done(_, color) = type_name,
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { contents,
byte_offset,
} if type_name == "Color" => {
if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) {
// TODO: handle color space conversions properly. // TODO: handle color space conversions properly.
// Probably will need a special color type with its // Probably will need a special color type with its
// own parser...? // own parser...?
@ -73,9 +80,7 @@ pub fn parse_distant_disk_light<'a>(arena: &'a MemArena,
} }
pub fn parse_sphere_light<'a>(arena: &'a MemArena, pub fn parse_sphere_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<SphereLight<'a>, PsyParseError> {
tree: &'a DataTree)
-> Result<SphereLight<'a>, PsyParseError> {
if let &DataTree::Internal { ref children, .. } = tree { if let &DataTree::Internal { ref children, .. } = tree {
let mut radii = Vec::new(); let mut radii = Vec::new();
let mut colors = Vec::new(); let mut colors = Vec::new();
@ -84,7 +89,11 @@ pub fn parse_sphere_light<'a>(arena: &'a MemArena,
for child in children.iter() { for child in children.iter() {
match child { match child {
// Radius // Radius
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Radius" => { &DataTree::Leaf {
type_name,
contents,
byte_offset,
} if type_name == "Radius" => {
if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) {
radii.push(radius); radii.push(radius);
} else { } else {
@ -94,9 +103,12 @@ pub fn parse_sphere_light<'a>(arena: &'a MemArena,
} }
// Color // Color
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Color" => { &DataTree::Leaf {
if let IResult::Done(_, color) = type_name,
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { contents,
byte_offset,
} if type_name == "Color" => {
if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) {
// TODO: handle color space conversions properly. // TODO: handle color space conversions properly.
// Probably will need a special color type with its // Probably will need a special color type with its
// own parser...? // own parser...?
@ -117,9 +129,7 @@ pub fn parse_sphere_light<'a>(arena: &'a MemArena,
} }
} }
pub fn parse_rectangle_light<'a>(arena: &'a MemArena, pub fn parse_rectangle_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<RectangleLight<'a>, PsyParseError> {
tree: &'a DataTree)
-> Result<RectangleLight<'a>, PsyParseError> {
if let &DataTree::Internal { ref children, .. } = tree { if let &DataTree::Internal { ref children, .. } = tree {
let mut dimensions = Vec::new(); let mut dimensions = Vec::new();
let mut colors = Vec::new(); let mut colors = Vec::new();
@ -128,10 +138,12 @@ pub fn parse_rectangle_light<'a>(arena: &'a MemArena,
for child in children.iter() { for child in children.iter() {
match child { match child {
// Dimensions // Dimensions
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == &DataTree::Leaf {
"Dimensions" => { type_name,
if let IResult::Done(_, radius) = contents,
closure!(tuple!(ws_f32, ws_f32))(contents.as_bytes()) { byte_offset,
} if type_name == "Dimensions" => {
if let IResult::Done(_, radius) = closure!(tuple!(ws_f32, ws_f32))(contents.as_bytes()) {
dimensions.push(radius); dimensions.push(radius);
} else { } else {
// Found dimensions, but its contents is not in the right format // Found dimensions, but its contents is not in the right format
@ -140,9 +152,12 @@ pub fn parse_rectangle_light<'a>(arena: &'a MemArena,
} }
// Color // Color
&DataTree::Leaf { type_name, contents, byte_offset } if type_name == "Color" => { &DataTree::Leaf {
if let IResult::Done(_, color) = type_name,
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { contents,
byte_offset,
} if type_name == "Color" => {
if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) {
// TODO: handle color space conversions properly. // TODO: handle color space conversions properly.
// Probably will need a special color type with its // Probably will need a special color type with its
// own parser...? // own parser...?
@ -21,9 +21,7 @@ use super::psy::PsyParseError;
// accel: BVH, // accel: BVH,
// } // }
pub fn parse_mesh_surface<'a>(arena: &'a MemArena, pub fn parse_mesh_surface<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<TriangleMesh<'a>, PsyParseError> {
tree: &'a DataTree)
-> Result<TriangleMesh<'a>, PsyParseError> {
let mut verts = Vec::new(); let mut verts = Vec::new();
let mut face_vert_counts = Vec::new(); let mut face_vert_counts = Vec::new();
let mut face_vert_indices = Vec::new(); let mut face_vert_indices = Vec::new();
@ -39,8 +37,7 @@ pub fn parse_mesh_surface<'a>(arena: &'a MemArena,
// Collect verts for this time sample // Collect verts for this time sample
let mut vert_count = 0; let mut vert_count = 0;
while let IResult::Done(remaining, vert) = while let IResult::Done(remaining, vert) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text) {
closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text) {
raw_text = remaining; raw_text = remaining;
verts.push(Point::new(vert.0, vert.1, vert.2)); verts.push(Point::new(vert.0, vert.1, vert.2));
@ -91,9 +88,7 @@ pub fn parse_mesh_surface<'a>(arena: &'a MemArena,
// Store all the time samples of each triangle contiguously // Store all the time samples of each triangle contiguously
for time_sample in 0..time_samples { for time_sample in 0..time_samples {
let start_vi = vert_count * time_sample; let start_vi = vert_count * time_sample;
triangles.push((verts[start_vi + face_vert_indices[v1]], triangles.push((verts[start_vi + face_vert_indices[v1]], verts[start_vi + face_vert_indices[v1 + vi + 1]], verts[start_vi + face_vert_indices[v1 + vi + 2]]));
verts[start_vi + face_vert_indices[v1 + vi + 1]],
verts[start_vi + face_vert_indices[v1 + vi + 2]]));
} }
} }
} else { } else {
@ -97,14 +97,16 @@ impl<'a> Renderer<'a> {
let pixrenref = &pixels_rendered; let pixrenref = &pixels_rendered;
// Render // Render
tpool.scoped(|scope| { tpool.scoped(
|scope| {
// Spawn worker tasks // Spawn worker tasks
for _ in 0..thread_count { for _ in 0..thread_count {
let jq = &job_queue; let jq = &job_queue;
let ajq = &all_jobs_queued; let ajq = &all_jobs_queued;
let img = &image; let img = &image;
let cstats = &collective_stats; let cstats = &collective_stats;
scope.execute(move || { scope.execute(
move || {
let mut stats = RenderStats::new(); let mut stats = RenderStats::new();
let mut timer = Timer::new(); let mut timer = Timer::new();
let mut total_timer = Timer::new(); let mut total_timer = Timer::new();
@ -139,30 +141,23 @@ impl<'a> Renderer<'a> {
for si in 0..self.spp { for si in 0..self.spp {
// Calculate image plane x and y coordinates // Calculate image plane x and y coordinates
let (img_x, img_y) = { let (img_x, img_y) = {
let filter_x = let filter_x = fast_logit(halton::sample(4, offset + si as u32), 1.5) + 0.5;
fast_logit(halton::sample(4, offset + si as u32), 1.5) + let filter_y = fast_logit(halton::sample(5, offset + si as u32), 1.5) + 0.5;
0.5;
let filter_y =
fast_logit(halton::sample(5, offset + si as u32), 1.5) +
0.5;
let samp_x = (filter_x + x as f32) * cmpx; let samp_x = (filter_x + x as f32) * cmpx;
let samp_y = (filter_y + y as f32) * cmpy; let samp_y = (filter_y + y as f32) * cmpy;
((samp_x - 0.5) * x_extent, (0.5 - samp_y) * y_extent) ((samp_x - 0.5) * x_extent, (0.5 - samp_y) * y_extent)
}; };
// Create the light path and initial ray for this sample // Create the light path and initial ray for this sample
let (path, ray) = let (path, ray) = LightPath::new(
LightPath::new(&self.scene, &self.scene,
(x, y), (x, y),
(img_x, img_y), (img_x, img_y),
(halton::sample(0, offset + si as u32), (halton::sample(0, offset + si as u32), halton::sample(1, offset + si as u32)),
halton::sample(1, offset + si as u32)),
halton::sample(2, offset + si as u32), halton::sample(2, offset + si as u32),
map_0_1_to_wavelength(halton::sample(3, map_0_1_to_wavelength(halton::sample(3, offset + si as u32)),
offset + offset + si as u32,
si as );
u32)),
offset + si as u32);
paths.push(path); paths.push(path);
rays.push(ray); rays.push(ray);
} }
@ -178,10 +173,11 @@ impl<'a> Renderer<'a> {
stats.trace_time += timer.tick() as f64; stats.trace_time += timer.tick() as f64;
// Determine next rays to shoot based on result // Determine next rays to shoot based on result
pi = pi = partition_pair(
partition_pair(&mut paths[..pi], &mut rays[..pi], |i, path, ray| { &mut paths[..pi],
path.next(&mut xform_stack, &self.scene, &isects[i], &mut *ray) &mut rays[..pi],
}); |i, path, ray| path.next(&mut xform_stack, &self.scene, &isects[i], &mut *ray),
);
stats.ray_generation_time += timer.tick() as f64; stats.ray_generation_time += timer.tick() as f64;
} }
@ -191,8 +187,7 @@ impl<'a> Renderer<'a> {
let max = (bucket.x + bucket.w, bucket.y + bucket.h); let max = (bucket.x + bucket.w, bucket.y + bucket.h);
let mut img_bucket = img.get_bucket(min, max); let mut img_bucket = img.get_bucket(min, max);
for path in paths.iter() { for path in paths.iter() {
let path_col = SpectralSample::from_parts(path.color, let path_col = SpectralSample::from_parts(path.color, path.wavelength);
path.wavelength);
let mut col = img_bucket.get(path.pixel_co.0, path.pixel_co.1); let mut col = img_bucket.get(path.pixel_co.0, path.pixel_co.1);
col += XYZ::from_spectral_sample(&path_col) / self.spp as f32; col += XYZ::from_spectral_sample(&path_col) / self.spp as f32;
img_bucket.set(path.pixel_co.0, path.pixel_co.1, col); img_bucket.set(path.pixel_co.0, path.pixel_co.1, col);
@ -221,14 +216,17 @@ impl<'a> Renderer<'a> {
} }
stats.total_time += total_timer.tick() as f64; stats.total_time += total_timer.tick() as f64;
ACCEL_TRAV_TIME.with(|att| { ACCEL_TRAV_TIME.with(
|att| {
stats.accel_traversal_time = att.get(); stats.accel_traversal_time = att.get();
att.set(0.0); att.set(0.0);
}); }
);
// Collect stats // Collect stats
cstats.write().unwrap().collect(stats); cstats.write().unwrap().collect(stats);
}); }
);
} }
// Print initial 0.00% progress // Print initial 0.00% progress
@ -272,21 +270,26 @@ impl<'a> Renderer<'a> {
bucket_h bucket_h
}; };
if x < img_width && y < img_height && w > 0 && h > 0 { if x < img_width && y < img_height && w > 0 && h > 0 {
job_queue.push(BucketJob { job_queue.push(
BucketJob {
x: x as u32, x: x as u32,
y: y as u32, y: y as u32,
w: w as u32, w: w as u32,
h: h as u32, h: h as u32,
}); }
);
} }
} }
// Mark done queuing jobs // Mark done queuing jobs
*all_jobs_queued.write().unwrap() = true; *all_jobs_queued.write().unwrap() = true;
}); }
);
// Clear percentage progress print // Clear percentage progress print
print!("\r \r"); print!(
"\r \r",
);
// Return the rendered image and stats // Return the rendered image and stats
return (image, *collective_stats.read().unwrap()); return (image, *collective_stats.read().unwrap());
@ -321,14 +324,7 @@ pub struct LightPath {
} }
impl LightPath { impl LightPath {
fn new(scene: &Scene, fn new(scene: &Scene, pixel_co: (u32, u32), image_plane_co: (f32, f32), lens_uv: (f32, f32), time: f32, wavelength: f32, lds_offset: u32) -> (LightPath, Ray) {
pixel_co: (u32, u32),
image_plane_co: (f32, f32),
lens_uv: (f32, f32),
time: f32,
wavelength: f32,
lds_offset: u32)
-> (LightPath, Ray) {
(LightPath { (LightPath {
event: LightPathEvent::CameraRay, event: LightPathEvent::CameraRay,
bounce_count: 0, bounce_count: 0,
@ -347,11 +343,15 @@ impl LightPath {
color: Float4::splat(0.0), color: Float4::splat(0.0),
}, },
scene.camera.generate_ray(image_plane_co.0, scene
.camera
.generate_ray(
image_plane_co.0,
image_plane_co.1, image_plane_co.1,
time, time,
lens_uv.0, lens_uv.0,
lens_uv.1)) lens_uv.1,
))
} }
fn next_lds_samp(&self) -> f32 { fn next_lds_samp(&self) -> f32 {
@ -361,57 +361,51 @@ impl LightPath {
s s
} }
fn next(&mut self, fn next(&mut self, xform_stack: &mut TransformStack, scene: &Scene, isect: &surface::SurfaceIntersection, ray: &mut Ray) -> bool {
xform_stack: &mut TransformStack,
scene: &Scene,
isect: &surface::SurfaceIntersection,
ray: &mut Ray)
-> bool {
match self.event { match self.event {
//-------------------------------------------------------------------- //--------------------------------------------------------------------
// Result of Camera or bounce ray, prepare next bounce and light rays // Result of Camera or bounce ray, prepare next bounce and light rays
LightPathEvent::CameraRay | LightPathEvent::CameraRay |
LightPathEvent::BounceRay => { LightPathEvent::BounceRay => {
if let &surface::SurfaceIntersection::Hit { intersection_data: ref idata, if let &surface::SurfaceIntersection::Hit {
ref closure } = isect { intersection_data: ref idata,
ref closure,
} = isect {
// Hit something! Do the stuff // Hit something! Do the stuff
// Prepare light ray // Prepare light ray
let light_n = self.next_lds_samp(); let light_n = self.next_lds_samp();
let light_uvw = let light_uvw = (self.next_lds_samp(), self.next_lds_samp(), self.next_lds_samp());
(self.next_lds_samp(), self.next_lds_samp(), self.next_lds_samp());
xform_stack.clear(); xform_stack.clear();
let found_light = if let Some((light_color, let found_light = if let Some((light_color, shadow_vec, light_pdf, light_sel_pdf, is_infinite)) =
shadow_vec, scene.sample_lights(
light_pdf, xform_stack,
light_sel_pdf,
is_infinite)) =
scene.sample_lights(xform_stack,
light_n, light_n,
light_uvw, light_uvw,
self.wavelength, self.wavelength,
self.time, self.time,
isect) { isect,
) {
// Check if pdf is zero, to avoid NaN's. // Check if pdf is zero, to avoid NaN's.
if light_pdf > 0.0 { if light_pdf > 0.0 {
// Calculate and store the light that will be contributed // Calculate and store the light that will be contributed
// to the film plane if the light is not in shadow. // to the film plane if the light is not in shadow.
self.pending_color_addition = { self.pending_color_addition = {
let material = closure.as_surface_closure(); let material = closure.as_surface_closure();
let la = let la = material.evaluate(ray.dir, shadow_vec, idata.nor, self.wavelength);
material.evaluate(ray.dir, shadow_vec, idata.nor, self.wavelength); light_color.e * la.e * self.light_attenuation / (light_pdf * light_sel_pdf)
light_color.e * la.e * self.light_attenuation /
(light_pdf * light_sel_pdf)
}; };
// Calculate the shadow ray for testing if the light is // Calculate the shadow ray for testing if the light is
// in shadow or not. // in shadow or not.
// TODO: use proper ray offsets for avoiding self-shadowing // TODO: use proper ray offsets for avoiding self-shadowing
// rather than this hacky stupid stuff. // rather than this hacky stupid stuff.
*ray = Ray::new(idata.pos + shadow_vec.normalized() * 0.001, *ray = Ray::new(
idata.pos + shadow_vec.normalized() * 0.001,
shadow_vec, shadow_vec,
self.time, self.time,
true); true,
);
// For distant lights // For distant lights
if is_infinite { if is_infinite {
@ -445,11 +439,7 @@ impl LightPath {
self.next_attentuation_fac = filter.e / pdf; self.next_attentuation_fac = filter.e / pdf;
// Calculate the ray for this bounce // Calculate the ray for this bounce
self.next_bounce_ray = Some(Ray::new(idata.pos + self.next_bounce_ray = Some(Ray::new(idata.pos + dir.normalized() * 0.0001, dir, self.time, false));
dir.normalized() * 0.0001,
dir,
self.time,
false));
true true
} else { } else {
@ -477,9 +467,11 @@ impl LightPath {
} }
} else { } else {
// Didn't hit anything, so background color // Didn't hit anything, so background color
self.color += self.color += scene
scene.world.background_color.to_spectral_sample(self.wavelength).e * .world
self.light_attenuation; .background_color
.to_spectral_sample(self.wavelength)
.e * self.light_attenuation;
return false; return false;
} }
} }
@ -1,5 +1,3 @@
mod monte_carlo; mod monte_carlo;
pub use self::monte_carlo::{square_to_circle, cosine_sample_hemisphere, uniform_sample_hemisphere, pub use self::monte_carlo::{square_to_circle, cosine_sample_hemisphere, uniform_sample_hemisphere, uniform_sample_sphere, uniform_sample_cone, uniform_sample_cone_pdf, spherical_triangle_solid_angle, uniform_sample_spherical_triangle};
uniform_sample_sphere, uniform_sample_cone, uniform_sample_cone_pdf,
spherical_triangle_solid_angle, uniform_sample_spherical_triangle};
@ -68,9 +68,11 @@ pub fn uniform_sample_cone(u: f32, v: f32, cos_theta_max: f64) -> Vector {
let cos_theta = (1.0 - u as f64) + (u as f64 * cos_theta_max); let cos_theta = (1.0 - u as f64) + (u as f64 * cos_theta_max);
let sin_theta = (1.0 - (cos_theta * cos_theta)).sqrt(); let sin_theta = (1.0 - (cos_theta * cos_theta)).sqrt();
let phi = v as f64 * 2.0 * PI_64; let phi = v as f64 * 2.0 * PI_64;
Vector::new((phi.cos() * sin_theta) as f32, Vector::new(
(phi.cos() * sin_theta) as f32,
(phi.sin() * sin_theta) as f32, (phi.sin() * sin_theta) as f32,
cos_theta as f32) cos_theta as f32,
)
} }
pub fn uniform_sample_cone_pdf(cos_theta_max: f64) -> f64 { pub fn uniform_sample_cone_pdf(cos_theta_max: f64) -> f64 {
@ -97,9 +99,15 @@ pub fn spherical_triangle_solid_angle(va: Vector, vb: Vector, vc: Vector) -> f32
} }
// Calculate the cosine of the angles at the vertices // Calculate the cosine of the angles at the vertices
let cos_va = ((cos_a - (cos_b * cos_c)) / (sin_b * sin_c)).max(-1.0).min(1.0); let cos_va = ((cos_a - (cos_b * cos_c)) / (sin_b * sin_c))
let cos_vb = ((cos_b - (cos_c * cos_a)) / (sin_c * sin_a)).max(-1.0).min(1.0); .max(-1.0)
let cos_vc = ((cos_c - (cos_a * cos_b)) / (sin_a * sin_b)).max(-1.0).min(1.0); .min(1.0);
let cos_vb = ((cos_b - (cos_c * cos_a)) / (sin_c * sin_a))
.max(-1.0)
.min(1.0);
let cos_vc = ((cos_c - (cos_a * cos_b)) / (sin_a * sin_b))
.max(-1.0)
.min(1.0);
// Calculate the angles themselves, in radians // Calculate the angles themselves, in radians
let ang_va = cos_va.acos(); let ang_va = cos_va.acos();
@ -112,12 +120,7 @@ pub fn spherical_triangle_solid_angle(va: Vector, vb: Vector, vc: Vector) -> f32
/// Generates a uniform sample on a spherical triangle given two uniform /// Generates a uniform sample on a spherical triangle given two uniform
/// random variables i and j in [0, 1]. /// random variables i and j in [0, 1].
pub fn uniform_sample_spherical_triangle(va: Vector, pub fn uniform_sample_spherical_triangle(va: Vector, vb: Vector, vc: Vector, i: f32, j: f32) -> Vector {
vb: Vector,
vc: Vector,
i: f32,
j: f32)
-> Vector {
// Calculate sines and cosines of the spherical triangle's edge lengths // Calculate sines and cosines of the spherical triangle's edge lengths
let cos_a: f64 = dot(vb, vc).max(-1.0).min(1.0) as f64; let cos_a: f64 = dot(vb, vc).max(-1.0).min(1.0) as f64;
let cos_b: f64 = dot(vc, va).max(-1.0).min(1.0) as f64; let cos_b: f64 = dot(vc, va).max(-1.0).min(1.0) as f64;
@ -135,9 +138,15 @@ pub fn uniform_sample_spherical_triangle(va: Vector,
} }
// Calculate the cosine of the angles at the vertices // Calculate the cosine of the angles at the vertices
let cos_va = ((cos_a - (cos_b * cos_c)) / (sin_b * sin_c)).max(-1.0).min(1.0); let cos_va = ((cos_a - (cos_b * cos_c)) / (sin_b * sin_c))
let cos_vb = ((cos_b - (cos_c * cos_a)) / (sin_c * sin_a)).max(-1.0).min(1.0); .max(-1.0)
let cos_vc = ((cos_c - (cos_a * cos_b)) / (sin_a * sin_b)).max(-1.0).min(1.0); .min(1.0);
let cos_vb = ((cos_b - (cos_c * cos_a)) / (sin_c * sin_a))
.max(-1.0)
.min(1.0);
let cos_vc = ((cos_c - (cos_a * cos_b)) / (sin_a * sin_b))
.max(-1.0)
.min(1.0);
// Calculate sine for A // Calculate sine for A
let sin_va = (1.0 - (cos_va * cos_va)).sqrt(); let sin_va = (1.0 - (cos_va * cos_va)).sqrt();
@ -163,8 +172,7 @@ pub fn uniform_sample_spherical_triangle(va: Vector,
let q_bottom = ((v * s) + (u * t)) * sin_va; let q_bottom = ((v * s) + (u * t)) * sin_va;
let q = q_top / q_bottom; let q = q_top / q_bottom;
let vc_2 = (va * q as f32) + let vc_2 = (va * q as f32) + ((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32);
((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32);
let z = 1.0 - (j * (1.0 - dot(vc_2, vb))); let z = 1.0 - (j * (1.0 - dot(vc_2, vb)));
@ -36,15 +36,11 @@ pub struct Assembly<'a> {
impl<'a> Assembly<'a> { impl<'a> Assembly<'a> {
// Returns (light_color, shadow_vector, pdf, selection_pdf) // Returns (light_color, shadow_vector, pdf, selection_pdf)
pub fn sample_lights(&self, pub fn sample_lights(&self, xform_stack: &mut TransformStack, n: f32, uvw: (f32, f32, f32), wavelength: f32, time: f32, intr: &SurfaceIntersection) -> Option<(SpectralSample, Vector, f32, f32)> {
xform_stack: &mut TransformStack, if let &SurfaceIntersection::Hit {
n: f32, intersection_data: idata,
uvw: (f32, f32, f32), closure,
wavelength: f32, } = intr {
time: f32,
intr: &SurfaceIntersection)
-> Option<(SpectralSample, Vector, f32, f32)> {
if let &SurfaceIntersection::Hit { intersection_data: idata, closure } = intr {
let sel_xform = if xform_stack.top().len() > 0 { let sel_xform = if xform_stack.top().len() > 0 {
lerp_slice(xform_stack.top(), time) lerp_slice(xform_stack.top(), time)
} else { } else {
@ -52,12 +48,14 @@ impl<'a> Assembly<'a> {
}; };
if let Some((light_i, sel_pdf, whittled_n)) = if let Some((light_i, sel_pdf, whittled_n)) =
self.light_accel self.light_accel
.select(idata.incoming * sel_xform, .select(
idata.incoming * sel_xform,
idata.pos * sel_xform, idata.pos * sel_xform,
idata.nor * sel_xform, idata.nor * sel_xform,
closure.as_surface_closure(), closure.as_surface_closure(),
time, time,
n) { n,
) {
let inst = self.light_instances[light_i]; let inst = self.light_instances[light_i];
match inst.instance_type { match inst.instance_type {
@ -83,8 +81,7 @@ impl<'a> Assembly<'a> {
}; };
// Sample the light // Sample the light
let (color, shadow_vec, pdf) = let (color, shadow_vec, pdf) = light.sample(&xform, idata.pos, uvw.0, uvw.1, wavelength, time);
light.sample(&xform, idata.pos, uvw.0, uvw.1, wavelength, time);
return Some((color, shadow_vec, pdf, sel_pdf)); return Some((color, shadow_vec, pdf, sel_pdf));
} }
@ -100,8 +97,7 @@ impl<'a> Assembly<'a> {
} }
// Sample sub-assembly lights // Sample sub-assembly lights
let sample = self.assemblies[inst.data_index] let sample = self.assemblies[inst.data_index].sample_lights(xform_stack, whittled_n, uvw, wavelength, time, intr);
.sample_lights(xform_stack, whittled_n, uvw, wavelength, time, intr);
// Pop the assembly's transforms off the transform stack. // Pop the assembly's transforms off the transform stack.
if let Some(_) = inst.transform_indices { if let Some(_) = inst.transform_indices {
@ -173,12 +169,15 @@ impl<'a> AssemblyBuilder<'a> {
pub fn add_assembly(&mut self, name: &str, asmb: Assembly<'a>) { pub fn add_assembly(&mut self, name: &str, asmb: Assembly<'a>) {
// Make sure the name hasn't already been used. // Make sure the name hasn't already been used.
if self.name_exists(name) { if self.name_exists(name) {
panic!("Attempted to add assembly to another assembly with a name that already \ panic!(
exists."); "Attempted to add assembly to another assembly with a name that already \
exists."
);
} }
// Add assembly // Add assembly
self.assembly_map.insert(name.to_string(), self.assemblies.len()); self.assembly_map
.insert(name.to_string(), self.assemblies.len());
self.assemblies.push(asmb); self.assemblies.push(asmb);
} }
@ -201,16 +200,14 @@ impl<'a> AssemblyBuilder<'a> {
instance_type: InstanceType::Object, instance_type: InstanceType::Object,
data_index: self.object_map[name], data_index: self.object_map[name],
id: self.instances.len(), id: self.instances.len(),
transform_indices: transform_indices: xforms.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
xforms.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
} }
} else { } else {
Instance { Instance {
instance_type: InstanceType::Assembly, instance_type: InstanceType::Assembly,
data_index: self.assembly_map[name], data_index: self.assembly_map[name],
id: self.instances.len(), id: self.instances.len(),
transform_indices: transform_indices: xforms.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
xforms.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
} }
}; };
@ -231,16 +228,19 @@ impl<'a> AssemblyBuilder<'a> {
let (bis, bbs) = self.instance_bounds(); let (bis, bbs) = self.instance_bounds();
// Build object accel // Build object accel
let object_accel = BVH::from_objects(self.arena, let object_accel = BVH::from_objects(
self.arena,
&mut self.instances[..], &mut self.instances[..],
1, 1,
|inst| &bbs[bis[inst.id]..bis[inst.id + 1]]); |inst| &bbs[bis[inst.id]..bis[inst.id + 1]],
);
// Get list of instances that are for light sources or assemblies that contain light // Get list of instances that are for light sources or assemblies that contain light
// sources. // sources.
let mut light_instances: Vec<_> = self.instances let mut light_instances: Vec<_> = self.instances
.iter() .iter()
.filter(|inst| match inst.instance_type { .filter(
|inst| match inst.instance_type {
InstanceType::Object => { InstanceType::Object => {
if let Object::Light(_) = self.objects[inst.data_index] { if let Object::Light(_) = self.objects[inst.data_index] {
true true
@ -250,14 +250,18 @@ impl<'a> AssemblyBuilder<'a> {
} }
InstanceType::Assembly => { InstanceType::Assembly => {
self.assemblies[inst.data_index].light_accel.approximate_energy() > 0.0 self.assemblies[inst.data_index]
.light_accel
.approximate_energy() > 0.0
} }
}) }
)
.map(|&a| a) .map(|&a| a)
.collect(); .collect();
// Build light accel // Build light accel
let light_accel = LightTree::from_objects(self.arena, &mut light_instances[..], |inst| { let light_accel = LightTree::from_objects(
self.arena, &mut light_instances[..], |inst| {
let bounds = &bbs[bis[inst.id]..bis[inst.id + 1]]; let bounds = &bbs[bis[inst.id]..bis[inst.id + 1]];
let energy = match inst.instance_type { let energy = match inst.instance_type {
InstanceType::Object => { InstanceType::Object => {
@ -269,11 +273,14 @@ impl<'a> AssemblyBuilder<'a> {
} }
InstanceType::Assembly => { InstanceType::Assembly => {
self.assemblies[inst.data_index].light_accel.approximate_energy() self.assemblies[inst.data_index]
.light_accel
.approximate_energy()
} }
}; };
(bounds, energy) (bounds, energy)
}); }
);
Assembly { Assembly {
instances: self.arena.copy_slice(&self.instances), instances: self.arena.copy_slice(&self.instances),
@ -19,14 +19,7 @@ pub struct Scene<'a> {
} }
impl<'a> Scene<'a> { impl<'a> Scene<'a> {
pub fn sample_lights(&self, pub fn sample_lights(&self, xform_stack: &mut TransformStack, n: f32, uvw: (f32, f32, f32), wavelength: f32, time: f32, intr: &SurfaceIntersection) -> Option<(SpectralSample, Vector, f32, f32, bool)> {
xform_stack: &mut TransformStack,
n: f32,
uvw: (f32, f32, f32),
wavelength: f32,
time: f32,
intr: &SurfaceIntersection)
-> Option<(SpectralSample, Vector, f32, f32, bool)> {
// TODO: this just selects between world lights and local lights // TODO: this just selects between world lights and local lights
// with a 50/50 chance. We should do something more sophisticated // with a 50/50 chance. We should do something more sophisticated
// than this, accounting for the estimated impact of the lights // than this, accounting for the estimated impact of the lights
@ -37,8 +30,7 @@ impl<'a> Scene<'a> {
let wl_energy = if self.world let wl_energy = if self.world
.lights .lights
.iter() .iter()
.fold(0.0, |energy, light| energy + light.approximate_energy()) <= .fold(0.0, |energy, light| energy + light.approximate_energy()) <= 0.0 {
0.0 {
0.0 0.0
} else { } else {
1.0 1.0
@ -67,7 +59,8 @@ impl<'a> Scene<'a> {
let n = (n - wl_prob) / (1.0 - wl_prob); let n = (n - wl_prob) / (1.0 - wl_prob);
if let Some((ss, sv, pdf, spdf)) = if let Some((ss, sv, pdf, spdf)) =
self.root.sample_lights(xform_stack, n, uvw, wavelength, time, intr) { self.root
.sample_lights(xform_stack, n, uvw, wavelength, time, intr) {
return Some((ss, sv, pdf, spdf * (1.0 - wl_prob), false)); return Some((ss, sv, pdf, spdf * (1.0 - wl_prob), false));
} else { } else {
return None; return None;
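The sample_lights body above reuses one uniform number both to choose between world lights and local lights and, after rescaling with (n - wl_prob) / (1.0 - wl_prob), as a fresh uniform sample for the chosen group. A standalone sketch of that rescaling trick (not the crate's API):

/// Use one uniform n in [0, 1) both to pick a group (probability `p` for the
/// first group) and, stretched back to [0, 1), as a sample within that group.
fn pick_group(n: f32, p: f32) -> (usize, f32) {
    if n < p {
        (0, n / p)
    } else {
        (1, (n - p) / (1.0 - p))
    }
}

fn main() {
    assert_eq!(pick_group(0.75, 0.5), (1, 0.5));
}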


@ -42,12 +42,7 @@ pub trait SurfaceClosure {
/// wavelength: The wavelength of light to sample at. /// wavelength: The wavelength of light to sample at.
/// ///
/// Returns a tuple with the generated outgoing light direction, color filter, and pdf. /// Returns a tuple with the generated outgoing light direction, color filter, and pdf.
fn sample(&self, fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32);
inc: Vector,
nor: Normal,
uv: (f32, f32),
wavelength: f32)
-> (Vector, SpectralSample, f32);
/// Evaluates the closure for the given incoming and outgoing rays. /// Evaluates the closure for the given incoming and outgoing rays.
/// ///
@ -72,12 +67,7 @@ pub trait SurfaceClosure {
/// This is used for importance sampling, so does not need to be exact, /// This is used for importance sampling, so does not need to be exact,
/// but it does need to be non-zero anywhere that an exact solution would /// but it does need to be non-zero anywhere that an exact solution would
/// be non-zero. /// be non-zero.
fn estimate_eval_over_solid_angle(&self, fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32;
inc: Vector,
out: Vector,
nor: Normal,
cos_theta: f32)
-> f32;
} }
@ -173,12 +163,7 @@ impl SurfaceClosure for EmitClosure {
false false
} }
fn sample(&self, fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32) {
inc: Vector,
nor: Normal,
uv: (f32, f32),
wavelength: f32)
-> (Vector, SpectralSample, f32) {
let _ = (inc, nor, uv); // Not using these, silence warning let _ = (inc, nor, uv); // Not using these, silence warning
(Vector::new(0.0, 0.0, 0.0), SpectralSample::new(wavelength), 1.0) (Vector::new(0.0, 0.0, 0.0), SpectralSample::new(wavelength), 1.0)
@ -196,12 +181,7 @@ impl SurfaceClosure for EmitClosure {
1.0 1.0
} }
fn estimate_eval_over_solid_angle(&self, fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32 {
inc: Vector,
out: Vector,
nor: Normal,
cos_theta: f32)
-> f32 {
let _ = (inc, out, nor, cos_theta); // Not using these, silence warning let _ = (inc, out, nor, cos_theta); // Not using these, silence warning
// TODO: what to do here? // TODO: what to do here?
@ -227,12 +207,7 @@ impl SurfaceClosure for LambertClosure {
false false
} }
fn sample(&self, fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32) {
inc: Vector,
nor: Normal,
uv: (f32, f32),
wavelength: f32)
-> (Vector, SpectralSample, f32) {
let nn = if dot(nor.into_vector(), inc) <= 0.0 { let nn = if dot(nor.into_vector(), inc) <= 0.0 {
nor.normalized() nor.normalized()
} else { } else {
@ -275,12 +250,7 @@ impl SurfaceClosure for LambertClosure {
dot(nn, v).max(0.0) * INV_PI dot(nn, v).max(0.0) * INV_PI
} }
fn estimate_eval_over_solid_angle(&self, fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32 {
inc: Vector,
out: Vector,
nor: Normal,
cos_theta: f32)
-> f32 {
assert!(cos_theta >= -1.0 && cos_theta <= 1.0); assert!(cos_theta >= -1.0 && cos_theta <= 1.0);
// Analytically calculates lambert shading from a uniform light source // Analytically calculates lambert shading from a uniform light source
@ -405,9 +375,7 @@ impl GTRClosure {
let roughness2 = self.roughness * self.roughness; let roughness2 = self.roughness * self.roughness;
// Calculate top half of equation // Calculate top half of equation
let top = 1.0 - let top = 1.0 - ((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u).powf(1.0 / (1.0 - self.tail_shape));
((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u)
.powf(1.0 / (1.0 - self.tail_shape));
// Calculate bottom half of equation // Calculate bottom half of equation
let bottom = 1.0 - roughness2; let bottom = 1.0 - roughness2;
@ -440,12 +408,7 @@ impl SurfaceClosure for GTRClosure {
} }
fn sample(&self, fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32) {
inc: Vector,
nor: Normal,
uv: (f32, f32),
wavelength: f32)
-> (Vector, SpectralSample, f32) {
// Get normalized surface normal // Get normalized surface normal
let nn = if dot(nor.into_vector(), inc) < 0.0 { let nn = if dot(nor.into_vector(), inc) < 0.0 {
nor.normalized() nor.normalized()
@ -499,18 +462,26 @@ impl SurfaceClosure for GTRClosure {
let mut col_f = self.col.to_spectral_sample(wavelength); let mut col_f = self.col.to_spectral_sample(wavelength);
let rev_fresnel = 1.0 - self.fresnel; let rev_fresnel = 1.0 - self.fresnel;
let c0 = lerp(schlick_fresnel_from_fac(col_f.e.get_0(), hb), let c0 = lerp(
schlick_fresnel_from_fac(col_f.e.get_0(), hb),
col_f.e.get_0(), col_f.e.get_0(),
rev_fresnel); rev_fresnel,
let c1 = lerp(schlick_fresnel_from_fac(col_f.e.get_1(), hb), );
let c1 = lerp(
schlick_fresnel_from_fac(col_f.e.get_1(), hb),
col_f.e.get_1(), col_f.e.get_1(),
rev_fresnel); rev_fresnel,
let c2 = lerp(schlick_fresnel_from_fac(col_f.e.get_2(), hb), );
let c2 = lerp(
schlick_fresnel_from_fac(col_f.e.get_2(), hb),
col_f.e.get_2(), col_f.e.get_2(),
rev_fresnel); rev_fresnel,
let c3 = lerp(schlick_fresnel_from_fac(col_f.e.get_3(), hb), );
let c3 = lerp(
schlick_fresnel_from_fac(col_f.e.get_3(), hb),
col_f.e.get_3(), col_f.e.get_3(),
rev_fresnel); rev_fresnel,
);
col_f.e.set_0(c0); col_f.e.set_0(c0);
col_f.e.set_1(c1); col_f.e.set_1(c1);
@ -580,12 +551,7 @@ impl SurfaceClosure for GTRClosure {
} }
fn estimate_eval_over_solid_angle(&self, fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32 {
inc: Vector,
out: Vector,
nor: Normal,
cos_theta: f32)
-> f32 {
// TODO: all of the stuff in this function is horribly hacky. // TODO: all of the stuff in this function is horribly hacky.
// Find a proper way to approximate the light contribution from a // Find a proper way to approximate the light contribution from a
// solid angle. // solid angle.
@ -622,8 +588,10 @@ impl SurfaceClosure for GTRClosure {
let theta = cos_theta.acos(); let theta = cos_theta.acos();
let hh = (aa + bb).normalized(); let hh = (aa + bb).normalized();
let nh = clamp(dot(nn, hh), -1.0, 1.0); let nh = clamp(dot(nn, hh), -1.0, 1.0);
let fac = self.dist(nh, let fac = self.dist(
(1.0f32).min(self.roughness.sqrt() + (2.0 * theta / PI_32))); nh,
(1.0f32).min(self.roughness.sqrt() + (2.0 * theta / PI_32)),
);
return fac * (1.0f32).min(1.0 - cos_theta) * INV_PI; return fac * (1.0f32).min(1.0 - cos_theta) * INV_PI;
} }
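LambertClosure above evaluates its pdf as dot(nn, v).max(0.0) * INV_PI, i.e. cos(theta) / pi. A minimal standalone sketch of cosine-weighted hemisphere sampling that produces exactly that pdf (oriented around +Z, with plain arrays rather than the crate's Vector type):

use std::f32::consts::PI;

/// Cosine-weighted hemisphere sample around +Z from two uniform numbers
/// in [0, 1); returns the direction and its pdf, cos(theta) / pi.
fn sample_cosine_hemisphere(u: f32, v: f32) -> ([f32; 3], f32) {
    let r = u.sqrt();
    let phi = 2.0 * PI * v;
    let (x, y) = (r * phi.cos(), r * phi.sin());
    let z = (1.0 - r * r).max(0.0).sqrt(); // z = cos(theta)
    ([x, y, z], z / PI)
}

fn main() {
    let (dir, pdf) = sample_cosine_hemisphere(0.25, 0.5);
    println!("dir = {:?}, pdf = {}", dir, pdf);
}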


@ -12,11 +12,7 @@ use shading::surface_closure::SurfaceClosureUnion;
pub trait Surface: Boundable + Debug + Sync { pub trait Surface: Boundable + Debug + Sync {
fn intersect_rays(&self, fn intersect_rays(&self, accel_rays: &mut [AccelRay], wrays: &[Ray], isects: &mut [SurfaceIntersection], space: &[Matrix4x4]);
accel_rays: &mut [AccelRay],
wrays: &[Ray],
isects: &mut [SurfaceIntersection],
space: &[Matrix4x4]);
} }


@ -24,10 +24,7 @@ pub struct TriangleMesh<'a> {
} }
impl<'a> TriangleMesh<'a> { impl<'a> TriangleMesh<'a> {
pub fn from_triangles<'b>(arena: &'b MemArena, pub fn from_triangles<'b>(arena: &'b MemArena, time_samples: usize, triangles: Vec<(Point, Point, Point)>) -> TriangleMesh<'b> {
time_samples: usize,
triangles: Vec<(Point, Point, Point)>)
-> TriangleMesh<'b> {
assert!(triangles.len() % time_samples == 0); assert!(triangles.len() % time_samples == 0);
let mut indices: Vec<usize> = (0..(triangles.len() / time_samples)) let mut indices: Vec<usize> = (0..(triangles.len() / time_samples))
@ -44,10 +41,12 @@ impl<'a> TriangleMesh<'a> {
bounds bounds
}; };
let accel = BVH::from_objects(arena, let accel = BVH::from_objects(
arena,
&mut indices[..], &mut indices[..],
3, 3,
|tri_i| &bounds[*tri_i..(*tri_i + time_samples)]); |tri_i| &bounds[*tri_i..(*tri_i + time_samples)],
);
TriangleMesh { TriangleMesh {
time_samples: time_samples, time_samples: time_samples,
@ -66,20 +65,17 @@ impl<'a> Boundable for TriangleMesh<'a> {
impl<'a> Surface for TriangleMesh<'a> { impl<'a> Surface for TriangleMesh<'a> {
fn intersect_rays(&self, fn intersect_rays(&self, accel_rays: &mut [AccelRay], wrays: &[Ray], isects: &mut [SurfaceIntersection], space: &[Matrix4x4]) {
accel_rays: &mut [AccelRay], self.accel
wrays: &[Ray], .traverse(
isects: &mut [SurfaceIntersection], &mut accel_rays[..], &self.indices, |tri_i, rs| {
space: &[Matrix4x4]) {
self.accel.traverse(&mut accel_rays[..], &self.indices, |tri_i, rs| {
for r in rs { for r in rs {
let wr = &wrays[r.id as usize]; let wr = &wrays[r.id as usize];
let tri = let tri = lerp_slice_with(
lerp_slice_with(&self.geo[*tri_i..(*tri_i + self.time_samples)], &self.geo[*tri_i..(*tri_i + self.time_samples)],
wr.time, wr.time,
|a, b, t| { |a, b, t| (lerp(a.0, b.0, t), lerp(a.1, b.1, t), lerp(a.2, b.2, t)),
(lerp(a.0, b.0, t), lerp(a.1, b.1, t), lerp(a.2, b.2, t)) );
});
// TODO: when there's no transforms, we don't have to // TODO: when there's no transforms, we don't have to
// transform the triangles at all. // transform the triangles at all.
let mat_space = if space.len() > 0 { let mat_space = if space.len() > 0 {
@ -106,9 +102,7 @@ impl<'a> Surface for TriangleMesh<'a> {
local_space: mat_space, local_space: mat_space,
}, },
// TODO: get surface closure from surface shader. // TODO: get surface closure from surface shader.
closure: SurfaceClosureUnion::LambertClosure( closure: SurfaceClosureUnion::LambertClosure(LambertClosure::new(XYZ::new(0.8, 0.8, 0.8))),
LambertClosure::new(XYZ::new(0.8, 0.8, 0.8))
),
// closure: // closure:
// SurfaceClosureUnion::GTRClosure( // SurfaceClosureUnion::GTRClosure(
// GTRClosure::new(XYZ::new(0.8, 0.8, 0.8), // GTRClosure::new(XYZ::new(0.8, 0.8, 0.8),
@ -121,6 +115,7 @@ impl<'a> Surface for TriangleMesh<'a> {
} }
} }
} }
}); }
);
} }
} }
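intersect_rays above blends the triangle's time samples with lerp_slice_with before testing each ray. A simplified stand-in for that kind of slice interpolation (not the crate's lerp_slice helpers), shown here on plain f32 samples:

/// Given samples evenly spaced over t in [0, 1], blend the two that bracket t.
fn lerp_samples(samples: &[f32], t: f32) -> f32 {
    assert!(!samples.is_empty());
    if samples.len() == 1 {
        return samples[0];
    }
    let x = t.max(0.0).min(1.0) * (samples.len() - 1) as f32;
    let i = (x as usize).min(samples.len() - 2);
    let alpha = x - i as f32;
    samples[i] * (1.0 - alpha) + samples[i + 1] * alpha
}

fn main() {
    assert_eq!(lerp_samples(&[0.0, 10.0, 20.0], 0.75), 15.0);
}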


@ -29,7 +29,12 @@ impl<'a> Tracer<'a> {
self.rays.clear(); self.rays.clear();
self.rays.reserve(wrays.len()); self.rays.reserve(wrays.len());
let mut ids = 0..(wrays.len() as u32); let mut ids = 0..(wrays.len() as u32);
self.rays.extend(wrays.iter().map(|wr| AccelRay::new(wr, ids.next().unwrap()))); self.rays
.extend(
wrays
.iter()
.map(|wr| AccelRay::new(wr, ids.next().unwrap()))
);
return self.inner.trace(wrays, &mut self.rays[..]); return self.inner.trace(wrays, &mut self.rays[..]);
} }
@ -46,7 +51,8 @@ impl<'a> TracerInner<'a> {
// Ready the isects // Ready the isects
self.isects.clear(); self.isects.clear();
self.isects.reserve(wrays.len()); self.isects.reserve(wrays.len());
self.isects.extend(iter::repeat(SurfaceIntersection::Miss).take(wrays.len())); self.isects
.extend(iter::repeat(SurfaceIntersection::Miss).take(wrays.len()));
let mut ray_sets = split_rays_by_direction(&mut rays[..]); let mut ray_sets = split_rays_by_direction(&mut rays[..]);
for ray_set in ray_sets.iter_mut().filter(|ray_set| ray_set.len() > 0) { for ray_set in ray_sets.iter_mut().filter(|ray_set| ray_set.len() > 0) {
@ -56,11 +62,11 @@ impl<'a> TracerInner<'a> {
return &self.isects; return &self.isects;
} }
fn trace_assembly<'b>(&'b mut self, fn trace_assembly<'b>(&'b mut self, assembly: &Assembly, wrays: &[Ray], accel_rays: &mut [AccelRay]) {
assembly: &Assembly, assembly
wrays: &[Ray], .object_accel
accel_rays: &mut [AccelRay]) { .traverse(
assembly.object_accel.traverse(&mut accel_rays[..], &assembly.instances[..], |inst, rs| { &mut accel_rays[..], &assembly.instances[..], |inst, rs| {
// Transform rays if needed // Transform rays if needed
if let Some((xstart, xend)) = inst.transform_indices { if let Some((xstart, xend)) = inst.transform_indices {
// Push transforms to stack // Push transforms to stack
@ -90,7 +96,16 @@ impl<'a> TracerInner<'a> {
let mut tmp = if let Some(_) = inst.transform_indices { let mut tmp = if let Some(_) = inst.transform_indices {
split_rays_by_direction(rs) split_rays_by_direction(rs)
} else { } else {
[&mut rs[..], &mut [], &mut [], &mut [], &mut [], &mut [], &mut [], &mut []] [
&mut rs[..],
&mut [],
&mut [],
&mut [],
&mut [],
&mut [],
&mut [],
&mut [],
]
}; };
let mut ray_sets = if let Some(_) = inst.transform_indices { let mut ray_sets = if let Some(_) = inst.transform_indices {
&mut tmp[..] &mut tmp[..]
@ -106,9 +121,7 @@ impl<'a> TracerInner<'a> {
} }
InstanceType::Assembly => { InstanceType::Assembly => {
self.trace_assembly(&assembly.assemblies[inst.data_index], self.trace_assembly(&assembly.assemblies[inst.data_index], wrays, ray_set);
wrays,
ray_set);
} }
} }
} }
@ -125,8 +138,7 @@ impl<'a> TracerInner<'a> {
for ray in &mut rs[..] { for ray in &mut rs[..] {
let id = ray.id; let id = ray.id;
let t = ray.time; let t = ray.time;
ray.update_from_xformed_world_ray(&wrays[id as usize], ray.update_from_xformed_world_ray(&wrays[id as usize], &lerp_slice(xforms, t));
&lerp_slice(xforms, t));
} }
} else { } else {
for ray in &mut rs[..] { for ray in &mut rs[..] {
@ -135,7 +147,8 @@ impl<'a> TracerInner<'a> {
} }
} }
} }
}); }
);
} }
fn trace_object<'b>(&'b mut self, obj: &Object, wrays: &[Ray], rays: &mut [AccelRay]) { fn trace_object<'b>(&'b mut self, obj: &Object, wrays: &[Ray], rays: &mut [AccelRay]) {
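trace_assembly above fans rays back out through split_rays_by_direction into eight sets before recursing, which keeps rays that agree in direction sign together during traversal. A sketch of one plausible octant classification (an assumption about the split rule; the crate's actual function isn't shown in this diff):

/// Classify a direction into one of 8 octants from the signs of its components.
fn direction_octant(dir: [f32; 3]) -> usize {
    ((dir[0] >= 0.0) as usize)
        | (((dir[1] >= 0.0) as usize) << 1)
        | (((dir[2] >= 0.0) as usize) << 2)
}

fn main() {
    assert_eq!(direction_octant([1.0, -1.0, 1.0]), 0b101);
}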


@ -94,7 +94,8 @@ impl Float4 {
#[cfg(not(feature = "simd_perf"))] #[cfg(not(feature = "simd_perf"))]
#[inline] #[inline]
pub fn v_min(&self, other: Float4) -> Float4 { pub fn v_min(&self, other: Float4) -> Float4 {
Float4::new(if self.get_0() < other.get_0() { Float4::new(
if self.get_0() < other.get_0() {
self.get_0() self.get_0()
} else { } else {
other.get_0() other.get_0()
@ -113,7 +114,8 @@ impl Float4 {
self.get_3() self.get_3()
} else { } else {
other.get_3() other.get_3()
}) },
)
} }
@ -125,7 +127,8 @@ impl Float4 {
#[cfg(not(feature = "simd_perf"))] #[cfg(not(feature = "simd_perf"))]
#[inline] #[inline]
pub fn v_max(&self, other: Float4) -> Float4 { pub fn v_max(&self, other: Float4) -> Float4 {
Float4::new(if self.get_0() > other.get_0() { Float4::new(
if self.get_0() > other.get_0() {
self.get_0() self.get_0()
} else { } else {
other.get_0() other.get_0()
@ -144,7 +147,8 @@ impl Float4 {
self.get_3() self.get_3()
} else { } else {
other.get_3() other.get_3()
}) },
)
} }
#[cfg(feature = "simd_perf")] #[cfg(feature = "simd_perf")]
@ -344,8 +348,7 @@ impl Float4 {
impl PartialEq for Float4 { impl PartialEq for Float4 {
#[inline] #[inline]
fn eq(&self, other: &Float4) -> bool { fn eq(&self, other: &Float4) -> bool {
self.get_0() == other.get_0() && self.get_1() == other.get_1() && self.get_0() == other.get_0() && self.get_1() == other.get_1() && self.get_2() == other.get_2() && self.get_3() == other.get_3()
self.get_2() == other.get_2() && self.get_3() == other.get_3()
} }
} }
@ -595,8 +598,7 @@ impl Bool4 {
#[inline] #[inline]
pub fn to_bitmask(&self) -> u8 { pub fn to_bitmask(&self) -> u8 {
(self.get_0() as u8) | ((self.get_1() as u8) << 1) | ((self.get_2() as u8) << 2) | (self.get_0() as u8) | ((self.get_1() as u8) << 1) | ((self.get_2() as u8) << 2) | ((self.get_3() as u8) << 3)
((self.get_3() as u8) << 3)
} }
} }
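Bool4::to_bitmask above packs the four comparison lanes into the low bits of a u8 so a whole lane mask can be tested or matched at once. The same idea with plain bools:

/// Pack four lane flags into the low four bits of a u8.
fn to_bitmask(lanes: [bool; 4]) -> u8 {
    (lanes[0] as u8) | ((lanes[1] as u8) << 1) | ((lanes[2] as u8) << 2) | ((lanes[3] as u8) << 3)
}

fn main() {
    assert_eq!(to_bitmask([true, false, true, true]), 0b1101);
}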


@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE. // SOFTWARE.
// //
// Adapted to Rust and to generate Rust instead of C by Nathan Vegdahl // Adapted from Python to Rust and to generate Rust instead of C by Nathan Vegdahl
// Generate Rust code for evaluating Halton points with Faure-permutations for different bases. // Generate Rust code for evaluating Halton points with Faure-permutations for different bases.
@ -63,7 +63,9 @@ fn main() {
}; };
// Write the beginning bits of the file // Write the beginning bits of the file
f.write_all(format!(r#" f.write_all(
format!(
r#"
// Copyright (c) 2012 Leonhard Gruenschloss (leonhard@gruenschloss.org) // Copyright (c) 2012 Leonhard Gruenschloss (leonhard@gruenschloss.org)
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
@ -90,38 +92,54 @@ fn main() {
pub const MAX_DIMENSION: u32 = {}; pub const MAX_DIMENSION: u32 = {};
"#, "#,
NUM_DIMENSIONS) NUM_DIMENSIONS
.as_bytes()) )
.as_bytes()
)
.unwrap(); .unwrap();
// Write the sampling function // Write the sampling function
f.write_all(format!(r#" f.write_all(
format!(
r#"
#[inline] #[inline]
pub fn sample(dimension: u32, index: u32) -> f32 {{ pub fn sample(dimension: u32, index: u32) -> f32 {{
match dimension {{"#) match dimension {{"#
.as_bytes()) )
.as_bytes()
)
.unwrap(); .unwrap();
for i in 0..NUM_DIMENSIONS { for i in 0..NUM_DIMENSIONS {
f.write_all(format!(r#" f.write_all(
format!(
r#"
{} => halton{}(index),"#, {} => halton{}(index),"#,
i, i,
primes[i]) primes[i]
.as_bytes()) )
.as_bytes()
)
.unwrap(); .unwrap();
} }
f.write_all(format!(r#" f.write_all(
format!(
r#"
_ => panic!("Exceeded max dimensions."), _ => panic!("Exceeded max dimensions."),
}} }}
}} }}
"#) "#
.as_bytes()) )
.as_bytes()
)
.unwrap(); .unwrap();
// Write the special-cased first dimension // Write the special-cased first dimension
f.write_all(format!(r#" f.write_all(
format!(
r#"
// Special case: radical inverse in base 2, with direct bit reversal. // Special case: radical inverse in base 2, with direct bit reversal.
fn halton2(mut index: u32) -> f32 {{ fn halton2(mut index: u32) -> f32 {{
index = (index << 16) | (index >> 16); index = (index << 16) | (index >> 16);
@ -131,8 +149,10 @@ fn halton2(mut index: u32) -> f32 {{
index = ((index & 0x55555555) << 1) | ((index & 0xaaaaaaaa) >> 1); index = ((index & 0x55555555) << 1) | ((index & 0xaaaaaaaa) >> 1);
return (index as f32) * (1.0 / ((1u64 << 32) as f32)); return (index as f32) * (1.0 / ((1u64 << 32) as f32));
}} }}
"#) "#
.as_bytes()) )
.as_bytes()
)
.unwrap(); .unwrap();
for i in 1..NUM_DIMENSIONS { for i in 1..NUM_DIMENSIONS {
@ -157,7 +177,9 @@ fn halton2(mut index: u32) -> f32 {{
} }
// Build the permutation table. // Build the permutation table.
let perm = (0..pow_base).map(|j| invert(&faure, base, j, digits)).collect::<Vec<_>>(); let perm = (0..pow_base)
.map(|j| invert(&faure, base, j, digits))
.collect::<Vec<_>>();
let perm_string = { let perm_string = {
let mut perm_string = String::new(); let mut perm_string = String::new();
for i in perm.iter() { for i in perm.iter() {
@ -168,22 +190,30 @@ fn halton2(mut index: u32) -> f32 {{
}; };
let mut power = max_power / pow_base; let mut power = max_power / pow_base;
f.write_all(format!(r#" f.write_all(
format!(
r#"
fn halton{}(index: u32) -> f32 {{ fn halton{}(index: u32) -> f32 {{
const PERM{}: [u16; {}] = [{}];"#, const PERM{}: [u16; {}] = [{}];"#,
base, base,
base, base,
perm.len(), perm.len(),
perm_string) perm_string
.as_bytes()) )
.as_bytes()
)
.unwrap();; .unwrap();;
f.write_all(format!(r#" f.write_all(
format!(
r#"
return (unsafe{{*PERM{}.get_unchecked((index % {}) as usize)}} as u32 * {} +"#, return (unsafe{{*PERM{}.get_unchecked((index % {}) as usize)}} as u32 * {} +"#,
base, base,
pow_base, pow_base,
power) power
.as_bytes()) )
.as_bytes()
)
.unwrap();; .unwrap();;
// Advance to next set of digits. // Advance to next set of digits.
@ -191,17 +221,23 @@ fn halton{}(index: u32) -> f32 {{
while power / pow_base > 1 { while power / pow_base > 1 {
div *= pow_base; div *= pow_base;
power /= pow_base; power /= pow_base;
f.write_all(format!(r#" f.write_all(
format!(
r#"
unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32 * {} +"#, unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32 * {} +"#,
base, base,
div, div,
pow_base, pow_base,
power) power
.as_bytes()) )
.as_bytes()
)
.unwrap();; .unwrap();;
} }
f.write_all(format!(r#" f.write_all(
format!(
r#"
unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32) as f32 * unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32) as f32 *
(0.999999940395355224609375f32 / ({}u32 as f32)); // Results in [0,1). (0.999999940395355224609375f32 / ({}u32 as f32)); // Results in [0,1).
}} }}
@ -209,8 +245,10 @@ fn halton{}(index: u32) -> f32 {{
base, base,
div * pow_base, div * pow_base,
pow_base, pow_base,
max_power) max_power
.as_bytes()) )
.as_bytes()
)
.unwrap();; .unwrap();;
} }
} }
@ -237,25 +275,29 @@ fn get_faure_permutation(faure: &Vec<Vec<usize>>, b: usize) -> Vec<usize> {
let c = (b - 1) / 2; let c = (b - 1) / 2;
return (0..b) return (0..b)
.map(|i| { .map(
|i| {
if i == c { if i == c {
return c; return c;
} }
let f: usize = faure[b - 1][i - ((i > c) as usize)]; let f: usize = faure[b - 1][i - ((i > c) as usize)];
f + ((f >= c) as usize) f + ((f >= c) as usize)
}) }
)
.collect(); .collect();
} else { } else {
// even // even
let c = b / 2; let c = b / 2;
return (0..b) return (0..b)
.map(|i| if i < c { .map(
|i| if i < c {
2 * faure[c][i] 2 * faure[c][i]
} else { } else {
2 * faure[c][i - c] + 1 2 * faure[c][i - c] + 1
}) }
)
.collect(); .collect();
} }
} }
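The generator above writes out halton2 as a direct 32-bit bit reversal scaled into [0, 1). A standalone version of that radical inverse in base 2 (the hunk elides the middle swap lines; the standard byte, nibble, and pair swaps are filled in here as an assumption):

/// Radical inverse in base 2: reverse the bits of the index, then scale to [0, 1).
fn radical_inverse_base2(mut index: u32) -> f32 {
    index = (index << 16) | (index >> 16);
    index = ((index & 0x00ff00ff) << 8) | ((index & 0xff00ff00) >> 8);
    index = ((index & 0x0f0f0f0f) << 4) | ((index & 0xf0f0f0f0) >> 4);
    index = ((index & 0x33333333) << 2) | ((index & 0xcccccccc) >> 2);
    index = ((index & 0x55555555) << 1) | ((index & 0xaaaaaaaa) >> 1);
    (index as f32) * (1.0 / ((1u64 << 32) as f32))
}

fn main() {
    assert_eq!(radical_inverse_base2(1), 0.5);
    assert_eq!(radical_inverse_base2(2), 0.25);
}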


@ -33,23 +33,7 @@ impl Matrix4x4 {
/// i j k l /// i j k l
/// m n o p /// m n o p
#[inline] #[inline]
pub fn new_from_values(a: f32, pub fn new_from_values(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32, i: f32, j: f32, k: f32, l: f32, m: f32, n: f32, o: f32, p: f32) -> Matrix4x4 {
b: f32,
c: f32,
d: f32,
e: f32,
f: f32,
g: f32,
h: f32,
i: f32,
j: f32,
k: f32,
l: f32,
m: f32,
n: f32,
o: f32,
p: f32)
-> Matrix4x4 {
Matrix4x4 { Matrix4x4 {
values: [Float4::new(a, b, c, d), values: [Float4::new(a, b, c, d),
Float4::new(e, f, g, h), Float4::new(e, f, g, h),
@ -106,22 +90,30 @@ impl Matrix4x4 {
pub fn transposed(&self) -> Matrix4x4 { pub fn transposed(&self) -> Matrix4x4 {
Matrix4x4 { Matrix4x4 {
values: { values: {
[Float4::new(self[0].get_0(), [Float4::new(
self[0].get_0(),
self[1].get_0(), self[1].get_0(),
self[2].get_0(), self[2].get_0(),
self[3].get_0()), self[3].get_0(),
Float4::new(self[0].get_1(), ),
Float4::new(
self[0].get_1(),
self[1].get_1(), self[1].get_1(),
self[2].get_1(), self[2].get_1(),
self[3].get_1()), self[3].get_1(),
Float4::new(self[0].get_2(), ),
Float4::new(
self[0].get_2(),
self[1].get_2(), self[1].get_2(),
self[2].get_2(), self[2].get_2(),
self[3].get_2()), self[3].get_2(),
Float4::new(self[0].get_3(), ),
Float4::new(
self[0].get_3(),
self[1].get_3(), self[1].get_3(),
self[2].get_3(), self[2].get_3(),
self[3].get_3())] self[3].get_3(),
)]
}, },
} }
} }
@ -150,41 +142,33 @@ impl Matrix4x4 {
Matrix4x4 { Matrix4x4 {
values: { values: {
[Float4::new(((self[1].get_1() * c5) - (self[1].get_2() * c4) + [Float4::new(
(self[1].get_3() * c3)) * invdet, ((self[1].get_1() * c5) - (self[1].get_2() * c4) + (self[1].get_3() * c3)) * invdet,
((-self[0].get_1() * c5) + (self[0].get_2() * c4) - ((-self[0].get_1() * c5) + (self[0].get_2() * c4) - (self[0].get_3() * c3)) * invdet,
(self[0].get_3() * c3)) * invdet, ((self[3].get_1() * s5) - (self[3].get_2() * s4) + (self[3].get_3() * s3)) * invdet,
((self[3].get_1() * s5) - (self[3].get_2() * s4) + ((-self[2].get_1() * s5) + (self[2].get_2() * s4) - (self[2].get_3() * s3)) * invdet,
(self[3].get_3() * s3)) * invdet, ),
((-self[2].get_1() * s5) + (self[2].get_2() * s4) -
(self[2].get_3() * s3)) * invdet),
Float4::new(((-self[1].get_0() * c5) + (self[1].get_2() * c2) - Float4::new(
(self[1].get_3() * c1)) * invdet, ((-self[1].get_0() * c5) + (self[1].get_2() * c2) - (self[1].get_3() * c1)) * invdet,
((self[0].get_0() * c5) - (self[0].get_2() * c2) + ((self[0].get_0() * c5) - (self[0].get_2() * c2) + (self[0].get_3() * c1)) * invdet,
(self[0].get_3() * c1)) * invdet, ((-self[3].get_0() * s5) + (self[3].get_2() * s2) - (self[3].get_3() * s1)) * invdet,
((-self[3].get_0() * s5) + (self[3].get_2() * s2) - ((self[2].get_0() * s5) - (self[2].get_2() * s2) + (self[2].get_3() * s1)) * invdet,
(self[3].get_3() * s1)) * invdet, ),
((self[2].get_0() * s5) - (self[2].get_2() * s2) +
(self[2].get_3() * s1)) * invdet),
Float4::new(((self[1].get_0() * c4) - (self[1].get_1() * c2) + Float4::new(
(self[1].get_3() * c0)) * invdet, ((self[1].get_0() * c4) - (self[1].get_1() * c2) + (self[1].get_3() * c0)) * invdet,
((-self[0].get_0() * c4) + (self[0].get_1() * c2) - ((-self[0].get_0() * c4) + (self[0].get_1() * c2) - (self[0].get_3() * c0)) * invdet,
(self[0].get_3() * c0)) * invdet, ((self[3].get_0() * s4) - (self[3].get_1() * s2) + (self[3].get_3() * s0)) * invdet,
((self[3].get_0() * s4) - (self[3].get_1() * s2) + ((-self[2].get_0() * s4) + (self[2].get_1() * s2) - (self[2].get_3() * s0)) * invdet,
(self[3].get_3() * s0)) * invdet, ),
((-self[2].get_0() * s4) + (self[2].get_1() * s2) -
(self[2].get_3() * s0)) * invdet),
Float4::new(((-self[1].get_0() * c3) + (self[1].get_1() * c1) - Float4::new(
(self[1].get_2() * c0)) * invdet, ((-self[1].get_0() * c3) + (self[1].get_1() * c1) - (self[1].get_2() * c0)) * invdet,
((self[0].get_0() * c3) - (self[0].get_1() * c1) + ((self[0].get_0() * c3) - (self[0].get_1() * c1) + (self[0].get_2() * c0)) * invdet,
(self[0].get_2() * c0)) * invdet, ((-self[3].get_0() * s3) + (self[3].get_1() * s1) - (self[3].get_2() * s0)) * invdet,
((-self[3].get_0() * s3) + (self[3].get_1() * s1) - ((self[2].get_0() * s3) - (self[2].get_1() * s1) + (self[2].get_2() * s0)) * invdet,
(self[3].get_2() * s0)) * invdet, )]
((self[2].get_0() * s3) - (self[2].get_1() * s1) +
(self[2].get_2() * s0)) * invdet)]
}, },
} }
} }
@ -233,25 +217,33 @@ impl Mul<Matrix4x4> for Matrix4x4 {
fn mul(self, other: Matrix4x4) -> Matrix4x4 { fn mul(self, other: Matrix4x4) -> Matrix4x4 {
let m = self.transposed(); let m = self.transposed();
Matrix4x4 { Matrix4x4 {
values: [Float4::new((m[0] * other[0]).h_sum(), values: [Float4::new(
(m[0] * other[0]).h_sum(),
(m[1] * other[0]).h_sum(), (m[1] * other[0]).h_sum(),
(m[2] * other[0]).h_sum(), (m[2] * other[0]).h_sum(),
(m[3] * other[0]).h_sum()), (m[3] * other[0]).h_sum(),
),
Float4::new((m[0] * other[1]).h_sum(), Float4::new(
(m[0] * other[1]).h_sum(),
(m[1] * other[1]).h_sum(), (m[1] * other[1]).h_sum(),
(m[2] * other[1]).h_sum(), (m[2] * other[1]).h_sum(),
(m[3] * other[1]).h_sum()), (m[3] * other[1]).h_sum(),
),
Float4::new((m[0] * other[2]).h_sum(), Float4::new(
(m[0] * other[2]).h_sum(),
(m[1] * other[2]).h_sum(), (m[1] * other[2]).h_sum(),
(m[2] * other[2]).h_sum(), (m[2] * other[2]).h_sum(),
(m[3] * other[2]).h_sum()), (m[3] * other[2]).h_sum(),
),
Float4::new((m[0] * other[3]).h_sum(), Float4::new(
(m[0] * other[3]).h_sum(),
(m[1] * other[3]).h_sum(), (m[1] * other[3]).h_sum(),
(m[2] * other[3]).h_sum(), (m[2] * other[3]).h_sum(),
(m[3] * other[3]).h_sum())], (m[3] * other[3]).h_sum(),
)],
} }
} }
} }
@ -268,11 +260,7 @@ mod tests {
fn equality_test() { fn equality_test() {
let a = Matrix4x4::new(); let a = Matrix4x4::new();
let b = Matrix4x4::new(); let b = Matrix4x4::new();
let c = Matrix4x4::new_from_values(1.1, let c = Matrix4x4::new_from_values(
0.0,
0.0,
0.0,
0.0,
1.1, 1.1,
0.0, 0.0,
0.0, 0.0,
@ -283,7 +271,13 @@ mod tests {
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
1.1); 1.1,
0.0,
0.0,
0.0,
0.0,
1.1,
);
assert_eq!(a, b); assert_eq!(a, b);
assert!(a != c); assert!(a != c);
@ -292,11 +286,7 @@ mod tests {
#[test] #[test]
fn aproximate_equality_test() { fn aproximate_equality_test() {
let a = Matrix4x4::new(); let a = Matrix4x4::new();
let b = Matrix4x4::new_from_values(1.001, let b = Matrix4x4::new_from_values(
0.0,
0.0,
0.0,
0.0,
1.001, 1.001,
0.0, 0.0,
0.0, 0.0,
@ -307,12 +297,14 @@ mod tests {
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
1.001); 1.001,
let c = Matrix4x4::new_from_values(1.003,
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
1.001,
);
let c = Matrix4x4::new_from_values(
1.003, 1.003,
0.0, 0.0,
0.0, 0.0,
@ -323,12 +315,14 @@ mod tests {
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
1.003); 1.003,
let d = Matrix4x4::new_from_values(-1.001,
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
1.003,
);
let d = Matrix4x4::new_from_values(
-1.001, -1.001,
0.0, 0.0,
0.0, 0.0,
@ -339,7 +333,13 @@ mod tests {
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
-1.001); -1.001,
0.0,
0.0,
0.0,
0.0,
-1.001,
);
assert!(a.aprx_eq(b, 0.002)); assert!(a.aprx_eq(b, 0.002));
assert!(!a.aprx_eq(c, 0.002)); assert!(!a.aprx_eq(c, 0.002));
@ -348,7 +348,8 @@ mod tests {
#[test] #[test]
fn multiply_test() { fn multiply_test() {
let a = Matrix4x4::new_from_values(1.0, let a = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
2.0, 2.0,
1.5, 1.5,
@ -363,8 +364,10 @@ mod tests {
13.0, 13.0,
7.0, 7.0,
15.0, 15.0,
3.0); 3.0,
let b = Matrix4x4::new_from_values(1.0, );
let b = Matrix4x4::new_from_values(
1.0,
5.0, 5.0,
9.0, 9.0,
13.0, 13.0,
@ -379,8 +382,10 @@ mod tests {
4.0, 4.0,
8.0, 8.0,
12.0, 12.0,
16.0); 16.0,
let c = Matrix4x4::new_from_values(266.0, );
let c = Matrix4x4::new_from_values(
266.0,
141.0, 141.0,
331.0, 331.0,
188.5, 188.5,
@ -395,14 +400,16 @@ mod tests {
344.0, 344.0,
192.0, 192.0,
436.0, 436.0,
262.0); 262.0,
);
assert_eq!(a * b, c); assert_eq!(a * b, c);
} }
#[test] #[test]
fn inverse_test() { fn inverse_test() {
let a = Matrix4x4::new_from_values(1.0, let a = Matrix4x4::new_from_values(
1.0,
0.33, 0.33,
0.0, 0.0,
-2.0, -2.0,
@ -417,7 +424,8 @@ mod tests {
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
-1.0); -1.0,
);
let b = a.inverse(); let b = a.inverse();
let c = Matrix4x4::new(); let c = Matrix4x4::new();
@ -426,7 +434,8 @@ mod tests {
#[test] #[test]
fn transpose_test() { fn transpose_test() {
let a = Matrix4x4::new_from_values(1.0, let a = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
3.0, 3.0,
4.0, 4.0,
@ -441,8 +450,10 @@ mod tests {
13.0, 13.0,
14.0, 14.0,
15.0, 15.0,
16.0); 16.0,
let b = Matrix4x4::new_from_values(1.0, );
let b = Matrix4x4::new_from_values(
1.0,
5.0, 5.0,
9.0, 9.0,
13.0, 13.0,
@ -457,7 +468,8 @@ mod tests {
4.0, 4.0,
8.0, 8.0,
12.0, 12.0,
16.0); 16.0,
);
let c = a.transposed(); let c = a.transposed();
assert_eq!(b, c); assert_eq!(b, c);
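The Mul impl above first transposes self so that each output element is just an element-wise multiply followed by a horizontal sum (h_sum). The same indexing written out with plain nested arrays instead of Float4, as a sketch only:

/// Dot product of two 4-vectors.
fn dot4(a: [f32; 4], b: [f32; 4]) -> f32 {
    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}

/// Multiply with the indexing used above: transpose `a`, then every output
/// entry is one dot product.
fn mat_mul(a: [[f32; 4]; 4], b: [[f32; 4]; 4]) -> [[f32; 4]; 4] {
    let mut at = [[0.0f32; 4]; 4];
    for i in 0..4 {
        for j in 0..4 {
            at[i][j] = a[j][i];
        }
    }
    let mut out = [[0.0f32; 4]; 4];
    for i in 0..4 {
        for j in 0..4 {
            out[i][j] = dot4(at[j], b[i]);
        }
    }
    out
}

fn main() {
    let id = [
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ];
    let a = [
        [1.0, 2.0, 3.0, 4.0],
        [5.0, 6.0, 7.0, 8.0],
        [9.0, 10.0, 11.0, 12.0],
        [13.0, 14.0, 15.0, 16.0],
    ];
    assert_eq!(mat_mul(a, id), a);
}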


@ -127,10 +127,12 @@ impl Mul<Matrix4x4> for Normal {
fn mul(self, other: Matrix4x4) -> Normal { fn mul(self, other: Matrix4x4) -> Normal {
let mat = other.inverse().transposed(); let mat = other.inverse().transposed();
Normal { Normal {
co: Float4::new((self.co * mat.values[0]).h_sum(), co: Float4::new(
(self.co * mat.values[0]).h_sum(),
(self.co * mat.values[1]).h_sum(), (self.co * mat.values[1]).h_sum(),
(self.co * mat.values[2]).h_sum(), (self.co * mat.values[2]).h_sum(),
0.0), 0.0,
),
} }
} }
} }
@ -168,13 +170,12 @@ impl CrossProduct for Normal {
#[inline] #[inline]
fn cross(self, other: Normal) -> Normal { fn cross(self, other: Normal) -> Normal {
Normal { Normal {
co: Float4::new((self.co.get_1() * other.co.get_2()) - co: Float4::new(
(self.co.get_2() * other.co.get_1()), (self.co.get_1() * other.co.get_2()) - (self.co.get_2() * other.co.get_1()),
(self.co.get_2() * other.co.get_0()) - (self.co.get_2() * other.co.get_0()) - (self.co.get_0() * other.co.get_2()),
(self.co.get_0() * other.co.get_2()), (self.co.get_0() * other.co.get_1()) - (self.co.get_1() * other.co.get_0()),
(self.co.get_0() * other.co.get_1()) - 0.0,
(self.co.get_1() * other.co.get_0()), ),
0.0),
} }
} }
} }
@ -215,7 +216,8 @@ mod tests {
#[test] #[test]
fn mul_matrix_1() { fn mul_matrix_1() {
let n = Normal::new(1.0, 2.5, 4.0); let n = Normal::new(1.0, 2.5, 4.0);
let m = Matrix4x4::new_from_values(1.0, let m = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
2.0, 2.0,
1.5, 1.5,
@ -230,7 +232,8 @@ mod tests {
13.0, 13.0,
7.0, 7.0,
15.0, 15.0,
3.0); 3.0,
);
let nm = Normal::new(-19.258825, 5.717648, -1.770588); let nm = Normal::new(-19.258825, 5.717648, -1.770588);
assert!(((n * m) - nm).length2() < 0.00001); assert!(((n * m) - nm).length2() < 0.00001);
} }


@ -133,10 +133,12 @@ impl Mul<Matrix4x4> for Point {
#[inline] #[inline]
fn mul(self, other: Matrix4x4) -> Point { fn mul(self, other: Matrix4x4) -> Point {
Point { Point {
co: Float4::new((self.co * other.values[0]).h_sum(), co: Float4::new(
(self.co * other.values[0]).h_sum(),
(self.co * other.values[1]).h_sum(), (self.co * other.values[1]).h_sum(),
(self.co * other.values[2]).h_sum(), (self.co * other.values[2]).h_sum(),
(self.co * other.values[3]).h_sum()), (self.co * other.values[3]).h_sum(),
),
} }
} }
} }
@ -177,7 +179,8 @@ mod tests {
#[test] #[test]
fn mul_matrix_1() { fn mul_matrix_1() {
let p = Point::new(1.0, 2.5, 4.0); let p = Point::new(1.0, 2.5, 4.0);
let m = Matrix4x4::new_from_values(1.0, let m = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
2.0, 2.0,
1.5, 1.5,
@ -192,7 +195,8 @@ mod tests {
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
1.0); 1.0,
);
let pm = Point::new(15.5, 54.0, 70.0); let pm = Point::new(15.5, 54.0, 70.0);
assert_eq!(p * m, pm); assert_eq!(p * m, pm);
} }
@ -200,7 +204,8 @@ mod tests {
#[test] #[test]
fn mul_matrix_2() { fn mul_matrix_2() {
let p = Point::new(1.0, 2.5, 4.0); let p = Point::new(1.0, 2.5, 4.0);
let m = Matrix4x4::new_from_values(1.0, let m = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
2.0, 2.0,
1.5, 1.5,
@ -215,7 +220,8 @@ mod tests {
2.0, 2.0,
3.0, 3.0,
1.0, 1.0,
5.0); 5.0,
);
let mut pm = Point::new(15.5, 54.0, 70.0); let mut pm = Point::new(15.5, 54.0, 70.0);
pm.co.set_3(18.5); pm.co.set_3(18.5);
assert_eq!(p * m, pm); assert_eq!(p * m, pm);
@ -225,7 +231,8 @@ mod tests {
fn mul_matrix_3() { fn mul_matrix_3() {
// Make sure matrix multiplication composes the way one would expect // Make sure matrix multiplication composes the way one would expect
let p = Point::new(1.0, 2.5, 4.0); let p = Point::new(1.0, 2.5, 4.0);
let m1 = Matrix4x4::new_from_values(1.0, let m1 = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
2.0, 2.0,
1.5, 1.5,
@ -240,8 +247,10 @@ mod tests {
13.0, 13.0,
7.0, 7.0,
15.0, 15.0,
3.0); 3.0,
let m2 = Matrix4x4::new_from_values(4.0, );
let m2 = Matrix4x4::new_from_values(
4.0,
1.0, 1.0,
2.0, 2.0,
3.5, 3.5,
@ -256,7 +265,8 @@ mod tests {
5.0, 5.0,
7.0, 7.0,
8.0, 8.0,
11.0); 11.0,
);
println!("{:?}", m1 * m2); println!("{:?}", m1 * m2);
let pmm1 = p * (m1 * m2); let pmm1 = p * (m1 * m2);


@ -127,10 +127,12 @@ impl Mul<Matrix4x4> for Vector {
#[inline] #[inline]
fn mul(self, other: Matrix4x4) -> Vector { fn mul(self, other: Matrix4x4) -> Vector {
Vector { Vector {
co: Float4::new((self.co * other.values[0]).h_sum(), co: Float4::new(
(self.co * other.values[0]).h_sum(),
(self.co * other.values[1]).h_sum(), (self.co * other.values[1]).h_sum(),
(self.co * other.values[2]).h_sum(), (self.co * other.values[2]).h_sum(),
(self.co * other.values[3]).h_sum()), (self.co * other.values[3]).h_sum(),
),
} }
} }
} }
@ -168,13 +170,12 @@ impl CrossProduct for Vector {
#[inline] #[inline]
fn cross(self, other: Vector) -> Vector { fn cross(self, other: Vector) -> Vector {
Vector { Vector {
co: Float4::new((self.co.get_1() * other.co.get_2()) - co: Float4::new(
(self.co.get_2() * other.co.get_1()), (self.co.get_1() * other.co.get_2()) - (self.co.get_2() * other.co.get_1()),
(self.co.get_2() * other.co.get_0()) - (self.co.get_2() * other.co.get_0()) - (self.co.get_0() * other.co.get_2()),
(self.co.get_0() * other.co.get_2()), (self.co.get_0() * other.co.get_1()) - (self.co.get_1() * other.co.get_0()),
(self.co.get_0() * other.co.get_1()) - 0.0,
(self.co.get_1() * other.co.get_0()), ),
0.0),
} }
} }
} }
@ -215,7 +216,8 @@ mod tests {
#[test] #[test]
fn mul_matrix_1() { fn mul_matrix_1() {
let v = Vector::new(1.0, 2.5, 4.0); let v = Vector::new(1.0, 2.5, 4.0);
let m = Matrix4x4::new_from_values(1.0, let m = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
2.0, 2.0,
1.5, 1.5,
@ -230,7 +232,8 @@ mod tests {
13.0, 13.0,
7.0, 7.0,
15.0, 15.0,
3.0); 3.0,
);
let mut vm = Vector::new(14.0, 46.0, 58.0); let mut vm = Vector::new(14.0, 46.0, 58.0);
vm.co.set_3(90.5); vm.co.set_3(90.5);
assert_eq!(v * m, vm); assert_eq!(v * m, vm);
@ -239,7 +242,8 @@ mod tests {
#[test] #[test]
fn mul_matrix_2() { fn mul_matrix_2() {
let v = Vector::new(1.0, 2.5, 4.0); let v = Vector::new(1.0, 2.5, 4.0);
let m = Matrix4x4::new_from_values(1.0, let m = Matrix4x4::new_from_values(
1.0,
2.0, 2.0,
2.0, 2.0,
1.5, 1.5,
@ -254,7 +258,8 @@ mod tests {
0.0, 0.0,
0.0, 0.0,
0.0, 0.0,
1.0); 1.0,
);
let vm = Vector::new(14.0, 46.0, 58.0); let vm = Vector::new(14.0, 46.0, 58.0);
assert_eq!(v * m, vm); assert_eq!(v * m, vm);
} }


@ -137,9 +137,7 @@ impl MemArena {
/// the type's inherent alignment, whichever is greater. /// the type's inherent alignment, whichever is greater.
/// ///
/// CAUTION: the memory returned is uninitialized. Make sure to initalize before using! /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
pub unsafe fn alloc_uninitialized_with_alignment<'a, T: Copy>(&'a self, pub unsafe fn alloc_uninitialized_with_alignment<'a, T: Copy>(&'a self, align: usize) -> &'a mut T {
align: usize)
-> &'a mut T {
assert!(size_of::<T>() > 0); assert!(size_of::<T>() > 0);
let memory = self.alloc_raw(size_of::<T>(), max(align, align_of::<T>())) as *mut T; let memory = self.alloc_raw(size_of::<T>(), max(align, align_of::<T>())) as *mut T;
@ -164,11 +162,7 @@ impl MemArena {
/// ///
/// Additionally, the allocation will be made with the given byte alignment or /// Additionally, the allocation will be made with the given byte alignment or
/// the type's inherent alignment, whichever is greater. /// the type's inherent alignment, whichever is greater.
pub fn alloc_array_with_alignment<'a, T: Copy>(&'a self, pub fn alloc_array_with_alignment<'a, T: Copy>(&'a self, len: usize, value: T, align: usize) -> &'a mut [T] {
len: usize,
value: T,
align: usize)
-> &'a mut [T] {
let memory = unsafe { self.alloc_array_uninitialized_with_alignment(len, align) }; let memory = unsafe { self.alloc_array_uninitialized_with_alignment(len, align) };
for v in memory.iter_mut() { for v in memory.iter_mut() {
@ -195,10 +189,7 @@ impl MemArena {
/// ///
/// Additionally, the allocation will be made with the given byte alignment or /// Additionally, the allocation will be made with the given byte alignment or
/// the type's inherent alignment, whichever is greater. /// the type's inherent alignment, whichever is greater.
pub fn copy_slice_with_alignment<'a, T: Copy>(&'a self, pub fn copy_slice_with_alignment<'a, T: Copy>(&'a self, other: &[T], align: usize) -> &'a mut [T] {
other: &[T],
align: usize)
-> &'a mut [T] {
let memory = unsafe { self.alloc_array_uninitialized_with_alignment(other.len(), align) }; let memory = unsafe { self.alloc_array_uninitialized_with_alignment(other.len(), align) };
for (v, other) in memory.iter_mut().zip(other.iter()) { for (v, other) in memory.iter_mut().zip(other.iter()) {
@ -231,10 +222,7 @@ impl MemArena {
/// the type's inherent alignment, whichever is greater. /// the type's inherent alignment, whichever is greater.
/// ///
/// CAUTION: the memory returned is uninitialized. Make sure to initalize before using! /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
pub unsafe fn alloc_array_uninitialized_with_alignment<'a, T: Copy>(&'a self, pub unsafe fn alloc_array_uninitialized_with_alignment<'a, T: Copy>(&'a self, len: usize, align: usize) -> &'a mut [T] {
len: usize,
align: usize)
-> &'a mut [T] {
assert!(size_of::<T>() > 0); assert!(size_of::<T>() > 0);
let array_mem_size = { let array_mem_size = {
@ -257,7 +245,8 @@ impl MemArena {
unsafe fn alloc_raw(&self, size: usize, alignment: usize) -> *mut u8 { unsafe fn alloc_raw(&self, size: usize, alignment: usize) -> *mut u8 {
assert!(alignment > 0); assert!(alignment > 0);
self.stat_space_allocated.set(self.stat_space_allocated.get() + size); // Update stats self.stat_space_allocated
.set(self.stat_space_allocated.get() + size); // Update stats
let mut blocks = self.blocks.borrow_mut(); let mut blocks = self.blocks.borrow_mut();
@ -295,10 +284,8 @@ impl MemArena {
}; };
let waste_percentage = { let waste_percentage = {
let w1 = ((blocks[0].capacity() - blocks[0].len()) * 100) / let w1 = ((blocks[0].capacity() - blocks[0].len()) * 100) / blocks[0].capacity();
blocks[0].capacity(); let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get()) * 100) / self.stat_space_occupied.get();
let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get()) *
100) / self.stat_space_occupied.get();
if w1 < w2 { w1 } else { w2 } if w1 < w2 { w1 } else { w2 }
}; };
@ -311,8 +298,7 @@ impl MemArena {
blocks.push(Vec::with_capacity(size + alignment - 1)); blocks.push(Vec::with_capacity(size + alignment - 1));
blocks.last_mut().unwrap().set_len(size + alignment - 1); blocks.last_mut().unwrap().set_len(size + alignment - 1);
let start_index = alignment_offset(blocks.last().unwrap().as_ptr() as usize, let start_index = alignment_offset(blocks.last().unwrap().as_ptr() as usize, alignment);
alignment);
let block_ptr = blocks.last_mut().unwrap().as_mut_ptr(); let block_ptr = blocks.last_mut().unwrap().as_mut_ptr();
return block_ptr.offset(start_index as isize); return block_ptr.offset(start_index as isize);
@ -320,14 +306,14 @@ impl MemArena {
// Otherwise create a new shared block. // Otherwise create a new shared block.
else { else {
// Update stats // Update stats
self.stat_space_occupied.set(self.stat_space_occupied.get() + next_size); self.stat_space_occupied
.set(self.stat_space_occupied.get() + next_size);
blocks.push(Vec::with_capacity(next_size)); blocks.push(Vec::with_capacity(next_size));
let block_count = blocks.len(); let block_count = blocks.len();
blocks.swap(0, block_count - 1); blocks.swap(0, block_count - 1);
let start_index = alignment_offset(blocks.first().unwrap().as_ptr() as usize, let start_index = alignment_offset(blocks.first().unwrap().as_ptr() as usize, alignment);
alignment);
blocks.first_mut().unwrap().set_len(start_index + size); blocks.first_mut().unwrap().set_len(start_index + size);
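alloc_raw above keeps pointers aligned by skipping alignment_offset(ptr as usize, alignment) bytes into each block. The helper itself isn't part of this diff; one common way to compute it, shown as an assumption rather than the crate's definition:

/// Bytes to skip so that addr + offset is aligned to `alignment`
/// (alignment must be a power of two).
fn alignment_offset(addr: usize, alignment: usize) -> usize {
    debug_assert!(alignment.is_power_of_two());
    (alignment - (addr % alignment)) % alignment
}

fn main() {
    assert_eq!(alignment_offset(0x1000, 16), 0);
    assert_eq!(alignment_offset(0x1001, 16), 15);
}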

File diff suppressed because it is too large