diff --git a/rustfmt.toml b/rustfmt.toml
deleted file mode 100644
index bd046c7..0000000
--- a/rustfmt.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-max_width = 1024
-error_on_line_overflow = false
-array_layout = "Block"
-chain_indent = "Block"
-fn_args_layout = "Block"
-fn_call_style = "Block"
\ No newline at end of file
diff --git a/src/accel/bvh.rs b/src/accel/bvh.rs
index a1533f7..c78b477 100644
--- a/src/accel/bvh.rs
+++ b/src/accel/bvh.rs
@@ -38,8 +38,14 @@ pub enum BVHNode<'a> {
 }
 impl<'a> BVH<'a> {
-    pub fn from_objects<'b, T, F>(arena: &'a MemArena, objects: &mut [T], objects_per_leaf: usize, bounder: F) -> BVH<'a>
-        where F: 'b + Fn(&T) -> &'b [BBox]
+    pub fn from_objects<'b, T, F>(
+        arena: &'a MemArena,
+        objects: &mut [T],
+        objects_per_leaf: usize,
+        bounder: F,
+    ) -> BVH<'a>
+    where
+        F: 'b + Fn(&T) -> &'b [BBox],
     {
         if objects.len() == 0 {
             BVH {
@@ -50,7 +56,11 @@ impl<'a> BVH<'a> {
             let base = BVHBase::from_objects(objects, objects_per_leaf, bounder);
             BVH {
-                root: Some(BVH::construct_from_base(arena, &base, base.root_node_index())),
+                root: Some(BVH::construct_from_base(
+                    arena,
+                    &base,
+                    base.root_node_index(),
+                )),
                 depth: base.depth,
             }
         }
@@ -61,7 +71,8 @@ impl<'a> BVH<'a> {
     }
     pub fn traverse<T, F>(&self, rays: &mut [AccelRay], objects: &[T], mut obj_ray_test: F)
-        where F: FnMut(&T, &mut [AccelRay])
+    where
+        F: FnMut(&T, &mut [AccelRay]),
     {
         if self.root.is_none() {
             return;
@@ -89,11 +100,11 @@ impl<'a> BVH<'a> {
                     bounds_len,
                     split_axis,
                 } => {
-                    let bounds = unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
-                    let part = partition(
-                        &mut rays[..ray_i_stack[stack_ptr]],
-                        |r| (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r),
-                    );
+                    let bounds =
+                        unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
+                    let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
+                        (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r)
+                    });
                     if part > 0 {
                         ray_i_stack[stack_ptr] = part;
                         ray_i_stack[stack_ptr + 1] = part;
@@ -115,11 +126,11 @@ impl<'a> BVH<'a> {
                     bounds_start,
                     bounds_len,
                 } => {
-                    let bounds = unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
-                    let part = partition(
-                        &mut rays[..ray_i_stack[stack_ptr]],
-                        |r| (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r),
-                    );
+                    let bounds =
+                        unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) };
+                    let part = partition(&mut rays[..ray_i_stack[stack_ptr]], |r| {
+                        (!r.is_done()) && lerp_slice(bounds, r.time).intersect_accel_ray(r)
+                    });
                     trav_time += timer.tick() as f64;
@@ -137,15 +148,17 @@ impl<'a> BVH<'a> {
         }
         trav_time += timer.tick() as f64;
-        ACCEL_TRAV_TIME.with(
-            |att| {
-                let v = att.get();
-                att.set(v + trav_time);
-            }
-        );
+        ACCEL_TRAV_TIME.with(|att| {
+            let v = att.get();
+            att.set(v + trav_time);
+        });
     }
-    fn construct_from_base(arena: &'a MemArena, base: &BVHBase, node_index: usize) -> &'a mut BVHNode<'a> {
+    fn construct_from_base(
+        arena: &'a MemArena,
+        base: &BVHBase,
+        node_index: usize,
+    ) -> &'a mut BVHNode<'a> {
         match &base.nodes[node_index] {
             &BVHBaseNode::Internal {
                 bounds_range,
@@ -154,7 +167,10 @@ impl<'a> BVH<'a> {
             } => {
                 let mut node = unsafe { arena.alloc_uninitialized_with_alignment::<BVHNode>(32) };
-                let bounds = arena.copy_slice_with_alignment(&base.bounds[bounds_range.0..bounds_range.1], 32);
+                let bounds = arena.copy_slice_with_alignment(
+                    &base.bounds[bounds_range.0..bounds_range.1],
+                    32,
+                );
                 let child1 = BVH::construct_from_base(arena, base, children_indices.0);
                 let child2 =
BVH::construct_from_base(arena, base, children_indices.1); diff --git a/src/accel/bvh_base.rs b/src/accel/bvh_base.rs index e1cbc5c..b359e25 100644 --- a/src/accel/bvh_base.rs +++ b/src/accel/bvh_base.rs @@ -58,7 +58,8 @@ impl BVHBase { } pub fn from_objects<'b, T, F>(objects: &mut [T], objects_per_leaf: usize, bounder: F) -> BVHBase - where F: 'b + Fn(&T) -> &'b [BBox] + where + F: 'b + Fn(&T) -> &'b [BBox], { let mut bvh = BVHBase::new(); bvh.recursive_build(0, 0, objects_per_leaf, objects, &bounder); @@ -70,7 +71,8 @@ impl BVHBase { } fn acc_bounds<'a, T, F>(&mut self, objects: &mut [T], bounder: &F) - where F: 'a + Fn(&T) -> &'a [BBox] + where + F: 'a + Fn(&T) -> &'a [BBox], { // TODO: do all of this without the temporary cache let max_len = objects.iter().map(|obj| bounder(obj).len()).max().unwrap(); @@ -94,8 +96,16 @@ impl BVHBase { } } - fn recursive_build<'a, T, F>(&mut self, offset: usize, depth: usize, objects_per_leaf: usize, objects: &mut [T], bounder: &F) -> (usize, (usize, usize)) - where F: 'a + Fn(&T) -> &'a [BBox] + fn recursive_build<'a, T, F>( + &mut self, + offset: usize, + depth: usize, + objects_per_leaf: usize, + objects: &mut [T], + bounder: &F, + ) -> (usize, (usize, usize)) + where + F: 'a + Fn(&T) -> &'a [BBox], { let me = self.nodes.len(); @@ -109,12 +119,13 @@ impl BVHBase { // We make sure that it's worth having multiple time samples, and if not // we reduce to the union of the time samples. self.acc_bounds(objects, bounder); - let union_bounds = self.bounds_cache - .iter() - .fold(BBox::new(), |b1, b2| (b1 | *b2)); - let average_area = self.bounds_cache - .iter() - .fold(0.0, |area, bb| area + bb.surface_area()) / self.bounds_cache.len() as f32; + let union_bounds = self.bounds_cache.iter().fold( + BBox::new(), + |b1, b2| (b1 | *b2), + ); + let average_area = self.bounds_cache.iter().fold(0.0, |area, bb| { + area + bb.surface_area() + }) / self.bounds_cache.len() as f32; if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) { self.bounds.push(union_bounds); } else { @@ -123,13 +134,10 @@ impl BVHBase { } // Create node - self.nodes - .push( - BVHBaseNode::Leaf { - bounds_range: (bi, self.bounds.len()), - object_range: (offset, offset + objects.len()), - } - ); + self.nodes.push(BVHBaseNode::Leaf { + bounds_range: (bi, self.bounds.len()), + object_range: (offset, offset + objects.len()), + }); if self.depth < depth { self.depth = depth; @@ -138,26 +146,24 @@ impl BVHBase { return (me, (bi, self.bounds.len())); } else { // Not a leaf node - self.nodes - .push( - BVHBaseNode::Internal { - bounds_range: (0, 0), - children_indices: (0, 0), - split_axis: 0, - } - ); + self.nodes.push(BVHBaseNode::Internal { + bounds_range: (0, 0), + children_indices: (0, 0), + split_axis: 0, + }); // Partition objects. // If we're too near the max depth, we do balanced building to // avoid exceeding max depth. // Otherwise we do SAH splitting to build better trees. 
- let (split_index, split_axis) = if (log2_64(objects.len() as u64) as usize) < (BVH_MAX_DEPTH - depth) { - // SAH splitting, when we have room to play - sah_split(objects, &bounder) - } else { - // Balanced splitting, when we don't have room to play - median_split(objects, &bounder) - }; + let (split_index, split_axis) = + if (log2_64(objects.len() as u64) as usize) < (BVH_MAX_DEPTH - depth) { + // SAH splitting, when we have room to play + sah_split(objects, &bounder) + } else { + // Balanced splitting, when we don't have room to play + median_split(objects, &bounder) + }; // Create child nodes let (c1_index, c1_bounds) = self.recursive_build( @@ -189,7 +195,8 @@ impl BVHBase { // We make sure that it's worth having multiple time samples, and if not // we reduce to the union of the time samples. let union_bounds = merged.iter().fold(BBox::new(), |b1, b2| (b1 | *b2)); - let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area()) / merged.len() as f32; + let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area()) / + merged.len() as f32; if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) { self.bounds.push(union_bounds); } else { diff --git a/src/accel/light_array.rs b/src/accel/light_array.rs index f78fd69..f071810 100644 --- a/src/accel/light_array.rs +++ b/src/accel/light_array.rs @@ -13,7 +13,8 @@ pub struct LightArray { impl LightArray { #[allow(dead_code)] pub fn new<'a, T, F>(things: &mut [T], q: F) -> LightArray - where F: 'a + Fn(&T) -> Option<(&'a [BBox], f32)> + where + F: 'a + Fn(&T) -> Option<(&'a [BBox], f32)>, { let mut indices = Vec::new(); let mut aprx_energy = 0.0; @@ -34,7 +35,15 @@ impl LightArray { } impl LightAccel for LightArray { - fn select(&self, inc: Vector, pos: Point, nor: Normal, sc: &SurfaceClosure, time: f32, n: f32) -> Option<(usize, f32, f32)> { + fn select( + &self, + inc: Vector, + pos: Point, + nor: Normal, + sc: &SurfaceClosure, + time: f32, + n: f32, + ) -> Option<(usize, f32, f32)> { let _ = (inc, pos, nor, sc, time); // Not using these, silence warnings assert!(n >= 0.0 && n <= 1.0); diff --git a/src/accel/light_tree.rs b/src/accel/light_tree.rs index ff2017d..c9de022 100644 --- a/src/accel/light_tree.rs +++ b/src/accel/light_tree.rs @@ -26,8 +26,13 @@ struct Node { } impl<'a> LightTree<'a> { - pub fn from_objects<'b, T, F>(arena: &'a MemArena, objects: &mut [T], info_getter: F) -> LightTree<'a> - where F: 'b + Fn(&T) -> (&'b [BBox], f32) + pub fn from_objects<'b, T, F>( + arena: &'a MemArena, + objects: &mut [T], + info_getter: F, + ) -> LightTree<'a> + where + F: 'b + Fn(&T) -> (&'b [BBox], f32), { let mut builder = LightTreeBuilder::new(); builder.recursive_build(0, 0, objects, &info_getter); @@ -42,7 +47,15 @@ impl<'a> LightTree<'a> { impl<'a> LightAccel for LightTree<'a> { - fn select(&self, inc: Vector, pos: Point, nor: Normal, sc: &SurfaceClosure, time: f32, n: f32) -> Option<(usize, f32, f32)> { + fn select( + &self, + inc: Vector, + pos: Point, + nor: Normal, + sc: &SurfaceClosure, + time: f32, + n: f32, + ) -> Option<(usize, f32, f32)> { if self.nodes.len() == 0 { return None; } @@ -141,8 +154,15 @@ impl LightTreeBuilder { } } - fn recursive_build<'a, T, F>(&mut self, offset: usize, depth: usize, objects: &mut [T], info_getter: &F) -> (usize, (usize, usize)) - where F: 'a + Fn(&T) -> (&'a [BBox], f32) + fn recursive_build<'a, T, F>( + &mut self, + offset: usize, + depth: usize, + objects: &mut [T], + info_getter: &F, + ) -> (usize, (usize, usize)) + where + F: 'a + Fn(&T) -> (&'a 
[BBox], f32), { let me_index = self.nodes.len(); @@ -153,15 +173,12 @@ impl LightTreeBuilder { let bi = self.bounds.len(); let (obj_bounds, energy) = info_getter(&objects[0]); self.bounds.extend(obj_bounds); - self.nodes - .push( - Node { - is_leaf: true, - bounds_range: (bi, self.bounds.len()), - energy: energy, - child_index: offset, - } - ); + self.nodes.push(Node { + is_leaf: true, + bounds_range: (bi, self.bounds.len()), + energy: energy, + child_index: offset, + }); if self.depth < depth { self.depth = depth; @@ -170,21 +187,19 @@ impl LightTreeBuilder { return (me_index, (bi, self.bounds.len())); } else { // Not a leaf node - self.nodes - .push( - Node { - is_leaf: false, - bounds_range: (0, 0), - energy: 0.0, - child_index: 0, - } - ); + self.nodes.push(Node { + is_leaf: false, + bounds_range: (0, 0), + energy: 0.0, + child_index: 0, + }); // Partition objects. let (split_index, _) = sah_split(objects, &|obj_ref| info_getter(obj_ref).0); // Create child nodes - let (_, c1_bounds) = self.recursive_build(offset, depth + 1, &mut objects[..split_index], info_getter); + let (_, c1_bounds) = + self.recursive_build(offset, depth + 1, &mut objects[..split_index], info_getter); let (c2_index, c2_bounds) = self.recursive_build( offset + split_index, depth + 1, diff --git a/src/accel/mod.rs b/src/accel/mod.rs index 8efafe8..2a05378 100644 --- a/src/accel/mod.rs +++ b/src/accel/mod.rs @@ -19,7 +19,15 @@ thread_local! { pub trait LightAccel { /// Returns (index_of_light, selection_pdf, whittled_n) - fn select(&self, inc: Vector, pos: Point, nor: Normal, sc: &SurfaceClosure, time: f32, n: f32) -> Option<(usize, f32, f32)>; + fn select( + &self, + inc: Vector, + pos: Point, + nor: Normal, + sc: &SurfaceClosure, + time: f32, + n: f32, + ) -> Option<(usize, f32, f32)>; fn approximate_energy(&self) -> f32; } diff --git a/src/accel/objects_split.rs b/src/accel/objects_split.rs index 6a1eb88..62ac8c6 100644 --- a/src/accel/objects_split.rs +++ b/src/accel/objects_split.rs @@ -22,7 +22,8 @@ const SPLIT_PLANE_COUNT: usize = 5; /// Returns the index of the partition boundary and the axis that it split on /// (0 = x, 1 = y, 2 = z). pub fn free_sah_split<'a, T, F>(seed: u32, objects: &mut [T], bounder: &F) -> (usize, usize) - where F: Fn(&T) -> &'a [BBox] +where + F: Fn(&T) -> &'a [BBox], { // Generate the planes for splitting let planes = { @@ -65,7 +66,8 @@ pub fn free_sah_split<'a, T, F>(seed: u32, objects: &mut [T], bounder: &F) -> (u // Build SAH bins let sah_bins = { - let mut sah_bins = [[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1]; SPLIT_PLANE_COUNT]; + let mut sah_bins = [[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1]; + SPLIT_PLANE_COUNT]; for obj in objects.iter() { let tb = lerp_slice(bounder(obj), 0.5); let centroid = tb.center().into_vector(); @@ -131,13 +133,11 @@ pub fn free_sah_split<'a, T, F>(seed: u32, objects: &mut [T], bounder: &F) -> (u }; // Partition - let mut split_i = partition( - &mut objects[..], |obj| { - let centroid = lerp_slice(bounder(obj), 0.5).center().into_vector(); - let dist = dot(centroid, plane); - dist < div - } - ); + let mut split_i = partition(&mut objects[..], |obj| { + let centroid = lerp_slice(bounder(obj), 0.5).center().into_vector(); + let dist = dot(centroid, plane); + dist < div + }); if split_i < 1 { split_i = 1; @@ -155,7 +155,8 @@ pub fn free_sah_split<'a, T, F>(seed: u32, objects: &mut [T], bounder: &F) -> (u /// Returns the index of the partition boundary and the axis that it split on /// (0 = x, 1 = y, 2 = z). 
pub fn sah_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize) - where F: Fn(&T) -> &'a [BBox] +where + F: Fn(&T) -> &'a [BBox], { // Get combined object centroid extents let bounds = { @@ -224,13 +225,11 @@ pub fn sah_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize) }; // Partition - let mut split_i = partition( - &mut objects[..], |obj| { - let tb = lerp_slice(bounder(obj), 0.5); - let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5; - centroid < div - } - ); + let mut split_i = partition(&mut objects[..], |obj| { + let tb = lerp_slice(bounder(obj), 0.5); + let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5; + centroid < div + }); if split_i < 1 { split_i = 1; } else if split_i >= objects.len() { @@ -245,7 +244,8 @@ pub fn sah_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize) /// Returns the index of the partition boundary and the axis that it split on /// (0 = x, 1 = y, 2 = z). pub fn bounds_mean_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize) - where F: Fn(&T) -> &'a [BBox] +where + F: Fn(&T) -> &'a [BBox], { // Get combined object bounds let bounds = { @@ -272,13 +272,11 @@ pub fn bounds_mean_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, us let div = (bounds.min.get_n(split_axis) + bounds.max.get_n(split_axis)) * 0.5; // Partition - let mut split_i = partition( - &mut objects[..], |obj| { - let tb = lerp_slice(bounder(obj), 0.5); - let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5; - centroid < div - } - ); + let mut split_i = partition(&mut objects[..], |obj| { + let tb = lerp_slice(bounder(obj), 0.5); + let centroid = (tb.min.get_n(split_axis) + tb.max.get_n(split_axis)) * 0.5; + centroid < div + }); if split_i < 1 { split_i = 1; } else if split_i >= objects.len() { @@ -294,7 +292,8 @@ pub fn bounds_mean_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, us /// Returns the index of the partition boundary and the axis that it split on /// (0 = x, 1 = y, 2 = z). 
pub fn median_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize) - where F: Fn(&T) -> &'a [BBox] +where + F: Fn(&T) -> &'a [BBox], { // Get combined object bounds let bounds = { @@ -322,22 +321,20 @@ pub fn median_split<'a, T, F>(objects: &mut [T], bounder: &F) -> (usize, usize) let place = objects.len() / 2; if place > 0 { place } else { 1 } }; - quick_select( - objects, place, |a, b| { - let tb_a = lerp_slice(bounder(a), 0.5); - let tb_b = lerp_slice(bounder(b), 0.5); - let centroid_a = (tb_a.min.get_n(split_axis) + tb_a.max.get_n(split_axis)) * 0.5; - let centroid_b = (tb_b.min.get_n(split_axis) + tb_b.max.get_n(split_axis)) * 0.5; + quick_select(objects, place, |a, b| { + let tb_a = lerp_slice(bounder(a), 0.5); + let tb_b = lerp_slice(bounder(b), 0.5); + let centroid_a = (tb_a.min.get_n(split_axis) + tb_a.max.get_n(split_axis)) * 0.5; + let centroid_b = (tb_b.min.get_n(split_axis) + tb_b.max.get_n(split_axis)) * 0.5; - if centroid_a < centroid_b { - Ordering::Less - } else if centroid_a == centroid_b { - Ordering::Equal - } else { - Ordering::Greater - } + if centroid_a < centroid_b { + Ordering::Less + } else if centroid_a == centroid_b { + Ordering::Equal + } else { + Ordering::Greater } - ); + }); (place, split_axis) } diff --git a/src/algorithm.rs b/src/algorithm.rs index 81fe7bb..fa6708d 100644 --- a/src/algorithm.rs +++ b/src/algorithm.rs @@ -13,7 +13,8 @@ use lerp::{Lerp, lerp_slice}; /// item and the probability that it would have been selected with a /// random n. pub fn weighted_choice(slc: &[T], n: f32, weight: F) -> (usize, f32) - where F: Fn(&T) -> f32 +where + F: Fn(&T) -> f32, { assert!(slc.len() > 0); @@ -40,7 +41,8 @@ pub fn weighted_choice(slc: &[T], n: f32, weight: F) -> (usize, f32) /// The predicate is executed precisely once on every element in /// the slice, and is allowed to modify the elements. pub fn partition(slc: &mut [T], mut pred: F) -> usize - where F: FnMut(&mut T) -> bool +where + F: FnMut(&mut T) -> bool, { // This version uses raw pointers and pointer arithmetic to squeeze more // performance out of the code. @@ -89,7 +91,8 @@ pub fn partition(slc: &mut [T], mut pred: F) -> usize /// of the array we're currently on: left or right. False means left, /// True means right. pub fn partition_with_side(slc: &mut [T], mut pred: F) -> usize - where F: FnMut(&mut T, bool) -> bool +where + F: FnMut(&mut T, bool) -> bool, { // This version uses raw pointers and pointer arithmetic to squeeze more // performance out of the code. @@ -141,7 +144,8 @@ pub fn partition_with_side(slc: &mut [T], mut pred: F) -> usize /// The predicate is executed precisely once on every element in /// the slices, and is allowed to modify the elements. 
pub fn partition_pair(slc1: &mut [A], slc2: &mut [B], mut pred: F) -> usize - where F: FnMut(usize, &mut A, &mut B) -> bool +where + F: FnMut(usize, &mut A, &mut B) -> bool, { assert!(slc1.len() == slc2.len()); @@ -163,7 +167,8 @@ pub fn partition_pair(slc1: &mut [A], slc2: &mut [B], mut pred: F) -> u ((a1 as usize) - start) / std::mem::size_of::(), &mut *a1, &mut *a2, - ) { + ) + { break; } a1 = a1.offset(1); @@ -180,7 +185,8 @@ pub fn partition_pair(slc1: &mut [A], slc2: &mut [B], mut pred: F) -> u ((b1 as usize) - start) / std::mem::size_of::(), &mut *b1, &mut *b2, - ) { + ) + { break; } } @@ -197,7 +203,8 @@ pub fn partition_pair(slc1: &mut [A], slc2: &mut [B], mut pred: F) -> u /// Partitions the slice of items to place the nth-ordered item in the nth place, /// and the items less than it before and the items more than it after. pub fn quick_select(slc: &mut [T], n: usize, mut order: F) - where F: FnMut(&T, &T) -> Ordering +where + F: FnMut(&T, &T) -> Ordering, { let mut left = 0; let mut right = slc.len(); @@ -208,10 +215,10 @@ pub fn quick_select(slc: &mut [T], n: usize, mut order: F) slc.swap(i, right - 1); let ii = left + - { - let (val, list) = (&mut slc[left..right]).split_last_mut().unwrap(); - partition(list, |n| order(n, val) == Ordering::Less) - }; + { + let (val, list) = (&mut slc[left..right]).split_last_mut().unwrap(); + partition(list, |n| order(n, val) == Ordering::Less) + }; slc.swap(ii, right - 1); if ii == n { @@ -227,8 +234,13 @@ pub fn quick_select(slc: &mut [T], n: usize, mut order: F) } /// Merges two slices of things, appending the result to vec_out -pub fn merge_slices_append(slice1: &[T], slice2: &[T], vec_out: &mut Vec, merge: F) - where F: Fn(&T, &T) -> T +pub fn merge_slices_append( + slice1: &[T], + slice2: &[T], + vec_out: &mut Vec, + merge: F, +) where + F: Fn(&T, &T) -> T, { // Transform the bounding boxes if slice1.len() == 0 || slice2.len() == 0 { @@ -255,7 +267,8 @@ pub fn merge_slices_append(slice1: &[T], slice2: &[T], vec_ou /// Merges two slices of things, storing the result in slice_out. /// Panics if slice_out is not the right size. 
pub fn merge_slices_to(slice1: &[T], slice2: &[T], slice_out: &mut [T], merge: F) - where F: Fn(&T, &T) -> T +where + F: Fn(&T, &T) -> T, { assert!(slice_out.len() == cmp::max(slice1.len(), slice2.len())); @@ -267,7 +280,8 @@ pub fn merge_slices_to(slice1: &[T], slice2: &[T], slice_out: Iterator::zip( slice_out.iter_mut(), Iterator::zip(slice1.iter(), slice2.iter()), - ) { + ) + { *xfo = merge(xf1, xf2); } } else if slice1.len() > slice2.len() { @@ -291,15 +305,13 @@ mod tests { use super::*; fn quick_select_ints(list: &mut [i32], i: usize) { - quick_select( - list, i, |a, b| if a < b { - Ordering::Less - } else if a == b { - Ordering::Equal - } else { - Ordering::Greater - } - ); + quick_select(list, i, |a, b| if a < b { + Ordering::Less + } else if a == b { + Ordering::Equal + } else { + Ordering::Greater + }); } #[test] diff --git a/src/camera.rs b/src/camera.rs index eb9c587..018f577 100644 --- a/src/camera.rs +++ b/src/camera.rs @@ -18,7 +18,13 @@ pub struct Camera<'a> { } impl<'a> Camera<'a> { - pub fn new(arena: &'a MemArena, transforms: Vec, fovs: Vec, mut aperture_radii: Vec, mut focus_distances: Vec) -> Camera<'a> { + pub fn new( + arena: &'a MemArena, + transforms: Vec, + fovs: Vec, + mut aperture_radii: Vec, + mut focus_distances: Vec, + ) -> Camera<'a> { assert!(transforms.len() != 0, "Camera has no transform(s)!"); assert!(fovs.len() != 0, "Camera has no fov(s)!"); @@ -81,8 +87,7 @@ impl<'a> Camera<'a> { (x * tfov) - (orig.x() / focus_distance), (y * tfov) - (orig.y() / focus_distance), 1.0, - ) - .normalized(); + ).normalized(); Ray::new(orig * transform, dir * transform, time, false) } diff --git a/src/color.rs b/src/color.rs index 6d2ff37..f4f8396 100644 --- a/src/color.rs +++ b/src/color.rs @@ -278,7 +278,8 @@ pub fn x_1931(wavelength: f32) -> f32 { let t1 = (wavelength - 442.0) * (if wavelength < 442.0 { 0.0624 } else { 0.0374 }); let t2 = (wavelength - 599.8) * (if wavelength < 599.8 { 0.0264 } else { 0.0323 }); let t3 = (wavelength - 501.1) * (if wavelength < 501.1 { 0.0490 } else { 0.0382 }); - (0.362 * faster_exp(-0.5 * t1 * t1)) + (1.056 * faster_exp(-0.5 * t2 * t2)) - (0.065 * faster_exp(-0.5 * t3 * t3)) + (0.362 * faster_exp(-0.5 * t1 * t1)) + (1.056 * faster_exp(-0.5 * t2 * t2)) - + (0.065 * faster_exp(-0.5 * t3 * t3)) } pub fn y_1931(wavelength: f32) -> f32 { diff --git a/src/image.rs b/src/image.rs index 3a9844c..4b7a02c 100644 --- a/src/image.rs +++ b/src/image.rs @@ -77,7 +77,10 @@ impl Image { } // Clip bucket to image - let max = (cmp::min(max.0, self.res.0 as u32), cmp::min(max.1, self.res.1 as u32)); + let max = ( + cmp::min(max.0, self.res.0 as u32), + cmp::min(max.1, self.res.1 as u32), + ); // Push bucket onto list bucket_list.push((min, max)); @@ -138,7 +141,8 @@ impl Image { let res_y = self.res.1; for y in 0..res_y { for x in 0..res_x { - let (r, g, b) = quantize_tri_255(xyz_to_srgbe(self.get(x, res_y - 1 - y).to_tuple())); + let (r, g, b) = + quantize_tri_255(xyz_to_srgbe(self.get(x, res_y - 1 - y).to_tuple())); image.push(r); image.push(g); image.push(b); @@ -178,8 +182,7 @@ impl Image { .add_channel("G", openexr::PixelType::HALF) .add_channel("B", openexr::PixelType::HALF) .set_compression(openexr::Compression::PIZ_COMPRESSION), - ) - .unwrap(); + ).unwrap(); let mut fb = { // Create the frame buffer @@ -230,11 +233,14 @@ impl<'a> Bucket<'a> { /// encoding to base64. The fourth channel is alpha, and is set to 1.0 for /// all pixels. 
pub fn rgba_base64(&mut self, color_convert: F) -> String - where F: Fn((f32, f32, f32)) -> (f32, f32, f32) + where + F: Fn((f32, f32, f32)) -> (f32, f32, f32), { use base64; use std::slice; - let mut data = Vec::with_capacity((4 * (self.max.0 - self.min.0) * (self.max.1 - self.min.1)) as usize); + let mut data = Vec::with_capacity( + (4 * (self.max.0 - self.min.0) * (self.max.1 - self.min.1)) as usize, + ); for y in self.min.1..self.max.1 { for x in self.min.0..self.max.0 { let color = color_convert(self.get(x, y).to_tuple()); @@ -244,7 +250,8 @@ impl<'a> Bucket<'a> { data.push(1.0); } } - let data_u8 = unsafe { slice::from_raw_parts(&data[0] as *const f32 as *const u8, data.len() * 4) }; + let data_u8 = + unsafe { slice::from_raw_parts(&data[0] as *const f32 as *const u8, data.len() * 4) }; base64::encode(data_u8) } } @@ -256,9 +263,10 @@ impl<'a> Drop for Bucket<'a> { let mut bucket_list = tmp.borrow_mut(); // Find matching bucket and remove it - let i = bucket_list - .iter() - .position(|bucket| (bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 && (bucket.1).0 == self.max.0 && (bucket.1).1 == self.max.1); + let i = bucket_list.iter().position(|bucket| { + (bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 && + (bucket.1).0 == self.max.0 && (bucket.1).1 == self.max.1 + }); bucket_list.swap_remove(i.unwrap()); } } diff --git a/src/lerp.rs b/src/lerp.rs index 7c6fafa..ad7b0b2 100644 --- a/src/lerp.rs +++ b/src/lerp.rs @@ -38,8 +38,9 @@ pub fn lerp_slice(s: &[T], alpha: f32) -> T { } pub fn lerp_slice_with(s: &[T], alpha: f32, f: F) -> T - where T: Copy, - F: Fn(T, T, f32) -> T +where + T: Copy, + F: Fn(T, T, f32) -> T, { debug_assert!(s.len() > 0); debug_assert!(alpha >= 0.0); diff --git a/src/light/distant_disk_light.rs b/src/light/distant_disk_light.rs index f12bffc..9885475 100644 --- a/src/light/distant_disk_light.rs +++ b/src/light/distant_disk_light.rs @@ -19,7 +19,12 @@ pub struct DistantDiskLight<'a> { } impl<'a> DistantDiskLight<'a> { - pub fn new(arena: &'a MemArena, radii: Vec, directions: Vec, colors: Vec) -> DistantDiskLight<'a> { + pub fn new( + arena: &'a MemArena, + radii: Vec, + directions: Vec, + colors: Vec, + ) -> DistantDiskLight<'a> { DistantDiskLight { radii: arena.copy_slice(&radii), directions: arena.copy_slice(&directions), @@ -75,9 +80,10 @@ impl<'a> WorldLightSource for DistantDiskLight<'a> { } fn approximate_energy(&self) -> f32 { - let color: XYZ = self.colors - .iter() - .fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / self.colors.len() as f32; + let color: XYZ = self.colors.iter().fold( + XYZ::new(0.0, 0.0, 0.0), + |a, &b| a + b, + ) / self.colors.len() as f32; color.y } } diff --git a/src/light/mod.rs b/src/light/mod.rs index ed775a6..80aa7e7 100644 --- a/src/light/mod.rs +++ b/src/light/mod.rs @@ -26,7 +26,15 @@ pub trait LightSource: Boundable + Debug + Sync { /// /// Returns: The light arriving at the point arr, the vector to use for /// shadow testing, and the pdf of the sample. - fn sample(&self, space: &Matrix4x4, arr: Point, u: f32, v: f32, wavelength: f32, time: f32) -> (SpectralSample, Vector, f32); + fn sample( + &self, + space: &Matrix4x4, + arr: Point, + u: f32, + v: f32, + wavelength: f32, + time: f32, + ) -> (SpectralSample, Vector, f32); /// Calculates the pdf of sampling the given @@ -37,7 +45,16 @@ pub trait LightSource: Boundable + Debug + Sync { /// are a valid sample for the light source (i.e. hits/lies on the light /// source). No guarantees are made about the correctness of the return /// value if they are not valid. 
- fn sample_pdf(&self, space: &Matrix4x4, arr: Point, sample_dir: Vector, sample_u: f32, sample_v: f32, wavelength: f32, time: f32) -> f32; + fn sample_pdf( + &self, + space: &Matrix4x4, + arr: Point, + sample_dir: Vector, + sample_u: f32, + sample_v: f32, + wavelength: f32, + time: f32, + ) -> f32; /// Returns the color emitted in the given direction from the @@ -48,7 +65,15 @@ pub trait LightSource: Boundable + Debug + Sync { /// - v: Random parameter V. /// - wavelength: The hero wavelength of light to sample at. /// - time: The time to sample at. - fn outgoing(&self, space: &Matrix4x4, dir: Vector, u: f32, v: f32, wavelength: f32, time: f32) -> SpectralSample; + fn outgoing( + &self, + space: &Matrix4x4, + dir: Vector, + u: f32, + v: f32, + wavelength: f32, + time: f32, + ) -> SpectralSample; /// Returns whether the light has a delta distribution. diff --git a/src/light/rectangle_light.rs b/src/light/rectangle_light.rs index 6a49a4e..007970d 100644 --- a/src/light/rectangle_light.rs +++ b/src/light/rectangle_light.rs @@ -18,17 +18,19 @@ pub struct RectangleLight<'a> { } impl<'a> RectangleLight<'a> { - pub fn new<'b>(arena: &'b MemArena, dimensions: Vec<(f32, f32)>, colors: Vec) -> RectangleLight<'b> { + pub fn new<'b>( + arena: &'b MemArena, + dimensions: Vec<(f32, f32)>, + colors: Vec, + ) -> RectangleLight<'b> { let bbs: Vec<_> = dimensions .iter() - .map( - |d| { - BBox { - min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0), - max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0), - } + .map(|d| { + BBox { + min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0), + max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0), } - ) + }) .collect(); RectangleLight { dimensions: arena.copy_slice(&dimensions), @@ -39,7 +41,15 @@ impl<'a> RectangleLight<'a> { } impl<'a> LightSource for RectangleLight<'a> { - fn sample(&self, space: &Matrix4x4, arr: Point, u: f32, v: f32, wavelength: f32, time: f32) -> (SpectralSample, Vector, f32) { + fn sample( + &self, + space: &Matrix4x4, + arr: Point, + u: f32, + v: f32, + wavelength: f32, + time: f32, + ) -> (SpectralSample, Vector, f32) { // Calculate time interpolated values let dim = lerp_slice(&self.dimensions, time); let col = lerp_slice(&self.colors, time); @@ -98,7 +108,16 @@ impl<'a> LightSource for RectangleLight<'a> { return (spectral_sample, shadow_vec, pdf as f32); } - fn sample_pdf(&self, space: &Matrix4x4, arr: Point, sample_dir: Vector, sample_u: f32, sample_v: f32, wavelength: f32, time: f32) -> f32 { + fn sample_pdf( + &self, + space: &Matrix4x4, + arr: Point, + sample_dir: Vector, + sample_u: f32, + sample_v: f32, + wavelength: f32, + time: f32, + ) -> f32 { // We're not using these, silence warnings let _ = (sample_dir, sample_u, sample_v, wavelength); @@ -125,7 +144,15 @@ impl<'a> LightSource for RectangleLight<'a> { 1.0 / (area_1 + area_2) } - fn outgoing(&self, space: &Matrix4x4, dir: Vector, u: f32, v: f32, wavelength: f32, time: f32) -> SpectralSample { + fn outgoing( + &self, + space: &Matrix4x4, + dir: Vector, + u: f32, + v: f32, + wavelength: f32, + time: f32, + ) -> SpectralSample { // We're not using these, silence warnings let _ = (space, dir, u, v); @@ -143,9 +170,10 @@ impl<'a> LightSource for RectangleLight<'a> { } fn approximate_energy(&self) -> f32 { - let color: XYZ = self.colors - .iter() - .fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / self.colors.len() as f32; + let color: XYZ = self.colors.iter().fold( + XYZ::new(0.0, 0.0, 0.0), + |a, &b| a + b, + ) / self.colors.len() as f32; color.y } } diff --git a/src/light/sphere_light.rs 
b/src/light/sphere_light.rs index 57191f4..53b0aca 100644 --- a/src/light/sphere_light.rs +++ b/src/light/sphere_light.rs @@ -24,14 +24,12 @@ impl<'a> SphereLight<'a> { pub fn new<'b>(arena: &'b MemArena, radii: Vec, colors: Vec) -> SphereLight<'b> { let bbs: Vec<_> = radii .iter() - .map( - |r| { - BBox { - min: Point::new(-*r, -*r, -*r), - max: Point::new(*r, *r, *r), - } + .map(|r| { + BBox { + min: Point::new(-*r, -*r, -*r), + max: Point::new(*r, *r, *r), } - ) + }) .collect(); SphereLight { radii: arena.copy_slice(&radii), @@ -42,7 +40,15 @@ impl<'a> SphereLight<'a> { } impl<'a> LightSource for SphereLight<'a> { - fn sample(&self, space: &Matrix4x4, arr: Point, u: f32, v: f32, wavelength: f32, time: f32) -> (SpectralSample, Vector, f32) { + fn sample( + &self, + space: &Matrix4x4, + arr: Point, + u: f32, + v: f32, + wavelength: f32, + time: f32, + ) -> (SpectralSample, Vector, f32) { // TODO: track fp error due to transforms let arr = arr * *space; let pos = Point::new(0.0, 0.0, 0.0); @@ -96,7 +102,8 @@ impl<'a> LightSource for SphereLight<'a> { ); // Calculate the final values and return everything. - let shadow_vec = ((x * sample.x()) + (y * sample.y()) + (z * sample.z())) * space.inverse(); + let shadow_vec = ((x * sample.x()) + (y * sample.y()) + (z * sample.z())) * + space.inverse(); let pdf = uniform_sample_cone_pdf(cos_theta_max); let spectral_sample = (col * surface_area_inv as f32).to_spectral_sample(wavelength); return (spectral_sample, shadow_vec, pdf as f32); @@ -109,7 +116,16 @@ impl<'a> LightSource for SphereLight<'a> { } } - fn sample_pdf(&self, space: &Matrix4x4, arr: Point, sample_dir: Vector, sample_u: f32, sample_v: f32, wavelength: f32, time: f32) -> f32 { + fn sample_pdf( + &self, + space: &Matrix4x4, + arr: Point, + sample_dir: Vector, + sample_u: f32, + sample_v: f32, + wavelength: f32, + time: f32, + ) -> f32 { // We're not using these, silence warnings let _ = (sample_dir, sample_u, sample_v, wavelength); @@ -132,7 +148,15 @@ impl<'a> LightSource for SphereLight<'a> { } } - fn outgoing(&self, space: &Matrix4x4, dir: Vector, u: f32, v: f32, wavelength: f32, time: f32) -> SpectralSample { + fn outgoing( + &self, + space: &Matrix4x4, + dir: Vector, + u: f32, + v: f32, + wavelength: f32, + time: f32, + ) -> SpectralSample { // We're not using these, silence warnings let _ = (space, dir, u, v); @@ -148,9 +172,10 @@ impl<'a> LightSource for SphereLight<'a> { } fn approximate_energy(&self) -> f32 { - let color: XYZ = self.colors - .iter() - .fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b) / self.colors.len() as f32; + let color: XYZ = self.colors.iter().fold( + XYZ::new(0.0, 0.0, 0.0), + |a, &b| a + b, + ) / self.colors.len() as f32; color.y } } diff --git a/src/main.rs b/src/main.rs index b96868f..864d16d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -83,7 +83,7 @@ fn main() { .value_name("FILE") .help("Input .psy file") .takes_value(true) - .required_unless_one(&["dev", "use_stdin"]) + .required_unless_one(&["dev", "use_stdin"]), ) .arg( Arg::with_name("spp") @@ -92,43 +92,45 @@ fn main() { .value_name("N") .help("Number of samples per pixel") .takes_value(true) - .validator( - |s| { - usize::from_str(&s) - .and(Ok(())) - .or(Err("must be an integer".to_string())) - } - ) + .validator(|s| { + usize::from_str(&s).and(Ok(())).or(Err( + "must be an integer" + .to_string(), + )) + }), ) .arg( Arg::with_name("max_bucket_samples") .short("b") .long("spb") .value_name("N") - .help("Target number of samples per bucket (determines bucket size)") - .takes_value(true) - 
.validator( - |s| { - usize::from_str(&s) - .and(Ok(())) - .or(Err("must be an integer".to_string())) - } + .help( + "Target number of samples per bucket (determines bucket size)", ) + .takes_value(true) + .validator(|s| { + usize::from_str(&s).and(Ok(())).or(Err( + "must be an integer" + .to_string(), + )) + }), ) .arg( Arg::with_name("crop") .long("crop") .value_name("X1 Y1 X2 Y2") - .help("Only render the image between pixel coordinates (X1, Y1) and (X2, Y2). Coordinates are zero-indexed and inclusive.") + .help( + "Only render the image between pixel coordinates (X1, Y1) \ + and (X2, Y2). Coordinates are zero-indexed and inclusive.", + ) .takes_value(true) .number_of_values(4) - .validator( - |s| { - usize::from_str(&s) - .and(Ok(())) - .or(Err("must be four integers".to_string())) - } - ) + .validator(|s| { + usize::from_str(&s).and(Ok(())).or(Err( + "must be four integers" + .to_string(), + )) + }), ) .arg( Arg::with_name("threads") @@ -137,38 +139,33 @@ fn main() { .value_name("N") .help( "Number of threads to render with. Defaults to the number of logical \ - cores on the system." + cores on the system.", ) .takes_value(true) - .validator( - |s| { - usize::from_str(&s) - .and(Ok(())) - .or(Err("must be an integer".to_string())) - } - ) - ) - .arg( - Arg::with_name("stats") - .long("stats") - .help("Print additional statistics about rendering") - ) - .arg( - Arg::with_name("dev") - .long("dev") - .help("Show useful dev/debug info.") + .validator(|s| { + usize::from_str(&s).and(Ok(())).or(Err( + "must be an integer" + .to_string(), + )) + }), ) + .arg(Arg::with_name("stats").long("stats").help( + "Print additional statistics about rendering", + )) + .arg(Arg::with_name("dev").long("dev").help( + "Show useful dev/debug info.", + )) .arg( Arg::with_name("serialized_output") .long("serialized_output") .help("Serialize and send render output to standard output.") - .hidden(true) + .hidden(true), ) .arg( Arg::with_name("use_stdin") .long("use_stdin") .help("Take scene file in from stdin instead of a file path.") - .hidden(true) + .hidden(true), ) .get_matches(); @@ -186,19 +183,21 @@ fn main() { return; } - let crop = args.values_of("crop") - .map( - |mut vals| { - let coords = (u32::from_str(vals.next().unwrap()).unwrap(), u32::from_str(vals.next().unwrap()).unwrap(), u32::from_str(vals.next().unwrap()).unwrap(), u32::from_str(vals.next().unwrap()).unwrap()); - if coords.0 > coords.2 { - panic!("Argument '--crop': X1 must be less than or equal to X2"); - } - if coords.1 > coords.3 { - panic!("Argument '--crop': Y1 must be less than or equal to Y2"); - } - coords - } + let crop = args.values_of("crop").map(|mut vals| { + let coords = ( + u32::from_str(vals.next().unwrap()).unwrap(), + u32::from_str(vals.next().unwrap()).unwrap(), + u32::from_str(vals.next().unwrap()).unwrap(), + u32::from_str(vals.next().unwrap()).unwrap(), ); + if coords.0 > coords.2 { + panic!("Argument '--crop': X1 must be less than or equal to X2"); + } + if coords.1 > coords.3 { + panic!("Argument '--crop': Y1 must be less than or equal to Y2"); + } + coords + }); // Parse data tree of scene file if !args.is_present("serialized_output") { @@ -214,9 +213,9 @@ fn main() { let mut stdin = tmp.lock(); let mut buf = vec![0u8; 4096]; loop { - let count = stdin - .read(&mut buf) - .expect("Unexpected end of scene input."); + let count = stdin.read(&mut buf).expect( + "Unexpected end of scene input.", + ); let start = if input.len() < 11 { 0 } else { @@ -227,7 +226,9 @@ fn main() { let mut done = false; let mut trunc_len = 
0; - if let nom::IResult::Done(remaining, _) = take_until!(&input[start..end], "__PSY_EOF__") { + if let nom::IResult::Done(remaining, _) = + take_until!(&input[start..end], "__PSY_EOF__") + { done = true; trunc_len = input.len() - remaining.len(); } @@ -261,12 +262,10 @@ fn main() { } let arena = MemArena::with_min_block_size((1 << 20) * 4); - let mut r = parse_scene(&arena, child).unwrap_or_else( - |e| { - e.print(&psy_contents); - panic!("Parse error."); - } - ); + let mut r = parse_scene(&arena, child).unwrap_or_else(|e| { + e.print(&psy_contents); + panic!("Parse error."); + }); if let Some(spp) = args.value_of("spp") { if !args.is_present("serialized_output") { @@ -275,11 +274,12 @@ fn main() { r.spp = usize::from_str(&spp).unwrap(); } - let max_samples_per_bucket = if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") { - u32::from_str(&max_samples_per_bucket).unwrap() - } else { - 4096 - }; + let max_samples_per_bucket = + if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") { + u32::from_str(&max_samples_per_bucket).unwrap() + } else { + 4096 + }; let thread_count = if let Some(threads) = args.value_of("threads") { u32::from_str(&threads).unwrap() @@ -331,7 +331,9 @@ fn main() { if !args.is_present("serialized_output") { println!("Writing image to disk into '{}'...", r.output_file); if r.output_file.ends_with(".png") { - image.write_png(Path::new(&r.output_file)).expect("Failed to write png..."); + image.write_png(Path::new(&r.output_file)).expect( + "Failed to write png...", + ); } else if r.output_file.ends_with(".exr") { image.write_exr(Path::new(&r.output_file)); } else { diff --git a/src/math.rs b/src/math.rs index 4194b78..ab674af 100644 --- a/src/math.rs +++ b/src/math.rs @@ -135,7 +135,9 @@ pub fn fast_pow2(p: f32) -> f32 { let w: i32 = clipp as i32; let z: f32 = clipp - w as f32 + offset; - let i: u32 = ((1 << 23) as f32 * (clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z)) as u32; + let i: u32 = ((1 << 23) as f32 * + (clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z)) as + u32; unsafe { transmute_copy::(&i) } } diff --git a/src/parse/data_tree.rs b/src/parse/data_tree.rs index 71bca76..5dcb8fe 100644 --- a/src/parse/data_tree.rs +++ b/src/parse/data_tree.rs @@ -35,14 +35,12 @@ impl<'a> DataTree<'a> { remaining_text = skip_ws_and_comments(remaining_text); if remaining_text.1.len() == 0 { - return Ok( - DataTree::Internal { - type_name: "ROOT", - ident: None, - children: items, - byte_offset: 0, - } - ); + return Ok(DataTree::Internal { + type_name: "ROOT", + ident: None, + children: items, + byte_offset: 0, + }); } else { // If the whole text wasn't parsed, something went wrong. return Err(ParseError::Other((0, "Failed to parse the entire string."))); @@ -106,7 +104,10 @@ impl<'a> DataTree<'a> { } } - pub fn iter_internal_children_with_type(&'a self, type_name: &'static str) -> DataTreeFilterInternalIter<'a> { + pub fn iter_internal_children_with_type( + &'a self, + type_name: &'static str, + ) -> DataTreeFilterInternalIter<'a> { if let &DataTree::Internal { ref children, .. } = self { DataTreeFilterInternalIter { type_name: type_name, @@ -120,7 +121,10 @@ impl<'a> DataTree<'a> { } } - pub fn iter_leaf_children_with_type(&'a self, type_name: &'static str) -> DataTreeFilterLeafIter<'a> { + pub fn iter_leaf_children_with_type( + &'a self, + type_name: &'static str, + ) -> DataTreeFilterLeafIter<'a> { if let &DataTree::Internal { ref children, .. 
} = self { DataTreeFilterLeafIter { type_name: type_name, @@ -137,11 +141,12 @@ impl<'a> DataTree<'a> { // For unit tests fn internal_data_or_panic(&'a self) -> (&'a str, Option<&'a str>, &'a Vec>) { if let DataTree::Internal { - type_name, - ident, - ref children, - byte_offset: _, - } = *self { + type_name, + ident, + ref children, + byte_offset: _, + } = *self + { (type_name, ident, children) } else { panic!("Expected DataTree::Internal, found DataTree::Leaf") @@ -149,10 +154,11 @@ impl<'a> DataTree<'a> { } fn leaf_data_or_panic(&'a self) -> (&'a str, &'a str) { if let DataTree::Leaf { - type_name, - contents, - byte_offset: _, - } = *self { + type_name, + contents, + byte_offset: _, + } = *self + { (type_name, contents) } else { panic!("Expected DataTree::Leaf, found DataTree::Internal") @@ -312,17 +318,15 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> { children.push(node); } if let (Token::CloseInner, text4) = next_token(text_remaining) { - return Ok( - Some( - (DataTree::Internal { - type_name: type_name, - ident: Some(n), - children: children, - byte_offset: text1.0, - }, - text4) - ) - ); + return Ok(Some(( + DataTree::Internal { + type_name: type_name, + ident: Some(n), + children: children, + byte_offset: text1.0, + }, + text4, + ))); } else { return Err(ParseError::MissingCloseInternal(text_remaining.0)); } @@ -341,17 +345,15 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> { } if let (Token::CloseInner, text3) = next_token(text_remaining) { - return Ok( - Some( - (DataTree::Internal { - type_name: type_name, - ident: None, - children: children, - byte_offset: text1.0, - }, - text3) - ) - ); + return Ok(Some(( + DataTree::Internal { + type_name: type_name, + ident: None, + children: children, + byte_offset: text1.0, + }, + text3, + ))); } else { return Err(ParseError::MissingCloseInternal(text_remaining.0)); } @@ -361,16 +363,14 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> { (Token::OpenLeaf, text2) => { let (contents, text3) = parse_leaf_content(text2); if let (Token::CloseLeaf, text4) = next_token(text3) { - return Ok( - Some( - (DataTree::Leaf { - type_name: type_name, - contents: contents, - byte_offset: text1.0, - }, - text4) - ) - ); + return Ok(Some(( + DataTree::Leaf { + type_name: type_name, + contents: contents, + byte_offset: text1.0, + }, + text4, + ))); } else { return Err(ParseError::MissingCloseLeaf(text3.0)); } @@ -407,7 +407,10 @@ fn parse_leaf_content<'a>(source_text: (usize, &'a str)) -> (&'a str, (usize, &' si = source_text.1.len(); } - return (&source_text.1[0..si], (source_text.0 + si, &source_text.1[si..])); + return (&source_text.1[0..si], ( + source_text.0 + si, + &source_text.1[si..], + )); } @@ -454,7 +457,10 @@ fn next_token<'a>(source_text: (usize, &'a str)) -> (Token<'a>, (usize, &'a str) si = text1.1.len(); } - return (Token::Ident(&text1.1[0..si]), (text1.0 + si, &text1.1[si..])); + return ( + Token::Ident(&text1.1[0..si]), + (text1.0 + si, &text1.1[si..]), + ); } _ => { @@ -474,7 +480,10 @@ fn next_token<'a>(source_text: (usize, &'a str)) -> (Token<'a>, (usize, &'a str) si = text1.1.len(); } - return (Token::TypeName(&text1.1[0..si]), (text1.0 + si, &text1.1[si..])); + return (Token::TypeName(&text1.1[0..si]), ( + text1.0 + si, + &text1.1[si..], + )); } } @@ -614,10 +623,10 @@ mod tests { fn tokenize_5() { let input = (0, " $hi\\ t\\#he\\[re "); - assert_eq!( - next_token(input), - (Token::Ident("$hi\\ t\\#he\\[re"), (15, " ")) - ); + assert_eq!(next_token(input), ( + 
Token::Ident("$hi\\ t\\#he\\[re"), + (15, " "), + )); } #[test] @@ -648,18 +657,18 @@ mod tests { let (token7, input8) = next_token(input7); let (token8, input9) = next_token(input8); - assert_eq!( - (token1, input2), - (Token::TypeName("Thing"), (5, " $yar { # A comment\n\tThing2 []\n}")) - ); - assert_eq!( - (token2, input3), - (Token::Ident("$yar"), (10, " { # A comment\n\tThing2 []\n}")) - ); - assert_eq!( - (token3, input4), - (Token::OpenInner, (12, " # A comment\n\tThing2 []\n}")) - ); + assert_eq!((token1, input2), (Token::TypeName("Thing"), ( + 5, + " $yar { # A comment\n\tThing2 []\n}", + ))); + assert_eq!((token2, input3), (Token::Ident("$yar"), ( + 10, + " { # A comment\n\tThing2 []\n}", + ))); + assert_eq!((token3, input4), (Token::OpenInner, ( + 12, + " # A comment\n\tThing2 []\n}", + ))); assert_eq!( (token4, input5), (Token::TypeName("Thing2"), (32, " []\n}")) @@ -700,9 +709,8 @@ mod tests { A [] A {} B {} - "# - ) - .unwrap(); + "#, + ).unwrap(); let i = dt.iter_children_with_type("A"); assert_eq!(i.count(), 3); @@ -717,9 +725,8 @@ mod tests { A [] A {} B {} - "# - ) - .unwrap(); + "#, + ).unwrap(); let i = dt.iter_internal_children_with_type("A"); assert_eq!(i.count(), 2); @@ -734,9 +741,8 @@ mod tests { A {} A [] B {} - "# - ) - .unwrap(); + "#, + ).unwrap(); let i = dt.iter_leaf_children_with_type("A"); assert_eq!(i.count(), 2); diff --git a/src/parse/psy.rs b/src/parse/psy.rs index a376092..232ea9e 100644 --- a/src/parse/psy.rs +++ b/src/parse/psy.rs @@ -91,61 +91,54 @@ fn line_count_to_byte_offset(text: &str, offset: usize) -> usize { /// Takes in a DataTree representing a Scene node and returns -pub fn parse_scene<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, PsyParseError> { +pub fn parse_scene<'a>( + arena: &'a MemArena, + tree: &'a DataTree, +) -> Result, PsyParseError> { // Verify we have the right number of each section if tree.iter_children_with_type("Output").count() != 1 { let count = tree.iter_children_with_type("Output").count(); - return Err( - PsyParseError::WrongNodeCount( - tree.byte_offset(), - "Scene should have precisely one Output \ + return Err(PsyParseError::WrongNodeCount( + tree.byte_offset(), + "Scene should have precisely one Output \ section.", - count, - ) - ); + count, + )); } if tree.iter_children_with_type("RenderSettings").count() != 1 { let count = tree.iter_children_with_type("RenderSettings").count(); - return Err( - PsyParseError::WrongNodeCount( - tree.byte_offset(), - "Scene should have precisely one \ + return Err(PsyParseError::WrongNodeCount( + tree.byte_offset(), + "Scene should have precisely one \ RenderSettings section.", - count, - ) - ); + count, + )); } if tree.iter_children_with_type("Camera").count() != 1 { let count = tree.iter_children_with_type("Camera").count(); - return Err( - PsyParseError::WrongNodeCount( - tree.byte_offset(), - "Scene should have precisely one Camera \ + return Err(PsyParseError::WrongNodeCount( + tree.byte_offset(), + "Scene should have precisely one Camera \ section.", - count, - ) - ); + count, + )); } if tree.iter_children_with_type("World").count() != 1 { let count = tree.iter_children_with_type("World").count(); - return Err( - PsyParseError::WrongNodeCount( - tree.byte_offset(), - "Scene should have precisely one World section.", - count, - ) - ); + return Err(PsyParseError::WrongNodeCount( + tree.byte_offset(), + "Scene should have precisely one World section.", + count, + )); } if tree.iter_children_with_type("Assembly").count() != 1 { let count = 
tree.iter_children_with_type("Assembly").count(); - return Err( - PsyParseError::WrongNodeCount( - tree.byte_offset(), - "Scene should have precisely one Root Assembly \ + return Err(PsyParseError::WrongNodeCount( + tree.byte_offset(), + "Scene should have precisely one Root Assembly \ section.", - count, - ) - ); + count, + )); } // Parse output info @@ -155,7 +148,7 @@ pub fn parse_scene<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result(arena: &'a MemArena, tree: &'a DataTree) -> Result Result { // Trim and validate let tc = contents.trim(); if tc.chars().count() < 2 { - return Err( - PsyParseError::IncorrectLeafData( - byte_offset, - "File path format is \ + return Err(PsyParseError::IncorrectLeafData( + byte_offset, + "File path format is \ incorrect.", - ) - ); + )); } if tc.chars().nth(0).unwrap() != '"' || tc.chars().last().unwrap() != '"' { - return Err( - PsyParseError::IncorrectLeafData( - byte_offset, - "File paths must be \ + return Err(PsyParseError::IncorrectLeafData( + byte_offset, + "File paths must be \ surrounded by quotes.", - ) - ); + )); } let len = tc.len(); let tc = &tc[1..len - 1]; @@ -253,16 +245,17 @@ fn parse_output_info(tree: &DataTree) -> Result { if found_path { return Ok((path)); } else { - return Err(PsyParseError::MissingNode(tree.byte_offset(), "Output section must contain a Path.")); + return Err(PsyParseError::MissingNode( + tree.byte_offset(), + "Output section must contain a Path.", + )); } } else { - return Err( - PsyParseError::ExpectedInternalNode( - tree.byte_offset(), - "Output section should be an internal \ + return Err(PsyParseError::ExpectedInternalNode( + tree.byte_offset(), + "Output section should be an internal \ node, containing at least a Path.", - ) - ); + )); }; } @@ -285,18 +278,18 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP contents, byte_offset, } if type_name == "Resolution" => { - if let IResult::Done(_, (w, h)) = closure!(terminated!(tuple!(ws_u32, ws_u32), nom::eof))(contents.as_bytes()) { + if let IResult::Done(_, (w, h)) = + closure!(terminated!(tuple!(ws_u32, ws_u32), nom::eof))(contents.as_bytes()) + { found_res = true; res = (w, h); } else { // Found Resolution, but its contents is not in the right format - return Err( - PsyParseError::IncorrectLeafData( - byte_offset, - "Resolution should be specified with two \ + return Err(PsyParseError::IncorrectLeafData( + byte_offset, + "Resolution should be specified with two \ integers in the form '[width height]'.", - ) - ); + )); } } @@ -311,14 +304,12 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP spp = n; } else { // Found SamplesPerPixel, but its contents is not in the right format - return Err( - PsyParseError::IncorrectLeafData( - byte_offset, - "SamplesPerPixel should be \ + return Err(PsyParseError::IncorrectLeafData( + byte_offset, + "SamplesPerPixel should be \ an integer specified in \ the form '[samples]'.", - ) - ); + )); } } @@ -332,14 +323,12 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP seed = n; } else { // Found Seed, but its contents is not in the right format - return Err( - PsyParseError::IncorrectLeafData( - byte_offset, - "Seed should be an integer \ + return Err(PsyParseError::IncorrectLeafData( + byte_offset, + "Seed should be an integer \ specified in the form \ '[samples]'.", - ) - ); + )); } } @@ -350,23 +339,19 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP if found_res && found_spp { return 
Ok((res, spp, seed)); } else { - return Err( - PsyParseError::MissingNode( - tree.byte_offset(), - "RenderSettings must have both Resolution and \ + return Err(PsyParseError::MissingNode( + tree.byte_offset(), + "RenderSettings must have both Resolution and \ SamplesPerPixel specified.", - ) - ); + )); } } else { - return Err( - PsyParseError::ExpectedInternalNode( - tree.byte_offset(), - "RenderSettings section should be an \ + return Err(PsyParseError::ExpectedInternalNode( + tree.byte_offset(), + "RenderSettings section should be an \ internal node, containing at least \ Resolution and SamplesPerPixel.", - ) - ); + )); }; } @@ -393,14 +378,12 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result(arena: &'a MemArena, tree: &'a DataTree) -> Result(arena: &'a MemArena, tree: &'a DataTree) -> Result(arena: &'a MemArena, tree: &'a DataTree) -> Result(arena: &'a MemArena, tree: &'a DataTree) -> Result, // Parse background shader let bgs = { if tree.iter_children_with_type("BackgroundShader").count() != 1 { - return Err( - PsyParseError::WrongNodeCount( - tree.byte_offset(), - "World should have precisely one BackgroundShader section.", - tree.iter_children_with_type("BackgroundShader").count(), - ) - ); + return Err(PsyParseError::WrongNodeCount( + tree.byte_offset(), + "World should have precisely one BackgroundShader section.", + tree.iter_children_with_type("BackgroundShader").count(), + )); } tree.iter_children_with_type("BackgroundShader") .nth(0) @@ -502,25 +483,23 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, }; let bgs_type = { if bgs.iter_children_with_type("Type").count() != 1 { - return Err( - PsyParseError::WrongNodeCount( - bgs.byte_offset(), - "BackgroundShader should have \ + return Err(PsyParseError::WrongNodeCount( + bgs.byte_offset(), + "BackgroundShader should have \ precisely one Type specified.", - bgs.iter_children_with_type("Type").count(), - ) - ); + bgs.iter_children_with_type("Type").count(), + )); } - if let &DataTree::Leaf { contents, .. } = bgs.iter_children_with_type("Type").nth(0).unwrap() { + if let &DataTree::Leaf { contents, .. } = + bgs.iter_children_with_type("Type").nth(0).unwrap() + { contents.trim() } else { - return Err( - PsyParseError::ExpectedLeafNode( - bgs.byte_offset(), - "BackgroundShader's Type should be a \ + return Err(PsyParseError::ExpectedLeafNode( + bgs.byte_offset(), + "BackgroundShader's Type should be a \ leaf node.", - ) - ); + )); } }; match bgs_type { @@ -529,40 +508,37 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, contents, byte_offset, .. - }) = bgs.iter_children_with_type("Color").nth(0) { - if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.trim().as_bytes()) { + }) = bgs.iter_children_with_type("Color").nth(0) + { + if let IResult::Done(_, color) = + closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.trim().as_bytes()) + { // TODO: proper color space management, not just assuming // rec.709. 
background_color = XYZ::from_tuple(rec709_e_to_xyz(color)); } else { - return Err( - PsyParseError::IncorrectLeafData( - byte_offset, - "Color should be specified \ + return Err(PsyParseError::IncorrectLeafData( + byte_offset, + "Color should be specified \ with three decimal numbers \ in the form '[R G B]'.", - ) - ); + )); } } else { - return Err( - PsyParseError::MissingNode( - bgs.byte_offset(), - "BackgroundShader's Type is Color, \ + return Err(PsyParseError::MissingNode( + bgs.byte_offset(), + "BackgroundShader's Type is Color, \ but no Color is specified.", - ) - ); + )); } } _ => { - return Err( - PsyParseError::UnknownVariant( - bgs.byte_offset(), - "The specified BackgroundShader Type \ + return Err(PsyParseError::UnknownVariant( + bgs.byte_offset(), + "The specified BackgroundShader Type \ isn't a recognized type.", - ) - ) + )) } } @@ -578,21 +554,17 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, } // Build and return the world - return Ok( - World { - background_color: background_color, - lights: arena.copy_slice(&lights), - } - ); + return Ok(World { + background_color: background_color, + lights: arena.copy_slice(&lights), + }); } else { - return Err( - PsyParseError::ExpectedInternalNode( - tree.byte_offset(), - "World section should be an internal \ + return Err(PsyParseError::ExpectedInternalNode( + tree.byte_offset(), + "World section should be an internal \ node, containing at least a \ BackgroundShader.", - ) - ); + )); } } @@ -601,49 +573,46 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, pub fn parse_matrix(contents: &str) -> Result { if let IResult::Done(_, ns) = - closure!( - terminated!( - tuple!( - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32, - ws_f32 - ), - nom::eof - ) - )(contents.as_bytes()) { - return Ok( - Matrix4x4::new_from_values( - ns.0, - ns.4, - ns.8, - ns.12, - ns.1, - ns.5, - ns.9, - ns.13, - ns.2, - ns.6, - ns.10, - ns.14, - ns.3, - ns.7, - ns.11, - ns.15, - ) - ); + closure!(terminated!( + tuple!( + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32, + ws_f32 + ), + nom::eof + ))(contents.as_bytes()) + { + return Ok(Matrix4x4::new_from_values( + ns.0, + ns.4, + ns.8, + ns.12, + ns.1, + ns.5, + ns.9, + ns.13, + ns.2, + ns.6, + ns.10, + ns.14, + ns.3, + ns.7, + ns.11, + ns.15, + )); } else { return Err(PsyParseError::UnknownError(0)); } diff --git a/src/parse/psy_assembly.rs b/src/parse/psy_assembly.rs index 021aa23..f7d54bd 100644 --- a/src/parse/psy_assembly.rs +++ b/src/parse/psy_assembly.rs @@ -12,7 +12,10 @@ use super::psy_mesh_surface::parse_mesh_surface; use super::psy::{parse_matrix, PsyParseError}; -pub fn parse_assembly<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, PsyParseError> { +pub fn parse_assembly<'a>( + arena: &'a MemArena, + tree: &'a DataTree, +) -> Result, PsyParseError> { let mut builder = AssemblyBuilder::new(arena); if tree.is_internal() { @@ -52,16 +55,14 @@ pub fn parse_assembly<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result(arena: &'a MemArena, tree: &'a DataTree) -> Result, PsyParseError> { +pub fn parse_distant_disk_light<'a>( + arena: &'a MemArena, + tree: &'a DataTree, +) -> Result, PsyParseError> { if let &DataTree::Internal { ref children, .. 
} = tree { let mut radii = Vec::new(); let mut directions = Vec::new(); @@ -44,7 +47,9 @@ pub fn parse_distant_disk_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> contents, byte_offset, } if type_name == "Direction" => { - if let IResult::Done(_, direction) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { + if let IResult::Done(_, direction) = + closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) + { directions.push(Vector::new(direction.0, direction.1, direction.2)); } else { // Found direction, but its contents is not in the right format @@ -58,7 +63,9 @@ pub fn parse_distant_disk_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> contents, byte_offset, } if type_name == "Color" => { - if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { + if let IResult::Done(_, color) = + closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) + { // TODO: handle color space conversions properly. // Probably will need a special color type with its // own parser...? @@ -80,7 +87,10 @@ pub fn parse_distant_disk_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> } -pub fn parse_sphere_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, PsyParseError> { +pub fn parse_sphere_light<'a>( + arena: &'a MemArena, + tree: &'a DataTree, +) -> Result, PsyParseError> { if let &DataTree::Internal { ref children, .. } = tree { let mut radii = Vec::new(); let mut colors = Vec::new(); @@ -108,7 +118,9 @@ pub fn parse_sphere_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result contents, byte_offset, } if type_name == "Color" => { - if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { + if let IResult::Done(_, color) = + closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) + { // TODO: handle color space conversions properly. // Probably will need a special color type with its // own parser...? @@ -129,7 +141,10 @@ pub fn parse_sphere_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result } } -pub fn parse_rectangle_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, PsyParseError> { +pub fn parse_rectangle_light<'a>( + arena: &'a MemArena, + tree: &'a DataTree, +) -> Result, PsyParseError> { if let &DataTree::Internal { ref children, .. } = tree { let mut dimensions = Vec::new(); let mut colors = Vec::new(); @@ -143,7 +158,9 @@ pub fn parse_rectangle_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Res contents, byte_offset, } if type_name == "Dimensions" => { - if let IResult::Done(_, radius) = closure!(tuple!(ws_f32, ws_f32))(contents.as_bytes()) { + if let IResult::Done(_, radius) = + closure!(tuple!(ws_f32, ws_f32))(contents.as_bytes()) + { dimensions.push(radius); } else { // Found dimensions, but its contents is not in the right format @@ -157,7 +174,9 @@ pub fn parse_rectangle_light<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Res contents, byte_offset, } if type_name == "Color" => { - if let IResult::Done(_, color) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) { + if let IResult::Done(_, color) = + closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) + { // TODO: handle color space conversions properly. // Probably will need a special color type with its // own parser...? 
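The light parsers above all funnel leaf contents through the same closure!(tuple!(ws_f32, ws_f32, ws_f32)) pattern to read a whitespace-separated pair or triple of floats. A minimal, dependency-free sketch of just that happy path, assuming ws_f32 is a whitespace-tolerant f32 parser (the nom-based version in the codebase also reports how much input remains, which the mesh parser below relies on; this sketch ignores trailing input):

fn parse_f32_triple(contents: &str) -> Option<(f32, f32, f32)> {
    // Split on whitespace and require three parseable floats; anything else
    // maps to the "contents is not in the right format" error branches above.
    let mut it = contents.split_whitespace().map(str::parse::<f32>);
    match (it.next(), it.next(), it.next()) {
        (Some(Ok(a)), Some(Ok(b)), Some(Ok(c))) => Some((a, b, c)),
        _ => None,
    }
}

For example, parse_f32_triple("0.0 -1.0 0.0") would yield the direction triple pushed into directions; a two-element variant of the same idea backs the rectangle light's Dimensions leaf.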
diff --git a/src/parse/psy_mesh_surface.rs b/src/parse/psy_mesh_surface.rs index f9d6114..a960cf5 100644 --- a/src/parse/psy_mesh_surface.rs +++ b/src/parse/psy_mesh_surface.rs @@ -21,7 +21,10 @@ use super::psy::PsyParseError; // accel: BVH, // } -pub fn parse_mesh_surface<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result, PsyParseError> { +pub fn parse_mesh_surface<'a>( + arena: &'a MemArena, + tree: &'a DataTree, +) -> Result, PsyParseError> { let mut verts = Vec::new(); let mut face_vert_counts = Vec::new(); let mut face_vert_indices = Vec::new(); @@ -37,7 +40,9 @@ pub fn parse_mesh_surface<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result // Collect verts for this time sample let mut vert_count = 0; - while let IResult::Done(remaining, vert) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text) { + while let IResult::Done(remaining, vert) = + closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text) + { raw_text = remaining; verts.push(Point::new(vert.0, vert.1, vert.2)); @@ -88,7 +93,11 @@ pub fn parse_mesh_surface<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result // Store all the time samples of each triangle contiguously for time_sample in 0..time_samples { let start_vi = vert_count * time_sample; - triangles.push((verts[start_vi + face_vert_indices[v1]], verts[start_vi + face_vert_indices[v1 + vi + 1]], verts[start_vi + face_vert_indices[v1 + vi + 2]])); + triangles.push(( + verts[start_vi + face_vert_indices[v1]], + verts[start_vi + face_vert_indices[v1 + vi + 1]], + verts[start_vi + face_vert_indices[v1 + vi + 2]], + )); } } } else { diff --git a/src/renderer.rs b/src/renderer.rs index 4b4e8af..4e8671a 100644 --- a/src/renderer.rs +++ b/src/renderer.rs @@ -68,7 +68,13 @@ impl RenderStats { } impl<'a> Renderer<'a> { - pub fn render(&self, max_samples_per_bucket: u32, crop: Option<(u32, u32, u32, u32)>, thread_count: u32, do_blender_output: bool) -> (Image, RenderStats) { + pub fn render( + &self, + max_samples_per_bucket: u32, + crop: Option<(u32, u32, u32, u32)>, + thread_count: u32, + do_blender_output: bool, + ) -> (Image, RenderStats) { let mut tpool = Pool::new(thread_count); let image = Image::new(self.resolution.0, self.resolution.1); @@ -97,86 +103,80 @@ impl<'a> Renderer<'a> { }; // Render - tpool.scoped( - |scope| { - // Spawn worker tasks - for _ in 0..thread_count { - let jq = &job_queue; - let ajq = &all_jobs_queued; - let img = ℑ - let pixrenref = &pixels_rendered; - let cstats = &collective_stats; - scope.execute( - move || { - self.render_job( - jq, - ajq, - img, - width * height, - pixrenref, - cstats, - do_blender_output, - ) - } - ); - } - - // Print initial 0.00% progress - print!("0.00%"); - let _ = io::stdout().flush(); - - // Determine bucket size based on the per-thread maximum number of samples to - // calculate at a time. 
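In parse_mesh_surface above, vertices for every time sample are appended into one flat verts Vec, so sample s of vertex i lives at index s * vert_count + i, and each face is fanned into triangles with all time samples of a given triangle pushed back to back. A sketch of that indexing as a hypothetical helper (the real code does this inline, with its own Point type):

type Point = (f32, f32, f32); // stand-in for the project's Point type

// Fan one face into triangles, emitting all `time_samples` motion samples of each
// triangle contiguously, which is what lets a BVH bounder later take the slice
// &bounds[tri_i..tri_i + time_samples] for one triangle.
fn fan_face(
    verts: &[Point],     // vert_count * time_samples entries, grouped by time sample
    face: &[usize],      // vertex indices of one face (3 or more vertices)
    vert_count: usize,
    time_samples: usize,
) -> Vec<(Point, Point, Point)> {
    assert!(face.len() >= 3);
    let mut triangles = Vec::new();
    for vi in 0..(face.len() - 2) {
        for s in 0..time_samples {
            let base = s * vert_count;
            triangles.push((
                verts[base + face[0]],
                verts[base + face[vi + 1]],
                verts[base + face[vi + 2]],
            ));
        }
    }
    triangles
}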
- let (bucket_w, bucket_h) = { - let target_pixels_per_bucket = max_samples_per_bucket as f64 / self.spp as f64; - let target_bucket_dim = if target_pixels_per_bucket.sqrt() < 1.0 { - 1usize - } else { - target_pixels_per_bucket.sqrt() as usize - }; - - (target_bucket_dim, target_bucket_dim) - }; - - // Populate job queue - let bucket_n = { - let bucket_count_x = ((width / bucket_w) + 1) as u32; - let bucket_count_y = ((height / bucket_h) + 1) as u32; - let larger = cmp::max(bucket_count_x, bucket_count_y); - let pow2 = upper_power_of_two(larger); - pow2 * pow2 - }; - for hilbert_d in 0..bucket_n { - let (bx, by) = hilbert::d2xy(hilbert_d); - - let x = bx as usize * bucket_w; - let y = by as usize * bucket_h; - let w = if width >= x { - min(bucket_w, width - x) - } else { - bucket_w - }; - let h = if height >= y { - min(bucket_h, height - y) - } else { - bucket_h - }; - if x < width && y < height && w > 0 && h > 0 { - job_queue.push( - BucketJob { - x: (start_x + x) as u32, - y: (start_y + y) as u32, - w: w as u32, - h: h as u32, - } - ); - } - } - - // Mark done queuing jobs - *all_jobs_queued.write().unwrap() = true; + tpool.scoped(|scope| { + // Spawn worker tasks + for _ in 0..thread_count { + let jq = &job_queue; + let ajq = &all_jobs_queued; + let img = ℑ + let pixrenref = &pixels_rendered; + let cstats = &collective_stats; + scope.execute(move || { + self.render_job( + jq, + ajq, + img, + width * height, + pixrenref, + cstats, + do_blender_output, + ) + }); } - ); + + // Print initial 0.00% progress + print!("0.00%"); + let _ = io::stdout().flush(); + + // Determine bucket size based on the per-thread maximum number of samples to + // calculate at a time. + let (bucket_w, bucket_h) = { + let target_pixels_per_bucket = max_samples_per_bucket as f64 / self.spp as f64; + let target_bucket_dim = if target_pixels_per_bucket.sqrt() < 1.0 { + 1usize + } else { + target_pixels_per_bucket.sqrt() as usize + }; + + (target_bucket_dim, target_bucket_dim) + }; + + // Populate job queue + let bucket_n = { + let bucket_count_x = ((width / bucket_w) + 1) as u32; + let bucket_count_y = ((height / bucket_h) + 1) as u32; + let larger = cmp::max(bucket_count_x, bucket_count_y); + let pow2 = upper_power_of_two(larger); + pow2 * pow2 + }; + for hilbert_d in 0..bucket_n { + let (bx, by) = hilbert::d2xy(hilbert_d); + + let x = bx as usize * bucket_w; + let y = by as usize * bucket_h; + let w = if width >= x { + min(bucket_w, width - x) + } else { + bucket_w + }; + let h = if height >= y { + min(bucket_h, height - y) + } else { + bucket_h + }; + if x < width && y < height && w > 0 && h > 0 { + job_queue.push(BucketJob { + x: (start_x + x) as u32, + y: (start_y + y) as u32, + w: w as u32, + h: h as u32, + }); + } + } + + // Mark done queuing jobs + *all_jobs_queued.write().unwrap() = true; + }); // Clear percentage progress print print!( @@ -188,7 +188,16 @@ impl<'a> Renderer<'a> { } /// Waits for buckets in the job queue to render and renders them when available. 
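The bucket scheduling in render() above sizes buckets so each holds roughly max_samples_per_bucket samples, rounds the bucket grid up to a power-of-two square, and walks it in Hilbert-curve order so consecutive jobs stay spatially coherent. A standalone sketch of that enumeration (crop offsets omitted), using the textbook d2xy mapping in place of the project's hilbert module and std's next_power_of_two() in place of its upper_power_of_two:

// Textbook Hilbert index -> (x, y) for a `side` x `side` grid, `side` a power of two.
fn d2xy(side: u32, d: u32) -> (u32, u32) {
    let (mut x, mut y, mut t) = (0u32, 0u32, d);
    let mut s = 1u32;
    while s < side {
        let rx = 1 & (t / 2);
        let ry = 1 & (t ^ rx);
        if ry == 0 {
            if rx == 1 {
                x = s - 1 - x;
                y = s - 1 - y;
            }
            std::mem::swap(&mut x, &mut y);
        }
        x += s * rx;
        y += s * ry;
        t /= 4;
        s *= 2;
    }
    (x, y)
}

// Enumerate bucket rectangles (x, y, w, h) in Hilbert order, skipping grid cells
// that fall outside the image, mirroring the job-queue population loop above.
fn bucket_jobs(width: usize, height: usize, bucket_w: usize, bucket_h: usize) -> Vec<(usize, usize, usize, usize)> {
    let side = {
        let bx = (width / bucket_w) + 1;
        let by = (height / bucket_h) + 1;
        (bx.max(by) as u32).next_power_of_two()
    };
    let mut jobs = Vec::new();
    for d in 0..(side * side) {
        let (bx, by) = d2xy(side, d);
        let (x, y) = (bx as usize * bucket_w, by as usize * bucket_h);
        if x < width && y < height {
            jobs.push((x, y, bucket_w.min(width - x), bucket_h.min(height - y)));
        }
    }
    jobs
}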
- fn render_job(&self, job_queue: &MsQueue, all_jobs_queued: &RwLock, image: &Image, total_pixels: usize, pixels_rendered: &Mutex>, collected_stats: &RwLock, do_blender_output: bool) { + fn render_job( + &self, + job_queue: &MsQueue, + all_jobs_queued: &RwLock, + image: &Image, + total_pixels: usize, + pixels_rendered: &Mutex>, + collected_stats: &RwLock, + do_blender_output: bool, + ) { let mut stats = RenderStats::new(); let mut timer = Timer::new(); let mut total_timer = Timer::new(); @@ -246,7 +255,10 @@ impl<'a> Renderer<'a> { &self.scene, (x, y), (img_x, img_y), - (get_sample(0, offset + si as u32), get_sample(1, offset + si as u32)), + ( + get_sample(0, offset + si as u32), + get_sample(1, offset + si as u32), + ), get_sample(2, offset + si as u32), map_0_1_to_wavelength(get_sample(3, offset + si as u32)), offset + si as u32, @@ -266,11 +278,9 @@ impl<'a> Renderer<'a> { stats.trace_time += timer.tick() as f64; // Determine next rays to shoot based on result - pi = partition_pair( - &mut paths[..pi], - &mut rays[..pi], - |i, path, ray| path.next(&mut xform_stack, &self.scene, &isects[i], &mut *ray), - ); + pi = partition_pair(&mut paths[..pi], &mut rays[..pi], |i, path, ray| { + path.next(&mut xform_stack, &self.scene, &isects[i], &mut *ray) + }); stats.ray_generation_time += timer.tick() as f64; } @@ -326,12 +336,10 @@ impl<'a> Renderer<'a> { } stats.total_time += total_timer.tick() as f64; - ACCEL_TRAV_TIME.with( - |att| { - stats.accel_traversal_time = att.get(); - att.set(0.0); - } - ); + ACCEL_TRAV_TIME.with(|att| { + stats.accel_traversal_time = att.get(); + att.set(0.0); + }); // Collect stats collected_stats.write().unwrap().collect(stats); @@ -366,34 +374,42 @@ pub struct LightPath { } impl LightPath { - fn new(scene: &Scene, pixel_co: (u32, u32), image_plane_co: (f32, f32), lens_uv: (f32, f32), time: f32, wavelength: f32, lds_offset: u32) -> (LightPath, Ray) { - (LightPath { - event: LightPathEvent::CameraRay, - bounce_count: 0, + fn new( + scene: &Scene, + pixel_co: (u32, u32), + image_plane_co: (f32, f32), + lens_uv: (f32, f32), + time: f32, + wavelength: f32, + lds_offset: u32, + ) -> (LightPath, Ray) { + ( + LightPath { + event: LightPathEvent::CameraRay, + bounce_count: 0, - pixel_co: pixel_co, - lds_offset: lds_offset, - dim_offset: Cell::new(6), - time: time, - wavelength: wavelength, + pixel_co: pixel_co, + lds_offset: lds_offset, + dim_offset: Cell::new(6), + time: time, + wavelength: wavelength, - next_bounce_ray: None, - next_attentuation_fac: Float4::splat(1.0), + next_bounce_ray: None, + next_attentuation_fac: Float4::splat(1.0), - light_attenuation: Float4::splat(1.0), - pending_color_addition: Float4::splat(0.0), - color: Float4::splat(0.0), - }, + light_attenuation: Float4::splat(1.0), + pending_color_addition: Float4::splat(0.0), + color: Float4::splat(0.0), + }, - scene - .camera - .generate_ray( - image_plane_co.0, - image_plane_co.1, - time, - lens_uv.0, - lens_uv.1, - )) + scene.camera.generate_ray( + image_plane_co.0, + image_plane_co.1, + time, + lens_uv.0, + lens_uv.1, + ), + ) } fn next_lds_samp(&self) -> f32 { @@ -402,23 +418,38 @@ impl LightPath { get_sample(dimension, self.lds_offset) } - fn next(&mut self, xform_stack: &mut TransformStack, scene: &Scene, isect: &surface::SurfaceIntersection, ray: &mut Ray) -> bool { + fn next( + &mut self, + xform_stack: &mut TransformStack, + scene: &Scene, + isect: &surface::SurfaceIntersection, + ray: &mut Ray, + ) -> bool { match self.event { 
//-------------------------------------------------------------------- // Result of Camera or bounce ray, prepare next bounce and light rays LightPathEvent::CameraRay | LightPathEvent::BounceRay => { if let &surface::SurfaceIntersection::Hit { - intersection_data: ref idata, - ref closure, - } = isect { + intersection_data: ref idata, + ref closure, + } = isect + { // Hit something! Do the stuff // Prepare light ray let light_n = self.next_lds_samp(); - let light_uvw = (self.next_lds_samp(), self.next_lds_samp(), self.next_lds_samp()); + let light_uvw = ( + self.next_lds_samp(), + self.next_lds_samp(), + self.next_lds_samp(), + ); xform_stack.clear(); - let found_light = if let Some((light_color, shadow_vec, light_pdf, light_sel_pdf, is_infinite)) = + let found_light = if let Some((light_color, + shadow_vec, + light_pdf, + light_sel_pdf, + is_infinite)) = scene.sample_lights( xform_stack, light_n, @@ -426,15 +457,22 @@ impl LightPath { self.wavelength, self.time, isect, - ) { + ) + { // Check if pdf is zero, to avoid NaN's. if light_pdf > 0.0 { // Calculate and store the light that will be contributed // to the film plane if the light is not in shadow. self.pending_color_addition = { let material = closure.as_surface_closure(); - let la = material.evaluate(ray.dir, shadow_vec, idata.nor, self.wavelength); - light_color.e * la.e * self.light_attenuation / (light_pdf * light_sel_pdf) + let la = material.evaluate( + ray.dir, + shadow_vec, + idata.nor, + self.wavelength, + ); + light_color.e * la.e * self.light_attenuation / + (light_pdf * light_sel_pdf) }; // Calculate the shadow ray for testing if the light is @@ -480,7 +518,12 @@ impl LightPath { self.next_attentuation_fac = filter.e / pdf; // Calculate the ray for this bounce - self.next_bounce_ray = Some(Ray::new(idata.pos + dir.normalized() * 0.0001, dir, self.time, false)); + self.next_bounce_ray = Some(Ray::new( + idata.pos + dir.normalized() * 0.0001, + dir, + self.time, + false, + )); true } else { diff --git a/src/sampling/mod.rs b/src/sampling/mod.rs index 729b21a..2bab45e 100644 --- a/src/sampling/mod.rs +++ b/src/sampling/mod.rs @@ -1,3 +1,5 @@ mod monte_carlo; -pub use self::monte_carlo::{square_to_circle, cosine_sample_hemisphere, uniform_sample_hemisphere, uniform_sample_sphere, uniform_sample_cone, uniform_sample_cone_pdf, spherical_triangle_solid_angle, uniform_sample_spherical_triangle}; +pub use self::monte_carlo::{square_to_circle, cosine_sample_hemisphere, uniform_sample_hemisphere, + uniform_sample_sphere, uniform_sample_cone, uniform_sample_cone_pdf, + spherical_triangle_solid_angle, uniform_sample_spherical_triangle}; diff --git a/src/sampling/monte_carlo.rs b/src/sampling/monte_carlo.rs index 4dc70e0..e1a3935 100644 --- a/src/sampling/monte_carlo.rs +++ b/src/sampling/monte_carlo.rs @@ -120,7 +120,13 @@ pub fn spherical_triangle_solid_angle(va: Vector, vb: Vector, vc: Vector) -> f32 /// Generates a uniform sample on a spherical triangle given two uniform /// random variables i and j in [0, 1]. 
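When a camera or bounce ray hits a surface in LightPath::next above, the light sample's contribution is the BSDF evaluated toward the shadow ray, scaled by the path throughput and divided by both the light's sample pdf and the probability of having selected that light, then parked in pending_color_addition until the shadow ray confirms visibility (the light_pdf > 0.0 check guards against NaNs). A minimal sketch of that weighting, with plain arrays standing in for Float4/SpectralSample:

fn pending_light_contribution(
    light_color: [f32; 4], // spectral sample returned by the light
    bsdf_eval: [f32; 4],   // closure.evaluate(ray.dir, shadow_vec, normal, wavelength)
    throughput: [f32; 4],  // light_attenuation accumulated along the path so far
    light_pdf: f32,        // pdf of the sampled point/direction on the light
    light_sel_pdf: f32,    // pdf of having picked this particular light
) -> [f32; 4] {
    assert!(light_pdf > 0.0 && light_sel_pdf > 0.0);
    let mut out = [0.0f32; 4];
    for i in 0..4 {
        out[i] = light_color[i] * bsdf_eval[i] * throughput[i] / (light_pdf * light_sel_pdf);
    }
    out
}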
-pub fn uniform_sample_spherical_triangle(va: Vector, vb: Vector, vc: Vector, i: f32, j: f32) -> Vector { +pub fn uniform_sample_spherical_triangle( + va: Vector, + vb: Vector, + vc: Vector, + i: f32, + j: f32, +) -> Vector { // Calculate sines and cosines of the spherical triangle's edge lengths let cos_a: f64 = dot(vb, vc).max(-1.0).min(1.0) as f64; let cos_b: f64 = dot(vc, va).max(-1.0).min(1.0) as f64; @@ -172,7 +178,8 @@ pub fn uniform_sample_spherical_triangle(va: Vector, vb: Vector, vc: Vector, i: let q_bottom = ((v * s) + (u * t)) * sin_va; let q = q_top / q_bottom; - let vc_2 = (va * q as f32) + ((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32); + let vc_2 = (va * q as f32) + + ((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32); let z = 1.0 - (j * (1.0 - dot(vc_2, vb))); diff --git a/src/scene/assembly.rs b/src/scene/assembly.rs index 0b112fa..3760ac1 100644 --- a/src/scene/assembly.rs +++ b/src/scene/assembly.rs @@ -36,26 +36,35 @@ pub struct Assembly<'a> { impl<'a> Assembly<'a> { // Returns (light_color, shadow_vector, pdf, selection_pdf) - pub fn sample_lights(&self, xform_stack: &mut TransformStack, n: f32, uvw: (f32, f32, f32), wavelength: f32, time: f32, intr: &SurfaceIntersection) -> Option<(SpectralSample, Vector, f32, f32)> { + pub fn sample_lights( + &self, + xform_stack: &mut TransformStack, + n: f32, + uvw: (f32, f32, f32), + wavelength: f32, + time: f32, + intr: &SurfaceIntersection, + ) -> Option<(SpectralSample, Vector, f32, f32)> { if let &SurfaceIntersection::Hit { - intersection_data: idata, - closure, - } = intr { + intersection_data: idata, + closure, + } = intr + { let sel_xform = if xform_stack.top().len() > 0 { lerp_slice(xform_stack.top(), time) } else { Matrix4x4::new() }; if let Some((light_i, sel_pdf, whittled_n)) = - self.light_accel - .select( - idata.incoming * sel_xform, - idata.pos * sel_xform, - idata.nor * sel_xform, - closure.as_surface_closure(), - time, - n, - ) { + self.light_accel.select( + idata.incoming * sel_xform, + idata.pos * sel_xform, + idata.nor * sel_xform, + closure.as_surface_closure(), + time, + n, + ) + { let inst = self.light_instances[light_i]; match inst.instance_type { @@ -81,7 +90,8 @@ impl<'a> Assembly<'a> { }; // Sample the light - let (color, shadow_vec, pdf) = light.sample(&xform, idata.pos, uvw.0, uvw.1, wavelength, time); + let (color, shadow_vec, pdf) = + light.sample(&xform, idata.pos, uvw.0, uvw.1, wavelength, time); return Some((color, shadow_vec, pdf, sel_pdf)); } @@ -97,7 +107,14 @@ impl<'a> Assembly<'a> { } // Sample sub-assembly lights - let sample = self.assemblies[inst.data_index].sample_lights(xform_stack, whittled_n, uvw, wavelength, time, intr); + let sample = self.assemblies[inst.data_index].sample_lights( + xform_stack, + whittled_n, + uvw, + wavelength, + time, + intr, + ); // Pop the assembly's transforms off the transform stack. 
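Both the BVH traversal and the transform handling above go through lerp_slice(samples, t) to collapse a slice of time samples into one value at shutter time t. Its implementation is not part of this diff; a plausible scalar version, assuming samples are spaced evenly over t in [0, 1] (the real helper is presumably generic so it can blend Matrix4x4s and BBoxes the same way), would be:

fn lerp_slice_f32(samples: &[f32], t: f32) -> f32 {
    assert!(!samples.is_empty());
    if samples.len() == 1 {
        return samples[0];
    }
    // Map t in [0, 1] onto the sample index range and blend the two bracketing samples.
    let ft = t.max(0.0).min(1.0) * (samples.len() - 1) as f32;
    let i = (ft as usize).min(samples.len() - 2);
    let frac = ft - i as f32;
    samples[i] * (1.0 - frac) + samples[i + 1] * frac
}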
if let Some(_) = inst.transform_indices { @@ -176,8 +193,10 @@ impl<'a> AssemblyBuilder<'a> { } // Add assembly - self.assembly_map - .insert(name.to_string(), self.assemblies.len()); + self.assembly_map.insert( + name.to_string(), + self.assemblies.len(), + ); self.assemblies.push(asmb); } @@ -200,14 +219,18 @@ impl<'a> AssemblyBuilder<'a> { instance_type: InstanceType::Object, data_index: self.object_map[name], id: self.instances.len(), - transform_indices: xforms.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())), + transform_indices: xforms.map( + |xf| (self.xforms.len(), self.xforms.len() + xf.len()), + ), } } else { Instance { instance_type: InstanceType::Assembly, data_index: self.assembly_map[name], id: self.instances.len(), - transform_indices: xforms.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())), + transform_indices: xforms.map( + |xf| (self.xforms.len(), self.xforms.len() + xf.len()), + ), } }; @@ -228,59 +251,52 @@ impl<'a> AssemblyBuilder<'a> { let (bis, bbs) = self.instance_bounds(); // Build object accel - let object_accel = BVH::from_objects( - self.arena, - &mut self.instances[..], - 1, - |inst| &bbs[bis[inst.id]..bis[inst.id + 1]], - ); + let object_accel = BVH::from_objects(self.arena, &mut self.instances[..], 1, |inst| { + &bbs[bis[inst.id]..bis[inst.id + 1]] + }); // Get list of instances that are for light sources or assemblies that contain light // sources. let mut light_instances: Vec<_> = self.instances .iter() - .filter( - |inst| match inst.instance_type { - InstanceType::Object => { - if let Object::Light(_) = self.objects[inst.data_index] { - true - } else { - false - } - } - - InstanceType::Assembly => { - self.assemblies[inst.data_index] - .light_accel - .approximate_energy() > 0.0 + .filter(|inst| match inst.instance_type { + InstanceType::Object => { + if let Object::Light(_) = self.objects[inst.data_index] { + true + } else { + false } } - ) + + InstanceType::Assembly => { + self.assemblies[inst.data_index] + .light_accel + .approximate_energy() > 0.0 + } + }) .map(|&a| a) .collect(); // Build light accel - let light_accel = LightTree::from_objects( - self.arena, &mut light_instances[..], |inst| { - let bounds = &bbs[bis[inst.id]..bis[inst.id + 1]]; - let energy = match inst.instance_type { - InstanceType::Object => { - if let Object::Light(ref light) = self.objects[inst.data_index] { - light.approximate_energy() - } else { - 0.0 - } + let light_accel = LightTree::from_objects(self.arena, &mut light_instances[..], |inst| { + let bounds = &bbs[bis[inst.id]..bis[inst.id + 1]]; + let energy = match inst.instance_type { + InstanceType::Object => { + if let Object::Light(ref light) = self.objects[inst.data_index] { + light.approximate_energy() + } else { + 0.0 } + } - InstanceType::Assembly => { - self.assemblies[inst.data_index] - .light_accel - .approximate_energy() - } - }; - (bounds, energy) - } - ); + InstanceType::Assembly => { + self.assemblies[inst.data_index] + .light_accel + .approximate_energy() + } + }; + (bounds, energy) + }); Assembly { instances: self.arena.copy_slice(&self.instances), diff --git a/src/scene/scene.rs b/src/scene/scene.rs index a9217f6..5761c38 100644 --- a/src/scene/scene.rs +++ b/src/scene/scene.rs @@ -19,7 +19,15 @@ pub struct Scene<'a> { } impl<'a> Scene<'a> { - pub fn sample_lights(&self, xform_stack: &mut TransformStack, n: f32, uvw: (f32, f32, f32), wavelength: f32, time: f32, intr: &SurfaceIntersection) -> Option<(SpectralSample, Vector, f32, f32, bool)> { + pub fn sample_lights( + &self, + 
xform_stack: &mut TransformStack, + n: f32, + uvw: (f32, f32, f32), + wavelength: f32, + time: f32, + intr: &SurfaceIntersection, + ) -> Option<(SpectralSample, Vector, f32, f32, bool)> { // TODO: this just selects between world lights and local lights // with a 50/50 chance. We should do something more sophisticated // than this, accounting for the estimated impact of the lights @@ -27,10 +35,10 @@ impl<'a> Scene<'a> { // Calculate relative probabilities of traversing into world lights // or local lights. - let wl_energy = if self.world - .lights - .iter() - .fold(0.0, |energy, light| energy + light.approximate_energy()) <= 0.0 { + let wl_energy = if self.world.lights.iter().fold(0.0, |energy, light| { + energy + light.approximate_energy() + }) <= 0.0 + { 0.0 } else { 1.0 @@ -59,8 +67,15 @@ impl<'a> Scene<'a> { let n = (n - wl_prob) / (1.0 - wl_prob); if let Some((ss, sv, pdf, spdf)) = - self.root - .sample_lights(xform_stack, n, uvw, wavelength, time, intr) { + self.root.sample_lights( + xform_stack, + n, + uvw, + wavelength, + time, + intr, + ) + { return Some((ss, sv, pdf, spdf * (1.0 - wl_prob), false)); } else { return None; diff --git a/src/shading/surface_closure.rs b/src/shading/surface_closure.rs index adcbc66..c6fa6c0 100644 --- a/src/shading/surface_closure.rs +++ b/src/shading/surface_closure.rs @@ -42,7 +42,13 @@ pub trait SurfaceClosure { /// wavelength: The wavelength of light to sample at. /// /// Returns a tuple with the generated outgoing light direction, color filter, and pdf. - fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32); + fn sample( + &self, + inc: Vector, + nor: Normal, + uv: (f32, f32), + wavelength: f32, + ) -> (Vector, SpectralSample, f32); /// Evaluates the closure for the given incoming and outgoing rays. /// @@ -67,7 +73,13 @@ pub trait SurfaceClosure { /// This is used for importance sampling, so does not need to be exact, /// but it does need to be non-zero anywhere that an exact solution would /// be non-zero. - fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32; + fn estimate_eval_over_solid_angle( + &self, + inc: Vector, + out: Vector, + nor: Normal, + cos_theta: f32, + ) -> f32; } @@ -163,10 +175,20 @@ impl SurfaceClosure for EmitClosure { false } - fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32) { + fn sample( + &self, + inc: Vector, + nor: Normal, + uv: (f32, f32), + wavelength: f32, + ) -> (Vector, SpectralSample, f32) { let _ = (inc, nor, uv); // Not using these, silence warning - (Vector::new(0.0, 0.0, 0.0), SpectralSample::new(wavelength), 1.0) + ( + Vector::new(0.0, 0.0, 0.0), + SpectralSample::new(wavelength), + 1.0, + ) } fn evaluate(&self, inc: Vector, out: Vector, nor: Normal, wavelength: f32) -> SpectralSample { @@ -181,7 +203,13 @@ impl SurfaceClosure for EmitClosure { 1.0 } - fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32 { + fn estimate_eval_over_solid_angle( + &self, + inc: Vector, + out: Vector, + nor: Normal, + cos_theta: f32, + ) -> f32 { let _ = (inc, out, nor, cos_theta); // Not using these, silence warning // TODO: what to do here? 
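Scene::sample_lights above spends one uniform number twice: n first decides between world lights and assembly-local lights, and is then remapped back onto [0, 1) with (n - wl_prob) / (1.0 - wl_prob) so the chosen branch can reuse it as its own selection number, while the branch's selection pdf gets scaled by the branch probability (the spdf * (1.0 - wl_prob) factor). A simplified sketch of that remapping; the degenerate cases where one branch has zero probability are handled separately in the real code, as the wl_energy guard shows:

// Returns (use_world_lights, remapped_n, branch_probability). The caller multiplies
// whatever selection pdf the chosen branch reports by `branch_probability`.
fn pick_light_branch(n: f32, world_prob: f32) -> (bool, f32, f32) {
    assert!(world_prob > 0.0 && world_prob < 1.0);
    if n < world_prob {
        (true, n / world_prob, world_prob)
    } else {
        (false, (n - world_prob) / (1.0 - world_prob), 1.0 - world_prob)
    }
}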
@@ -207,13 +235,18 @@ impl SurfaceClosure for LambertClosure { false } - fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32) { + fn sample( + &self, + inc: Vector, + nor: Normal, + uv: (f32, f32), + wavelength: f32, + ) -> (Vector, SpectralSample, f32) { let nn = if dot(nor.into_vector(), inc) <= 0.0 { - nor.normalized() - } else { - -nor.normalized() - } - .into_vector(); + nor.normalized() + } else { + -nor.normalized() + }.into_vector(); // Generate a random ray direction in the hemisphere // of the surface. @@ -228,11 +261,10 @@ impl SurfaceClosure for LambertClosure { fn evaluate(&self, inc: Vector, out: Vector, nor: Normal, wavelength: f32) -> SpectralSample { let v = out.normalized(); let nn = if dot(nor.into_vector(), inc) <= 0.0 { - nor.normalized() - } else { - -nor.normalized() - } - .into_vector(); + nor.normalized() + } else { + -nor.normalized() + }.into_vector(); let fac = dot(nn, v).max(0.0) * INV_PI; self.col.to_spectral_sample(wavelength) * fac @@ -241,16 +273,21 @@ impl SurfaceClosure for LambertClosure { fn sample_pdf(&self, inc: Vector, out: Vector, nor: Normal) -> f32 { let v = out.normalized(); let nn = if dot(nor.into_vector(), inc) <= 0.0 { - nor.normalized() - } else { - -nor.normalized() - } - .into_vector(); + nor.normalized() + } else { + -nor.normalized() + }.into_vector(); dot(nn, v).max(0.0) * INV_PI } - fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32 { + fn estimate_eval_over_solid_angle( + &self, + inc: Vector, + out: Vector, + nor: Normal, + cos_theta: f32, + ) -> f32 { assert!(cos_theta >= -1.0 && cos_theta <= 1.0); // Analytically calculates lambert shading from a uniform light source @@ -292,11 +329,10 @@ impl SurfaceClosure for LambertClosure { } else { let v = out.normalized(); let nn = if dot(nor.into_vector(), inc) <= 0.0 { - nor.normalized() - } else { - -nor.normalized() - } - .into_vector(); + nor.normalized() + } else { + -nor.normalized() + }.into_vector(); let cos_nv = dot(nn, v).max(-1.0).min(1.0); @@ -375,7 +411,9 @@ impl GTRClosure { let roughness2 = self.roughness * self.roughness; // Calculate top half of equation - let top = 1.0 - ((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u).powf(1.0 / (1.0 - self.tail_shape)); + let top = 1.0 - + ((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u) + .powf(1.0 / (1.0 - self.tail_shape)); // Calculate bottom half of equation let bottom = 1.0 - roughness2; @@ -408,14 +446,19 @@ impl SurfaceClosure for GTRClosure { } - fn sample(&self, inc: Vector, nor: Normal, uv: (f32, f32), wavelength: f32) -> (Vector, SpectralSample, f32) { + fn sample( + &self, + inc: Vector, + nor: Normal, + uv: (f32, f32), + wavelength: f32, + ) -> (Vector, SpectralSample, f32) { // Get normalized surface normal let nn = if dot(nor.into_vector(), inc) < 0.0 { - nor.normalized() - } else { - -nor.normalized() // If back-facing, flip normal - } - .into_vector(); + nor.normalized() + } else { + -nor.normalized() // If back-facing, flip normal + }.into_vector(); // Generate a random ray direction in the hemisphere // of the surface. 
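LambertClosure::sample above flips the shading normal toward the incoming ray, draws a direction with cosine_sample_hemisphere, and reports dot(nn, v).max(0.0) * INV_PI as both the (albedo-scaled) evaluation and the pdf, so the cosine term cancels against the sampling density. The project's cosine_sample_hemisphere lives in sampling::monte_carlo and is not shown in this diff; the standard construction it most likely corresponds to, sampling around +Z, is:

use std::f32::consts::{FRAC_1_PI, PI};

// Cosine-weighted direction on the +Z hemisphere from two uniform numbers in [0, 1).
fn cosine_sample_hemisphere(u: f32, v: f32) -> (f32, f32, f32) {
    let r = u.sqrt();
    let phi = 2.0 * PI * v;
    (r * phi.cos(), r * phi.sin(), (1.0 - u).max(0.0).sqrt())
}

// Matching pdf: cos(theta) / pi, the same shape LambertClosure::sample_pdf returns.
fn cosine_hemisphere_pdf(cos_theta: f32) -> f32 {
    cos_theta.max(0.0) * FRAC_1_PI
}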
@@ -441,11 +484,10 @@ impl SurfaceClosure for GTRClosure { // Surface normal let nn = if dot(nor.into_vector(), hh) < 0.0 { - -nor.normalized() // If back-facing, flip normal - } else { - nor.normalized() - } - .into_vector(); + -nor.normalized() // If back-facing, flip normal + } else { + nor.normalized() + }.into_vector(); // Calculate needed dot products let na = clamp(dot(nn, aa), -1.0, 1.0); @@ -538,11 +580,10 @@ impl SurfaceClosure for GTRClosure { // Surface normal let nn = if dot(nor.into_vector(), hh) < 0.0 { - -nor.normalized() // If back-facing, flip normal - } else { - nor.normalized() - } - .into_vector(); + -nor.normalized() // If back-facing, flip normal + } else { + nor.normalized() + }.into_vector(); // Calculate needed dot products let nh = clamp(dot(nn, hh), -1.0, 1.0); @@ -551,7 +592,13 @@ impl SurfaceClosure for GTRClosure { } - fn estimate_eval_over_solid_angle(&self, inc: Vector, out: Vector, nor: Normal, cos_theta: f32) -> f32 { + fn estimate_eval_over_solid_angle( + &self, + inc: Vector, + out: Vector, + nor: Normal, + cos_theta: f32, + ) -> f32 { // TODO: all of the stuff in this function is horribly hacky. // Find a proper way to approximate the light contribution from a // solid angle. @@ -560,11 +607,10 @@ impl SurfaceClosure for GTRClosure { // Surface normal let nn = if dot(nor.into_vector(), inc) < 0.0 { - nor.normalized() - } else { - -nor.normalized() // If back-facing, flip normal - } - .into_vector(); + nor.normalized() + } else { + -nor.normalized() // If back-facing, flip normal + }.into_vector(); let aa = -inc.normalized(); // Vector pointing to where "in" came from let bb = out.normalized(); // Out diff --git a/src/surface/mod.rs b/src/surface/mod.rs index 52666b9..7d80999 100644 --- a/src/surface/mod.rs +++ b/src/surface/mod.rs @@ -12,7 +12,13 @@ use shading::surface_closure::SurfaceClosureUnion; pub trait Surface: Boundable + Debug + Sync { - fn intersect_rays(&self, accel_rays: &mut [AccelRay], wrays: &[Ray], isects: &mut [SurfaceIntersection], space: &[Matrix4x4]); + fn intersect_rays( + &self, + accel_rays: &mut [AccelRay], + wrays: &[Ray], + isects: &mut [SurfaceIntersection], + space: &[Matrix4x4], + ); } diff --git a/src/surface/triangle_mesh.rs b/src/surface/triangle_mesh.rs index 7b8b1e6..aefe708 100644 --- a/src/surface/triangle_mesh.rs +++ b/src/surface/triangle_mesh.rs @@ -24,7 +24,11 @@ pub struct TriangleMesh<'a> { } impl<'a> TriangleMesh<'a> { - pub fn from_triangles<'b>(arena: &'b MemArena, time_samples: usize, triangles: Vec<(Point, Point, Point)>) -> TriangleMesh<'b> { + pub fn from_triangles<'b>( + arena: &'b MemArena, + time_samples: usize, + triangles: Vec<(Point, Point, Point)>, + ) -> TriangleMesh<'b> { assert!(triangles.len() % time_samples == 0); let mut indices: Vec = (0..(triangles.len() / time_samples)) @@ -41,12 +45,9 @@ impl<'a> TriangleMesh<'a> { bounds }; - let accel = BVH::from_objects( - arena, - &mut indices[..], - 3, - |tri_i| &bounds[*tri_i..(*tri_i + time_samples)], - ); + let accel = BVH::from_objects(arena, &mut indices[..], 3, |tri_i| { + &bounds[*tri_i..(*tri_i + time_samples)] + }); TriangleMesh { time_samples: time_samples, @@ -65,7 +66,13 @@ impl<'a> Boundable for TriangleMesh<'a> { impl<'a> Surface for TriangleMesh<'a> { - fn intersect_rays(&self, accel_rays: &mut [AccelRay], wrays: &[Ray], isects: &mut [SurfaceIntersection], space: &[Matrix4x4]) { + fn intersect_rays( + &self, + accel_rays: &mut [AccelRay], + wrays: &[Ray], + isects: &mut [SurfaceIntersection], + space: &[Matrix4x4], + ) { self.accel 
.traverse( &mut accel_rays[..], &self.indices, |tri_i, rs| { @@ -96,13 +103,17 @@ impl<'a> Surface for TriangleMesh<'a> { incoming: wr.dir, t: t, pos: wr.orig + (wr.dir * t), - nor: cross(tri.0 - tri.1, tri.0 - tri.2).into_normal(), // TODO - nor_g: cross(tri.0 - tri.1, tri.0 - tri.2).into_normal(), + nor: cross(tri.0 - tri.1, tri.0 - tri.2) + .into_normal(), // TODO + nor_g: cross(tri.0 - tri.1, tri.0 - tri.2) + .into_normal(), uv: (0.0, 0.0), // TODO local_space: mat_space, }, // TODO: get surface closure from surface shader. - closure: SurfaceClosureUnion::LambertClosure(LambertClosure::new(XYZ::new(0.8, 0.8, 0.8))), + closure: SurfaceClosureUnion::LambertClosure( + LambertClosure::new(XYZ::new(0.8, 0.8, 0.8)) + ), // closure: // SurfaceClosureUnion::GTRClosure( // GTRClosure::new(XYZ::new(0.8, 0.8, 0.8), diff --git a/src/tracer.rs b/src/tracer.rs index e8d2342..50623aa 100644 --- a/src/tracer.rs +++ b/src/tracer.rs @@ -29,12 +29,9 @@ impl<'a> Tracer<'a> { self.rays.clear(); self.rays.reserve(wrays.len()); let mut ids = 0..(wrays.len() as u32); - self.rays - .extend( - wrays - .iter() - .map(|wr| AccelRay::new(wr, ids.next().unwrap())) - ); + self.rays.extend(wrays.iter().map( + |wr| AccelRay::new(wr, ids.next().unwrap()), + )); return self.inner.trace(wrays, &mut self.rays[..]); } @@ -51,8 +48,12 @@ impl<'a> TracerInner<'a> { // Ready the isects self.isects.clear(); self.isects.reserve(wrays.len()); - self.isects - .extend(iter::repeat(SurfaceIntersection::Miss).take(wrays.len())); + self.isects.extend( + iter::repeat(SurfaceIntersection::Miss).take( + wrays + .len(), + ), + ); let mut ray_sets = split_rays_by_direction(&mut rays[..]); for ray_set in ray_sets.iter_mut().filter(|ray_set| ray_set.len() > 0) { @@ -62,93 +63,112 @@ impl<'a> TracerInner<'a> { return &self.isects; } - fn trace_assembly<'b>(&'b mut self, assembly: &Assembly, wrays: &[Ray], accel_rays: &mut [AccelRay]) { - assembly - .object_accel - .traverse( - &mut accel_rays[..], &assembly.instances[..], |inst, rs| { - // Transform rays if needed - if let Some((xstart, xend)) = inst.transform_indices { - // Push transforms to stack - self.xform_stack.push(&assembly.xforms[xstart..xend]); + fn trace_assembly<'b>( + &'b mut self, + assembly: &Assembly, + wrays: &[Ray], + accel_rays: &mut [AccelRay], + ) { + assembly.object_accel.traverse( + &mut accel_rays[..], + &assembly.instances[..], + |inst, rs| { + // Transform rays if needed + if let Some((xstart, xend)) = inst.transform_indices { + // Push transforms to stack + self.xform_stack.push(&assembly.xforms[xstart..xend]); - // Do transforms - let xforms = self.xform_stack.top(); - for ray in &mut rs[..] { - let id = ray.id; - let t = ray.time; - ray.update_from_xformed_world_ray(&wrays[id as usize], &lerp_slice(xforms, t)); - } + // Do transforms + let xforms = self.xform_stack.top(); + for ray in &mut rs[..] { + let id = ray.id; + let t = ray.time; + ray.update_from_xformed_world_ray( + &wrays[id as usize], + &lerp_slice(xforms, t), + ); } + } - // Trace rays - { - // This is kind of weird looking, but what we're doing here is - // splitting the rays up based on direction if they were - // transformed, and not splitting them up if they weren't - // transformed. - // But to keep the actual tracing code in one place (DRY), - // we map both cases to an array slice that contains slices of - // ray arrays. Gah... that's confusing even when explained. - // TODO: do this in a way that's less confusing. 
Probably split - // the tracing code out into a trace_instance() method or - // something. - let mut tmp = if let Some(_) = inst.transform_indices { - split_rays_by_direction(rs) - } else { - [ - &mut rs[..], - &mut [], - &mut [], - &mut [], - &mut [], - &mut [], - &mut [], - &mut [], - ] - }; - let mut ray_sets = if let Some(_) = inst.transform_indices { - &mut tmp[..] - } else { - &mut tmp[..1] - }; + // Trace rays + { + // This is kind of weird looking, but what we're doing here is + // splitting the rays up based on direction if they were + // transformed, and not splitting them up if they weren't + // transformed. + // But to keep the actual tracing code in one place (DRY), + // we map both cases to an array slice that contains slices of + // ray arrays. Gah... that's confusing even when explained. + // TODO: do this in a way that's less confusing. Probably split + // the tracing code out into a trace_instance() method or + // something. + let mut tmp = if let Some(_) = inst.transform_indices { + split_rays_by_direction(rs) + } else { + [ + &mut rs[..], + &mut [], + &mut [], + &mut [], + &mut [], + &mut [], + &mut [], + &mut [], + ] + }; + let mut ray_sets = if let Some(_) = inst.transform_indices { + &mut tmp[..] + } else { + &mut tmp[..1] + }; - // Loop through the split ray slices and trace them - for ray_set in ray_sets.iter_mut().filter(|ray_set| ray_set.len() > 0) { - match inst.instance_type { - InstanceType::Object => { - self.trace_object(&assembly.objects[inst.data_index], wrays, ray_set); - } - - InstanceType::Assembly => { - self.trace_assembly(&assembly.assemblies[inst.data_index], wrays, ray_set); - } + // Loop through the split ray slices and trace them + for ray_set in ray_sets.iter_mut().filter(|ray_set| ray_set.len() > 0) { + match inst.instance_type { + InstanceType::Object => { + self.trace_object( + &assembly.objects[inst.data_index], + wrays, + ray_set, + ); } - } - } - // Un-transform rays if needed - if let Some(_) = inst.transform_indices { - // Pop transforms off stack - self.xform_stack.pop(); - - // Undo transforms - let xforms = self.xform_stack.top(); - if xforms.len() > 0 { - for ray in &mut rs[..] { - let id = ray.id; - let t = ray.time; - ray.update_from_xformed_world_ray(&wrays[id as usize], &lerp_slice(xforms, t)); - } - } else { - for ray in &mut rs[..] { - let id = ray.id; - ray.update_from_world_ray(&wrays[id as usize]); + InstanceType::Assembly => { + self.trace_assembly( + &assembly.assemblies[inst.data_index], + wrays, + ray_set, + ); } } } } - ); + + // Un-transform rays if needed + if let Some(_) = inst.transform_indices { + // Pop transforms off stack + self.xform_stack.pop(); + + // Undo transforms + let xforms = self.xform_stack.top(); + if xforms.len() > 0 { + for ray in &mut rs[..] { + let id = ray.id; + let t = ray.time; + ray.update_from_xformed_world_ray( + &wrays[id as usize], + &lerp_slice(xforms, t), + ); + } + } else { + for ray in &mut rs[..] { + let id = ray.id; + ray.update_from_world_ray(&wrays[id as usize]); + } + } + } + }, + ); } fn trace_object<'b>(&'b mut self, obj: &Object, wrays: &[Ray], rays: &mut [AccelRay]) {
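trace_assembly above follows a push / transform / recurse / pop / restore pattern: an instance's transforms are pushed onto the stack, rays are re-derived from their original world-space rays against the new stack top, children are traced, and after popping the rays are re-derived again against whatever remains on the stack (or reset straight from the world rays at the root), rather than trying to invert the instance transform. A toy version of just that control flow, with scale factors standing in for the time-sampled Matrix4x4 stack, positions standing in for rays, and the direction-based ray splitting omitted:

fn composed_scale(stack: &[f32]) -> f32 {
    // Stand-in for composing / lerping the transforms currently on the stack.
    stack.iter().product()
}

fn trace_instance(
    stack: &mut Vec<f32>,
    inst_scale: Option<f32>,     // None models an untransformed instance
    world_positions: &[f32],     // original, never-modified "world rays"
    local_positions: &mut [f32], // working copies handed to the accel traversal
    visit: impl FnOnce(&mut Vec<f32>, &mut [f32]),
) {
    if let Some(s) = inst_scale {
        stack.push(s);
        let k = composed_scale(stack);
        for (local, world) in local_positions.iter_mut().zip(world_positions) {
            *local = *world * k; // move into the instance's local space
        }
    }

    visit(&mut *stack, &mut *local_positions);

    if inst_scale.is_some() {
        stack.pop();
        let k = composed_scale(stack); // empty stack composes to the identity (1.0 here)
        for (local, world) in local_positions.iter_mut().zip(world_positions) {
            *local = *world * k; // restore for the enclosing assembly, or back to world space
        }
    }
}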