Run new rustfmt on codebase.

Author: Nathan Vegdahl
Date:   2018-03-04 13:00:55 -08:00
Parent: f39589ab72
Commit: 97d3304149

56 changed files with 719 additions and 837 deletions
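The hunks below mostly follow a few mechanical patterns: binary operators moved to the start of continuation lines, single-line if/else expressions expanded into blocks, long method chains broken before each `.` call, and import lists reordered. A minimal sketch of those patterns is shown here for orientation; the `example` function and its variables are hypothetical and not code from this repository.

// Hypothetical illustration of the formatting patterns applied in this commit.
fn example(a: f32, b: f32, xs: &[f32]) -> f32 {
    // Single-line if/else expressions get expanded onto multiple lines.
    let smaller = if a < b {
        a
    } else {
        b
    };

    // Binary operators now lead continuation lines instead of trailing them,
    // and long method chains break before each `.` call.
    let mean = xs
        .iter()
        .fold(0.0, |acc, x| acc + x)
        / xs.len() as f32;

    smaller + mean
}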

Cargo.lock (generated), 8 changed lines

@@ -1,7 +1,3 @@
-[root]
-name = "spectra_xyz"
-version = "0.1.0"
-
 [[package]]
 name = "ansi_term"
 version = "0.9.0"
@@ -207,6 +203,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 name = "sobol"
 version = "0.1.0"
 
+[[package]]
+name = "spectra_xyz"
+version = "0.1.0"
+
 [[package]]
 name = "strsim"
 version = "0.6.0"

@@ -15,7 +15,6 @@ use super::bvh_base::{BVHBase, BVHBaseNode, BVH_MAX_DEPTH};
 use super::ACCEL_TRAV_TIME;
 use super::ACCEL_NODE_RAY_TESTS;
 
-
 #[derive(Copy, Clone, Debug)]
 pub struct BVH<'a> {
     root: Option<&'a BVHNode<'a>>,
@@ -175,10 +174,8 @@ impl<'a> BVH<'a> {
             } => {
                 let mut node = unsafe { arena.alloc_uninitialized_with_alignment::<BVHNode>(32) };
 
-                let bounds = arena.copy_slice_with_alignment(
-                    &base.bounds[bounds_range.0..bounds_range.1],
-                    32,
-                );
+                let bounds = arena
+                    .copy_slice_with_alignment(&base.bounds[bounds_range.0..bounds_range.1], 32);
 
                 let child1 = BVH::construct_from_base(arena, base, children_indices.0);
                 let child2 = BVH::construct_from_base(arena, base, children_indices.1);
@@ -219,20 +216,18 @@ impl<'a> Boundable for BVH<'a> {
     fn bounds(&self) -> &[BBox] {
         match self.root {
             None => &DEGENERATE_BOUNDS[..],
-            Some(root) => {
-                match *root {
-                    BVHNode::Internal {
-                        bounds_start,
-                        bounds_len,
-                        ..
-                    } |
-                    BVHNode::Leaf {
-                        bounds_start,
-                        bounds_len,
-                        ..
-                    } => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
-                }
-            }
+            Some(root) => match *root {
+                BVHNode::Internal {
+                    bounds_start,
+                    bounds_len,
+                    ..
+                }
+                | BVHNode::Leaf {
+                    bounds_start,
+                    bounds_len,
+                    ..
+                } => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
+            },
         }
     }
 }

@@ -11,12 +11,11 @@ use lerp::lerp_slice;
 use ray::AccelRay;
 use timer::Timer;
 
-use bvh_order::{TRAVERSAL_TABLE, SplitAxes, calc_traversal_code};
+use bvh_order::{calc_traversal_code, SplitAxes, TRAVERSAL_TABLE};
 use super::bvh_base::{BVHBase, BVHBaseNode, BVH_MAX_DEPTH};
 use super::ACCEL_TRAV_TIME;
 use super::ACCEL_NODE_RAY_TESTS;
 
-
 #[derive(Copy, Clone, Debug)]
 pub struct BVH4<'a> {
     root: Option<&'a BVH4Node<'a>>,
@@ -88,8 +87,8 @@ impl<'a> BVH4<'a> {
                 rays[0].dir_inv.y() < 0.0,
                 rays[0].dir_inv.z() < 0.0,
             ];
 
-            let ray_code = ray_sign_is_neg[0] as usize + ((ray_sign_is_neg[1] as usize) << 1) +
-                ((ray_sign_is_neg[2] as usize) << 2);
+            let ray_code = ray_sign_is_neg[0] as usize + ((ray_sign_is_neg[1] as usize) << 1)
+                + ((ray_sign_is_neg[2] as usize) << 2);
             &TRAVERSAL_TABLE[ray_code]
         };
@@ -271,10 +270,8 @@ impl<'a> BVH4<'a> {
                 }
 
                 // Copy bounds
-                let bounds = arena.copy_slice_with_alignment(
-                    &base.bounds[bounds_range.0..bounds_range.1],
-                    32,
-                );
+                let bounds = arena
+                    .copy_slice_with_alignment(&base.bounds[bounds_range.0..bounds_range.1], 32);
 
                 // Build children
                 let mut children_mem = unsafe {
@@ -317,20 +314,18 @@ impl<'a> Boundable for BVH4<'a> {
     fn bounds(&self) -> &[BBox] {
         match self.root {
             None => &DEGENERATE_BOUNDS[..],
-            Some(root) => {
-                match *root {
-                    BVH4Node::Inner {
-                        bounds_start,
-                        bounds_len,
-                        ..
-                    } |
-                    BVH4Node::Leaf {
-                        bounds_start,
-                        bounds_len,
-                        ..
-                    } => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
-                }
-            }
+            Some(root) => match *root {
+                BVH4Node::Inner {
+                    bounds_start,
+                    bounds_len,
+                    ..
+                }
+                | BVH4Node::Leaf {
+                    bounds_start,
+                    bounds_len,
+                    ..
+                } => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
+            },
        }
     }
 }

@@ -5,8 +5,7 @@ use bbox::BBox;
 
 use lerp::lerp_slice;
 use math::log2_64;
 
-use super::objects_split::{sah_split, median_split};
-
+use super::objects_split::{median_split, sah_split};
 
 pub const BVH_MAX_DEPTH: usize = 42;
@@ -41,8 +40,9 @@ pub enum BVHBaseNode {
 impl BVHBaseNode {
     pub fn bounds_range(&self) -> (usize, usize) {
         match *self {
-            BVHBaseNode::Internal { bounds_range, .. } |
-            BVHBaseNode::Leaf { bounds_range, .. } => bounds_range,
+            BVHBaseNode::Internal { bounds_range, .. } | BVHBaseNode::Leaf { bounds_range, .. } => {
+                bounds_range
+            }
         }
     }
 }
@@ -119,13 +119,13 @@ impl BVHBase {
             // We make sure that it's worth having multiple time samples, and if not
             // we reduce to the union of the time samples.
             self.acc_bounds(objects, bounder);
-            let union_bounds = self.bounds_cache.iter().fold(
-                BBox::new(),
-                |b1, b2| (b1 | *b2),
-            );
-            let average_area = self.bounds_cache.iter().fold(0.0, |area, bb| {
-                area + bb.surface_area()
-            }) / self.bounds_cache.len() as f32;
+            let union_bounds = self.bounds_cache
+                .iter()
+                .fold(BBox::new(), |b1, b2| (b1 | *b2));
+            let average_area = self.bounds_cache
+                .iter()
+                .fold(0.0, |area, bb| area + bb.surface_area())
+                / self.bounds_cache.len() as f32;
             if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) {
                 self.bounds.push(union_bounds);
             } else {
@@ -195,8 +195,8 @@ impl BVHBase {
                 // We make sure that it's worth having multiple time samples, and if not
                 // we reduce to the union of the time samples.
                 let union_bounds = merged.iter().fold(BBox::new(), |b1, b2| (b1 | *b2));
-                let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area()) /
-                    merged.len() as f32;
+                let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area())
+                    / merged.len() as f32;
                 if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) {
                     self.bounds.push(union_bounds);
                 } else {
@@ -204,7 +204,6 @@ impl BVHBase {
             }
         }
 
-
         // Set node
         self.nodes[me] = BVHBaseNode::Internal {
             bounds_range: (bi, self.bounds.len()),

@@ -1,7 +1,7 @@
 use mem_arena::MemArena;
 
 use bbox::BBox;
-use math::{Vector, Point, Normal};
+use math::{Normal, Point, Vector};
 use shading::surface_closure::SurfaceClosure;
 
 use super::LightAccel;

@@ -3,18 +3,17 @@ use mem_arena::MemArena;
 use algorithm::merge_slices_append;
 use bbox::BBox;
 use lerp::lerp_slice;
-use math::{Vector, Point, Normal};
+use math::{Normal, Point, Vector};
 use shading::surface_closure::SurfaceClosure;
 use super::LightAccel;
 use super::objects_split::sah_split;
 
 const ARITY_LOG2: usize = 3; // Determines how much to collapse the binary tree,
                              // implicitly defining the light tree's arity. 1 = no collapsing, leave as binary
                              // tree.
 
 const ARITY: usize = 1 << ARITY_LOG2; // Arity of the final tree
 
-
 #[derive(Copy, Clone, Debug)]
 pub struct LightTree<'a> {
     root: Option<&'a Node<'a>>,
@@ -38,15 +37,13 @@ enum Node<'a> {
 impl<'a> Node<'a> {
     fn bounds(&self) -> &'a [BBox] {
         match *self {
-            Node::Inner { bounds, .. } |
-            Node::Leaf { bounds, .. } => bounds,
+            Node::Inner { bounds, .. } | Node::Leaf { bounds, .. } => bounds,
         }
     }
 
     fn energy(&self) -> f32 {
         match *self {
-            Node::Inner { energy, .. } |
-            Node::Leaf { energy, .. } => energy,
+            Node::Inner { energy, .. } | Node::Leaf { energy, .. } => energy,
        }
     }
@@ -127,7 +124,6 @@ impl<'a> LightTree<'a> {
     }
 }
 
-
 impl<'a> LightAccel for LightTree<'a> {
     fn select(
         &self,
@@ -210,7 +206,6 @@ impl<'a> LightAccel for LightTree<'a> {
     }
 }
 
-
 struct LightTreeBuilder {
     nodes: Vec<BuilderNode>,
     bounds: Vec<BBox>,

@@ -7,10 +7,10 @@ mod objects_split;
 
 use std::cell::Cell;
 
-use math::{Vector, Point, Normal};
+use math::{Normal, Point, Vector};
 use shading::surface_closure::SurfaceClosure;
 
-pub use self::bvh::{BVH, BVHNode};
+pub use self::bvh::{BVHNode, BVH};
 pub use self::bvh4::{BVH4, BVH4Node};
 pub use self::light_tree::LightTree;
 pub use self::light_array::LightArray;

@@ -8,14 +8,12 @@ use halton;
 use algorithm::{partition, quick_select};
 use bbox::BBox;
 use lerp::lerp_slice;
-use math::{Vector, dot};
+use math::{dot, Vector};
 use sampling::uniform_sample_hemisphere;
 
 const SAH_BIN_COUNT: usize = 13; // Prime numbers work best, for some reason
 const SPLIT_PLANE_COUNT: usize = 5;
 
-
-
 /// Takes a slice of boundable objects and partitions them based on the Surface
 /// Area Heuristic, but using arbitrarily oriented planes.
 ///
@@ -66,8 +64,8 @@ where
 
     // Build SAH bins
     let sah_bins = {
-        let mut sah_bins = [[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1];
-                            SPLIT_PLANE_COUNT];
+        let mut sah_bins =
+            [[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1]; SPLIT_PLANE_COUNT];
         for obj in objects.iter() {
             let tb = lerp_slice(bounder(obj), 0.5);
             let centroid = tb.center().into_vector();
@@ -148,7 +146,6 @@ where
     (split_i, approx_axis)
 }
 
-
 /// Takes a slice of boundable objects and partitions them based on the Surface
 /// Area Heuristic.
 ///
@@ -288,7 +285,6 @@ where
     (split_i, split_axis)
 }
 
-
 /// Takes a slice of boundable objects and partitions them based on the median heuristic.
 ///
 /// Returns the index of the partition boundary and the axis that it split on
@@ -321,7 +317,11 @@ where
     let place = {
         let place = objects.len() / 2;
-        if place > 0 { place } else { 1 }
+        if place > 0 {
+            place
+        } else {
+            1
+        }
     };
 
     quick_select(objects, place, |a, b| {
         let tb_a = lerp_slice(bounder(a), 0.5);

@@ -5,8 +5,7 @@ use std::cmp;
 use std::cmp::Ordering;
 
 use hash::hash_u64;
-use lerp::{Lerp, lerp_slice};
+use lerp::{lerp_slice, Lerp};
 
-
 /// Selects an item from a slice based on a weighting function and a
 /// number (n) between 0.0 and 1.0. Returns the index of the selected
@@ -33,7 +32,6 @@ where
     unreachable!()
 }
 
-
 /// Partitions a slice in-place with the given unary predicate, returning
 /// the index of the first element for which the predicate evaluates
 /// false.
@@ -129,7 +127,6 @@ where
     }
 }
 
-
 /// Partitions two slices in-place in concert based on the given unary
 /// predicate, returning the index of the first element for which the
 /// predicate evaluates false.
@@ -167,8 +164,7 @@ where
                 ((a1 as usize) - start) / std::mem::size_of::<A>(),
                 &mut *a1,
                 &mut *a2,
-            )
-            {
+            ) {
                 break;
             }
             a1 = a1.offset(1);
@@ -185,8 +181,7 @@ where
                 ((b1 as usize) - start) / std::mem::size_of::<A>(),
                 &mut *b1,
                 &mut *b2,
-            )
-            {
+            ) {
                 break;
             }
         }
@@ -214,11 +209,10 @@ where
         let i = left + (hash_u64(right as u64, seed) as usize % (right - left));
         slc.swap(i, right - 1);
 
-        let ii = left +
-            {
-                let (val, list) = (&mut slc[left..right]).split_last_mut().unwrap();
-                partition(list, |n| order(n, val) == Ordering::Less)
-            };
+        let ii = left + {
+            let (val, list) = (&mut slc[left..right]).split_last_mut().unwrap();
+            partition(list, |n| order(n, val) == Ordering::Less)
+        };
 
         slc.swap(ii, right - 1);
         if ii == n {
@@ -276,12 +270,10 @@ where
     if slice1.is_empty() || slice2.is_empty() {
         return;
     } else if slice1.len() == slice2.len() {
-        for (xfo, (xf1, xf2)) in
-            Iterator::zip(
-                slice_out.iter_mut(),
-                Iterator::zip(slice1.iter(), slice2.iter()),
-            )
-        {
+        for (xfo, (xf1, xf2)) in Iterator::zip(
+            slice_out.iter_mut(),
+            Iterator::zip(slice1.iter(), slice2.iter()),
+        ) {
             *xfo = merge(xf1, xf2);
         }
     } else if slice1.len() > slice2.len() {
@@ -305,12 +297,14 @@ mod tests {
     use super::*;
 
     fn quick_select_ints(list: &mut [i32], i: usize) {
-        quick_select(list, i, |a, b| if a < b {
-            Ordering::Less
-        } else if a == b {
-            Ordering::Equal
-        } else {
-            Ordering::Greater
-        });
+        quick_select(list, i, |a, b| {
+            if a < b {
+                Ordering::Less
+            } else if a == b {
+                Ordering::Equal
+            } else {
+                Ordering::Greater
+            }
+        });
     }
 

@@ -5,10 +5,9 @@ use std::iter::Iterator;
 use std::ops::{BitOr, BitOrAssign};
 
 use lerp::{lerp, lerp_slice, Lerp};
-use math::{Point, Matrix4x4, fast_minf32};
+use math::{Matrix4x4, Point, fast_minf32};
 use ray::AccelRay;
 
 const BBOX_MAXT_ADJUST: f32 = 1.00000024;
 
-
 /// A 3D axis-aligned bounding box.
@@ -98,15 +97,18 @@ impl BBox {
         }
     }
 }
 
-
 /// Union of two `BBox`es.
 impl BitOr for BBox {
     type Output = BBox;
 
     fn bitor(self, rhs: BBox) -> BBox {
         BBox::from_points(
-            Point { co: self.min.co.v_min(rhs.min.co) },
-            Point { co: self.max.co.v_max(rhs.max.co) },
+            Point {
+                co: self.min.co.v_min(rhs.min.co),
+            },
+            Point {
+                co: self.max.co.v_max(rhs.max.co),
+            },
         )
     }
 }
@@ -123,8 +125,12 @@ impl BitOr<Point> for BBox {
     fn bitor(self, rhs: Point) -> BBox {
         BBox::from_points(
-            Point { co: self.min.co.v_min(rhs.co) },
-            Point { co: self.max.co.v_max(rhs.co) },
+            Point {
+                co: self.min.co.v_min(rhs.co),
+            },
+            Point {
+                co: self.max.co.v_max(rhs.co),
+            },
         )
     }
 }
 
@@ -135,7 +141,6 @@ impl BitOrAssign<Point> for BBox {
     }
 }
 
-
 impl Lerp for BBox {
     fn lerp(self, other: BBox, alpha: f32) -> BBox {
         BBox {
@@ -145,7 +150,6 @@ impl Lerp for BBox {
     }
 }
 
-
 pub fn transform_bbox_slice_from(bbs_in: &[BBox], xforms: &[Matrix4x4], bbs_out: &mut Vec<BBox>) {
     bbs_out.clear();
 

@@ -2,7 +2,6 @@
 
 use bbox::BBox;
 
-
 pub trait Boundable {
     fn bounds(&self) -> &[BBox];
 }

@@ -3,11 +3,10 @@
 use mem_arena::MemArena;
 
 use lerp::lerp_slice;
-use math::{Vector, Point, Matrix4x4};
+use math::{Matrix4x4, Point, Vector};
 use ray::Ray;
 use sampling::square_to_circle;
 
-
 #[derive(Copy, Clone, Debug)]
 pub struct Camera<'a> {
     transforms: &'a [Matrix4x4],
@@ -36,12 +35,12 @@ impl<'a> Camera<'a> {
         if aperture_radii.is_empty() && !focus_distances.is_empty() {
             println!(
                 "WARNING: camera has aperture radius but no focus distance. Disabling \
-                    focal blur."
+                 focal blur."
             );
         } else if !aperture_radii.is_empty() && focus_distances.is_empty() {
             println!(
                 "WARNING: camera has focus distance but no aperture radius. Disabling \
-                    focal blur."
+                 focal blur."
             );
         }
     }

@@ -1,4 +1,4 @@
-use std::ops::{Add, AddAssign, Mul, MulAssign, Div, DivAssign};
+use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign};
 
 use spectra_xyz::{spectrum_xyz_to_p, EQUAL_ENERGY_REFLECTANCE};
 
@@ -6,8 +6,7 @@ use float4::Float4;
 use lerp::Lerp;
 use math::fast_exp;
 
-pub use color_util::{xyz_to_rec709, xyz_to_rec709_e, rec709_to_xyz, rec709_e_to_xyz};
-
+pub use color_util::{rec709_e_to_xyz, rec709_to_xyz, xyz_to_rec709, xyz_to_rec709_e};
 
 // Minimum and maximum wavelength of light we care about, in nanometers
 const WL_MIN: f32 = 380.0;
@@ -38,7 +37,11 @@ pub trait Color {
 
 fn nth_wavelength(hero_wavelength: f32, n: usize) -> f32 {
     let wl = hero_wavelength + (WL_RANGE_Q * n as f32);
-    if wl > WL_MAX { wl - WL_RANGE } else { wl }
+    if wl > WL_MAX {
+        wl - WL_RANGE
+    } else {
+        wl
+    }
 }
 
 //----------------------------------------------------------------
@@ -78,7 +81,11 @@ impl SpectralSample {
     /// Returns the nth wavelength
     fn wl_n(&self, n: usize) -> f32 {
         let wl = self.hero_wavelength + (WL_RANGE_Q * n as f32);
-        if wl > WL_MAX { wl - WL_RANGE } else { wl }
+        if wl > WL_MAX {
+            wl - WL_RANGE
+        } else {
+            wl
+        }
     }
 }
 
@@ -278,8 +285,8 @@ pub fn x_1931(wavelength: f32) -> f32 {
     let t1 = (wavelength - 442.0) * (if wavelength < 442.0 { 0.0624 } else { 0.0374 });
     let t2 = (wavelength - 599.8) * (if wavelength < 599.8 { 0.0264 } else { 0.0323 });
     let t3 = (wavelength - 501.1) * (if wavelength < 501.1 { 0.0490 } else { 0.0382 });
-    (0.362 * fast_exp(-0.5 * t1 * t1)) + (1.056 * fast_exp(-0.5 * t2 * t2)) -
-        (0.065 * fast_exp(-0.5 * t3 * t3))
+    (0.362 * fast_exp(-0.5 * t1 * t1)) + (1.056 * fast_exp(-0.5 * t2 * t2))
+        - (0.065 * fast_exp(-0.5 * t3 * t3))
 }
 
 pub fn y_1931(wavelength: f32) -> f32 {

@@ -3,7 +3,7 @@
 //! This is based on the work in section 3.9 of "Physically Based Rendering:
 //! From Theory to Implementation" 3rd edition by Pharr et al.
 
-use math::{Point, Vector, Normal, dot};
+use math::{dot, Normal, Point, Vector};
 
 #[inline(always)]
 pub fn fp_gamma(n: u32) -> f32 {
@@ -12,7 +12,6 @@ pub fn fp_gamma(n: u32) -> f32 {
     (e * n as f32) / (1.0 - (e * n as f32))
 }
 
-
 pub fn increment_ulp(v: f32) -> f32 {
     // Handle special cases
     if (v.is_infinite() && v > 0.0) || v.is_nan() {
@@ -30,7 +29,6 @@ pub fn increment_ulp(v: f32) -> f32 {
     }
 }
 
-
 pub fn decrement_ulp(v: f32) -> f32 {
     // Handle special cases
     if (v.is_infinite() && v < 0.0) || v.is_nan() {
@@ -53,7 +51,11 @@ pub fn robust_ray_origin(pos: Point, pos_err: f32, nor: Normal, ray_dir: Vector)
     // direction as ray_dir.
     let nor = {
         let nor = nor.into_vector();
-        if dot(nor, ray_dir) >= 0.0 { nor } else { -nor }
+        if dot(nor, ray_dir) >= 0.0 {
+            nor
+        } else {
+            -nor
+        }
     };
 
     // Calculate offset point
@@ -83,7 +85,6 @@ pub fn robust_ray_origin(pos: Point, pos_err: f32, nor: Normal, ray_dir: Vector)
     Point::new(x, y, z)
 }
 
-
 #[inline(always)]
 fn f32_to_bits(v: f32) -> u32 {
     use std::mem::transmute_copy;
@@ -96,7 +97,6 @@ fn bits_to_f32(bits: u32) -> f32 {
     unsafe { transmute_copy::<u32, f32>(&bits) }
 }
 
-
 #[cfg(test)]
 mod tests {
     use super::*;

@@ -39,7 +39,6 @@ pub fn xy2d(x: u32, y: u32) -> u32 {
     d
 }
 
-
 /// Convert hilbert curve index to (x,y).
 ///
 /// d: The hilbert curve index.

@@ -14,8 +14,7 @@ use half::f16;
 use png_encode_mini;
 use openexr;
 
-use color::{XYZ, xyz_to_rec709_e};
-
+use color::{xyz_to_rec709_e, XYZ};
 
 #[derive(Debug)]
 pub struct Image {
@@ -260,8 +259,8 @@ impl<'a> Drop for Bucket<'a> {
 
         // Find matching bucket and remove it
         let i = bucket_list.iter().position(|bucket| {
-            (bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 &&
-                (bucket.1).0 == self.max.0 && (bucket.1).1 == self.max.1
+            (bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 && (bucket.1).0 == self.max.0
+                && (bucket.1).1 == self.max.1
         });
         bucket_list.swap_remove(i.unwrap());
     }

@@ -8,7 +8,6 @@ pub trait Lerp {
     fn lerp(self, other: Self, alpha: f32) -> Self;
 }
 
-
 /// Interpolates between two instances of a Lerp types.
 pub fn lerp<T: Lerp>(a: T, b: T, alpha: f32) -> T {
     debug_assert!(alpha >= 0.0);
@@ -17,7 +16,6 @@ pub fn lerp<T: Lerp>(a: T, b: T, alpha: f32) -> T {
     a.lerp(b, alpha)
 }
 
-
 /// Interpolates a slice of data as if each adjecent pair of elements
 /// represent a linear segment.
 pub fn lerp_slice<T: Lerp + Copy>(s: &[T], alpha: f32) -> T {
@@ -58,7 +56,6 @@ where
     }
 }
 
-
 impl Lerp for f32 {
     fn lerp(self, other: f32, alpha: f32) -> f32 {
         (self * (1.0 - alpha)) + (other * alpha)
@@ -103,23 +100,22 @@ impl Lerp for Normal {
     }
 }
 
-
 impl Lerp for Point {
     fn lerp(self, other: Point, alpha: f32) -> Point {
         let s = self.norm();
         let o = other.norm();
-        Point { co: (s.co * (1.0 - alpha)) + (o.co * alpha) }
+        Point {
+            co: (s.co * (1.0 - alpha)) + (o.co * alpha),
+        }
     }
 }
 
-
 impl Lerp for Vector {
     fn lerp(self, other: Vector, alpha: f32) -> Vector {
         (self * (1.0 - alpha)) + (other * alpha)
     }
 }
 
-
 #[cfg(test)]
 mod tests {
     use super::*;

@@ -2,9 +2,9 @@ use std::f64::consts::PI as PI_64;
 
 use mem_arena::MemArena;
 
-use color::{XYZ, SpectralSample, Color};
+use color::{Color, SpectralSample, XYZ};
 use lerp::lerp_slice;
-use math::{Vector, coordinate_system_from_vector};
+use math::{coordinate_system_from_vector, Vector};
 use sampling::{uniform_sample_cone, uniform_sample_cone_pdf};
 
 use super::WorldLightSource;
@@ -87,10 +87,10 @@ impl<'a> WorldLightSource for DistantDiskLight<'a> {
     }
 
     fn approximate_energy(&self) -> f32 {
-        let color: XYZ = self.colors.iter().fold(
-            XYZ::new(0.0, 0.0, 0.0),
-            |a, &b| a + b,
-        ) / self.colors.len() as f32;
+        let color: XYZ = self.colors
+            .iter()
+            .fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b)
+            / self.colors.len() as f32;
         color.y
     }
 }

@@ -5,14 +5,13 @@ mod sphere_light;
 use std::fmt::Debug;
 
 use color::SpectralSample;
-use math::{Vector, Normal, Point, Matrix4x4};
+use math::{Matrix4x4, Normal, Point, Vector};
 use surface::Surface;
 
 pub use self::distant_disk_light::DistantDiskLight;
 pub use self::rectangle_light::RectangleLight;
 pub use self::sphere_light::SphereLight;
 
-
 /// A finite light source that can be bounded in space.
 pub trait SurfaceLight: Surface {
     /// Samples the surface given a point to be illuminated.
@@ -40,7 +39,6 @@ pub trait SurfaceLight: Surface {
         time: f32,
     ) -> (SpectralSample, (Point, Normal, f32), f32);
 
-
     /// Returns whether the light has a delta distribution.
     ///
     /// If a light has no chance of a ray hitting it through random process
@@ -48,7 +46,6 @@ pub trait SurfaceLight: Surface {
     /// lights that only emit in a single direction, etc.
     fn is_delta(&self) -> bool;
 
-
    /// Returns an approximation of the total energy emitted by the surface.
     ///
     /// Note: this does not need to be exact, but it does need to be non-zero
@@ -57,7 +54,6 @@ pub trait SurfaceLight: Surface {
     fn approximate_energy(&self) -> f32;
 }
 
-
 /// An infinite light source that cannot be bounded in space. E.g.
 /// a sun light source.
 pub trait WorldLightSource: Debug + Sync {
@@ -78,7 +74,6 @@ pub trait WorldLightSource: Debug + Sync {
         time: f32,
     ) -> (SpectralSample, Vector, f32);
 
-
     /// Returns whether the light has a delta distribution.
     ///
     /// If a light has no chance of a ray hitting it through random process
@@ -86,7 +81,6 @@ pub trait WorldLightSource: Debug + Sync {
     /// lights that only emit in a single direction, etc.
     fn is_delta(&self) -> bool;
 
-
     /// Returns an approximation of the total energy emitted by the light
     /// source.
     ///

@@ -2,22 +2,20 @@ use mem_arena::MemArena;
 
 use bbox::BBox;
 use boundable::Boundable;
-use color::{XYZ, SpectralSample, Color};
+use color::{Color, SpectralSample, XYZ};
 use lerp::lerp_slice;
-use math::{Vector, Normal, Point, Matrix4x4, cross, dot};
-use ray::{Ray, AccelRay};
-use sampling::{spherical_triangle_solid_angle, uniform_sample_spherical_triangle,
-               triangle_surface_area, uniform_sample_triangle};
-use shading::surface_closure::{SurfaceClosureUnion, EmitClosure};
+use math::{cross, dot, Matrix4x4, Normal, Point, Vector};
+use ray::{AccelRay, Ray};
+use sampling::{spherical_triangle_solid_angle, triangle_surface_area,
+               uniform_sample_spherical_triangle, uniform_sample_triangle};
+use shading::surface_closure::{EmitClosure, SurfaceClosureUnion};
 use shading::SurfaceShader;
-use surface::{Surface, SurfaceIntersection, SurfaceIntersectionData, triangle};
+use surface::{triangle, Surface, SurfaceIntersection, SurfaceIntersectionData};
 
 use super::SurfaceLight;
 
-
 const SIMPLE_SAMPLING_THRESHOLD: f32 = 0.01;
 
-
 #[derive(Copy, Clone, Debug)]
 pub struct RectangleLight<'a> {
     dimensions: &'a [(f32, f32)],
@@ -33,11 +31,9 @@ impl<'a> RectangleLight<'a> {
     ) -> RectangleLight<'b> {
         let bbs: Vec<_> = dimensions
             .iter()
-            .map(|d| {
-                BBox {
-                    min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0),
-                    max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0),
-                }
-            })
+            .map(|d| BBox {
+                min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0),
+                max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0),
+            })
             .collect();
         RectangleLight {
@@ -87,9 +83,9 @@ impl<'a> RectangleLight<'a> {
         // PDF
         if (area_1 + area_2) < SIMPLE_SAMPLING_THRESHOLD {
             let area = triangle_surface_area(p2, p1, p3) + triangle_surface_area(p4, p1, p3);
-            (hit_point - arr).length2() /
-                dot(sample_dir.normalized(), normal.into_vector().normalized()).abs() /
-                area
+            (hit_point - arr).length2()
+                / dot(sample_dir.normalized(), normal.into_vector().normalized()).abs()
+                / area
         } else {
             1.0 / (area_1 + area_2)
         }
@@ -188,9 +184,9 @@ impl<'a> SurfaceLight for RectangleLight<'a> {
             let shadow_vec = sample_point - arr;
             let spectral_sample =
                 (col * surface_area_inv as f32 * 0.5).to_spectral_sample(wavelength);
-            let pdf = (sample_point - arr).length2() /
-                dot(shadow_vec.normalized(), normal.into_vector().normalized()).abs() /
-                (surface_area_1 + surface_area_2);
+            let pdf = (sample_point - arr).length2()
+                / dot(shadow_vec.normalized(), normal.into_vector().normalized()).abs()
+                / (surface_area_1 + surface_area_2);
             let point_err = 0.0001; // TODO: this is a hack, do properly.
             (spectral_sample, (sample_point, normal, point_err), pdf)
         } else {
@@ -246,15 +242,14 @@ impl<'a> SurfaceLight for RectangleLight<'a> {
     }
 
     fn approximate_energy(&self) -> f32 {
-        let color: XYZ = self.colors.iter().fold(
-            XYZ::new(0.0, 0.0, 0.0),
-            |a, &b| a + b,
-        ) / self.colors.len() as f32;
+        let color: XYZ = self.colors
+            .iter()
+            .fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b)
+            / self.colors.len() as f32;
         color.y
     }
 }
 
-
 impl<'a> Surface for RectangleLight<'a> {
     fn intersect_rays(
         &self,
@@ -313,9 +308,9 @@ impl<'a> Surface for RectangleLight<'a> {
 
                 let closure = {
                     let inv_surface_area = (1.0 / (dim.0 as f64 * dim.1 as f64)) as f32;
-                    let color = lerp_slice(self.colors, r.time).to_spectral_sample(
-                        wr.wavelength,
-                    ) * inv_surface_area;
+                    let color = lerp_slice(self.colors, r.time)
+                        .to_spectral_sample(wr.wavelength)
+                        * inv_surface_area;
                     SurfaceClosureUnion::EmitClosure(EmitClosure::new(color))
                 };
 

@@ -4,12 +4,12 @@ use mem_arena::MemArena;
 
 use bbox::BBox;
 use boundable::Boundable;
-use color::{XYZ, SpectralSample, Color};
+use color::{Color, SpectralSample, XYZ};
 use lerp::lerp_slice;
-use math::{Vector, Normal, Point, Matrix4x4, dot, coordinate_system_from_vector};
-use ray::{Ray, AccelRay};
+use math::{coordinate_system_from_vector, dot, Matrix4x4, Normal, Point, Vector};
+use ray::{AccelRay, Ray};
 use sampling::{uniform_sample_cone, uniform_sample_cone_pdf, uniform_sample_sphere};
-use shading::surface_closure::{SurfaceClosureUnion, EmitClosure};
+use shading::surface_closure::{EmitClosure, SurfaceClosureUnion};
 use shading::SurfaceShader;
 use surface::{Surface, SurfaceIntersection, SurfaceIntersectionData};
 
@@ -32,11 +32,9 @@ impl<'a> SphereLight<'a> {
     pub fn new<'b>(arena: &'b MemArena, radii: Vec<f32>, colors: Vec<XYZ>) -> SphereLight<'b> {
         let bbs: Vec<_> = radii
            .iter()
-            .map(|r| {
-                BBox {
-                    min: Point::new(-*r, -*r, -*r),
-                    max: Point::new(*r, *r, *r),
-                }
-            })
+            .map(|r| BBox {
+                min: Point::new(-*r, -*r, -*r),
+                max: Point::new(*r, *r, *r),
+            })
             .collect();
         SphereLight {
@@ -81,7 +79,6 @@ impl<'a> SphereLight<'a> {
     }
 }
 
-
 impl<'a> SurfaceLight for SphereLight<'a> {
     fn sample_from_point(
         &self,
@@ -197,15 +194,14 @@ impl<'a> SurfaceLight for SphereLight<'a> {
     }
 
     fn approximate_energy(&self) -> f32 {
-        let color: XYZ = self.colors.iter().fold(
-            XYZ::new(0.0, 0.0, 0.0),
-            |a, &b| a + b,
-        ) / self.colors.len() as f32;
+        let color: XYZ = self.colors
+            .iter()
+            .fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b)
+            / self.colors.len() as f32;
        color.y
     }
 }
 
-
 impl<'a> Surface for SphereLight<'a> {
     fn intersect_rays(
         &self,
@@ -260,7 +256,8 @@ impl<'a> Surface for SphereLight<'a> {
 
                 // Get our final parametric values
                 let mut t0 = q / a;
-                let mut t1 = if q != 0.0 { c / q } else { r.max_t };
+                let mut t1 =
+                    if q != 0.0 { c / q } else { r.max_t };
 
                 // Swap them so they are ordered right
                 if t0 > t1 {
@@ -323,11 +320,10 @@ impl<'a> Surface for SphereLight<'a> {
                 };
 
                 let closure = {
-                    let inv_surface_area = (1.0 / (4.0 * PI_64 * radius as f64 * radius as f64)) as
-                        f32;
-                    let color = lerp_slice(self.colors, r.time).to_spectral_sample(
-                        wr.wavelength,
-                    ) * inv_surface_area;
+                    let inv_surface_area =
+                        (1.0 / (4.0 * PI_64 * radius as f64 * radius as f64)) as f32;
+                    let color = lerp_slice(self.colors, r.time).to_spectral_sample(wr.wavelength)
+                        * inv_surface_area;
                     SurfaceClosureUnion::EmitClosure(EmitClosure::new(color))
                 };
 
@@ -344,7 +340,6 @@ impl<'a> Surface for SphereLight<'a> {
     }
 }
 
-
 impl<'a> Boundable for SphereLight<'a> {
     fn bounds(&self) -> &[BBox] {
         self.bounds_

@@ -68,16 +68,13 @@ use clap::{App, Arg};
 use mem_arena::MemArena;
 
 use parse::{parse_scene, DataTree};
-use ray::{Ray, AccelRay};
+use ray::{AccelRay, Ray};
 use surface::SurfaceIntersection;
 use renderer::LightPath;
 use bbox::BBox;
-use accel::{BVHNode, BVH4Node};
+use accel::{BVH4Node, BVHNode};
 use timer::Timer;
 
-
 const VERSION: &'static str = env!("CARGO_PKG_VERSION");
 
-
-
 fn main() {
@@ -104,10 +101,9 @@ fn main() {
                 .help("Number of samples per pixel")
                 .takes_value(true)
                 .validator(|s| {
-                    usize::from_str(&s).and(Ok(())).or(Err(
-                        "must be an integer"
-                            .to_string(),
-                    ))
+                    usize::from_str(&s)
+                        .and(Ok(()))
+                        .or(Err("must be an integer".to_string()))
                 }),
         )
         .arg(
@@ -115,15 +111,12 @@ fn main() {
                 .short("b")
                 .long("spb")
                 .value_name("N")
-                .help(
-                    "Target number of samples per bucket (determines bucket size)",
-                )
+                .help("Target number of samples per bucket (determines bucket size)")
                 .takes_value(true)
                 .validator(|s| {
-                    usize::from_str(&s).and(Ok(())).or(Err(
-                        "must be an integer"
-                            .to_string(),
-                    ))
+                    usize::from_str(&s)
+                        .and(Ok(()))
+                        .or(Err("must be an integer".to_string()))
                 }),
         )
         .arg(
@@ -132,15 +125,14 @@ fn main() {
                .value_name("X1 Y1 X2 Y2")
                 .help(
                     "Only render the image between pixel coordinates (X1, Y1) \
                      and (X2, Y2). Coordinates are zero-indexed and inclusive.",
                 )
                 .takes_value(true)
                 .number_of_values(4)
                 .validator(|s| {
-                    usize::from_str(&s).and(Ok(())).or(Err(
-                        "must be four integers"
-                            .to_string(),
-                    ))
+                    usize::from_str(&s)
+                        .and(Ok(()))
+                        .or(Err("must be four integers".to_string()))
                 }),
         )
        .arg(
@@ -150,22 +142,25 @@ fn main() {
                 .value_name("N")
                 .help(
                     "Number of threads to render with. Defaults to the number of logical \
                      cores on the system.",
                 )
                 .takes_value(true)
                 .validator(|s| {
-                    usize::from_str(&s).and(Ok(())).or(Err(
-                        "must be an integer"
-                            .to_string(),
-                    ))
+                    usize::from_str(&s)
+                        .and(Ok(()))
+                        .or(Err("must be an integer".to_string()))
                 }),
         )
-        .arg(Arg::with_name("stats").long("stats").help(
-            "Print additional statistics about rendering",
-        ))
-        .arg(Arg::with_name("dev").long("dev").help(
-            "Show useful dev/debug info.",
-        ))
+        .arg(
+            Arg::with_name("stats")
+                .long("stats")
+                .help("Print additional statistics about rendering"),
+        )
+        .arg(
+            Arg::with_name("dev")
+                .long("dev")
+                .help("Show useful dev/debug info."),
+        )
         .arg(
             Arg::with_name("serialized_output")
                 .long("serialized_output")
@@ -213,9 +208,7 @@ fn main() {
     // Parse data tree of scene file
     if !args.is_present("serialized_output") {
-        println!(
-            "Parsing scene file...",
-        );
+        println!("Parsing scene file...",);
     }
 
     t.tick();
     let psy_contents = if args.is_present("use_stdin") {
@@ -225,9 +218,9 @@ fn main() {
         let mut stdin = tmp.lock();
         let mut buf = vec![0u8; 4096];
         loop {
-            let count = stdin.read(&mut buf).expect(
-                "Unexpected end of scene input.",
-            );
+            let count = stdin
+                .read(&mut buf)
+                .expect("Unexpected end of scene input.");
             let start = if input.len() < 11 {
                 0
             } else {
@@ -238,8 +231,7 @@ fn main() {
             let mut done = false;
            let mut trunc_len = 0;
 
-            if let nom::IResult::Done(remaining, _) =
-                take_until!(&input[start..end], "__PSY_EOF__")
+            if let nom::IResult::Done(remaining, _) = take_until!(&input[start..end], "__PSY_EOF__")
             {
                 done = true;
                 trunc_len = input.len() - remaining.len();
@@ -344,9 +336,9 @@ fn main() {
     if !args.is_present("serialized_output") {
         println!("Writing image to disk into '{}'...", r.output_file);
         if r.output_file.ends_with(".png") {
-            image.write_png(Path::new(&r.output_file)).expect(
-                "Failed to write png...",
-            );
+            image
+                .write_png(Path::new(&r.output_file))
+                .expect("Failed to write png...");
        } else if r.output_file.ends_with(".exr") {
            image.write_exr(Path::new(&r.output_file));
        } else {

@@ -2,8 +2,7 @@
 
 use std::f32;
 
-pub use math3d::{Matrix4x4, Normal, Point, Vector, DotProduct, dot, CrossProduct, cross};
-
+pub use math3d::{cross, dot, CrossProduct, DotProduct, Matrix4x4, Normal, Point, Vector};
 
 /// Clamps a value between a min and max.
 pub fn clamp<T: PartialOrd>(v: T, lower: T, upper: T) -> T {
@@ -18,12 +17,20 @@ pub fn clamp<T: PartialOrd>(v: T, lower: T, upper: T) -> T {
 
 // The stdlib min function is slower than a simple if statement for some reason.
 pub fn fast_minf32(a: f32, b: f32) -> f32 {
-    if a < b { a } else { b }
+    if a < b {
+        a
+    } else {
+        b
+    }
 }
 
 // The stdlib max function is slower than a simple if statement for some reason.
 pub fn fast_maxf32(a: f32, b: f32) -> f32 {
-    if a > b { a } else { b }
+    if a > b {
+        a
+    } else {
+        b
+    }
 }
 
 /// Rounds an integer up to the next power of two.
@@ -119,7 +126,6 @@ pub fn fast_logit(p: f32, width: f32) -> f32 {
     fast_ln(n / (1.0 - n)) * width * (0.6266 / 4.0)
 }
 
-
 //----------------------------------------------------------------
 // Adapted to Rust from https://code.google.com/archive/p/fastapprox/
 
@@ -139,9 +145,9 @@ pub fn fast_pow2(p: f32) -> f32 {
     let w: i32 = clipp as i32;
     let z: f32 = clipp - w as f32 + offset;
-    let i: u32 = ((1 << 23) as f32 *
-                      (clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z)) as
-        u32;
+    let i: u32 = ((1 << 23) as f32
+        * (clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z))
+        as u32;
 
     unsafe { transmute_copy::<u32, f32>(&i) }
 }
 
@@ -177,11 +183,9 @@ pub fn faster_exp(p: f32) -> f32 {
     faster_pow2(f32::consts::LOG2_E * p)
 }
 
-
 // End of adapted code
 //----------------------------------------------------------------
 
-
 #[cfg(test)]
 mod tests {
     use super::*;

@@ -3,10 +3,9 @@
 
 use std::str;
 
-use nom::{IResult, Needed, digit, multispace};
+use nom::{digit, multispace, IResult, Needed};
 use nom::IResult::*;
 
-
 // Parsers for numbers surrounded by whitespace
 named!(pub ws_u32<u32>, delimited!(opt!(multispace), u32_utf8, opt!(multispace)));
 named!(pub ws_u64<u64>, delimited!(opt!(multispace), u64_utf8, opt!(multispace)));
@@ -17,9 +16,6 @@ named!(pub ws_isize<isize>, delimited!(opt!(multispace), isize_utf8, opt!(multis
 named!(pub ws_f32<f32>, delimited!(opt!(multispace), f32_utf8, opt!(multispace)));
 named!(pub ws_f64<f64>, delimited!(opt!(multispace), f64_utf8, opt!(multispace)));
 
-
-
 // ========================================================
 
-
 named!(pub u32_utf8<u32>, chain!(
@@ -131,9 +127,6 @@ fn take_decimal_real(i: &[u8]) -> IResult<&[u8], &[u8]> {
     }
 }
 
-
-
 // ========================================================
 
-
 #[cfg(test)]

@@ -4,7 +4,6 @@ use std::iter::Iterator;
 use std::result::Result;
 use std::slice;
 
-
 #[derive(Debug, Eq, PartialEq)]
 pub enum DataTree<'a> {
     Internal {
@@ -21,7 +20,6 @@ pub enum DataTree<'a> {
     },
 }
 
-
 impl<'a> DataTree<'a> {
     pub fn from_str(source_text: &'a str) -> Result<DataTree<'a>, ParseError> {
         let mut items = Vec::new();
@@ -49,15 +47,15 @@ impl<'a> DataTree<'a> {
 
     pub fn type_name(&'a self) -> &'a str {
         match *self {
-            DataTree::Internal { type_name, .. } |
-            DataTree::Leaf { type_name, .. } => type_name,
+            DataTree::Internal { type_name, .. } | DataTree::Leaf { type_name, .. } => type_name,
        }
     }
 
     pub fn byte_offset(&'a self) -> usize {
         match *self {
-            DataTree::Internal { byte_offset, .. } |
-            DataTree::Leaf { byte_offset, .. } => byte_offset,
+            DataTree::Internal { byte_offset, .. } | DataTree::Leaf { byte_offset, .. } => {
+                byte_offset
+            }
         }
     }
 
@@ -166,7 +164,6 @@ impl<'a> DataTree<'a> {
     }
 }
 
-
 /// An iterator over the children of a `DataTree` node that filters out the
 /// children not matching a specified type name.
 pub struct DataTreeFilterIter<'a> {
@@ -192,7 +189,6 @@ impl<'a> Iterator for DataTreeFilterIter<'a> {
     }
 }
 
-
 /// An iterator over the children of a `DataTree` node that filters out the
 /// children that aren't internal nodes and that don't match a specified
 /// type name.
@@ -208,11 +204,11 @@ impl<'a> Iterator for DataTreeFilterInternalIter<'a> {
         loop {
             match self.iter.next() {
                 Some(&DataTree::Internal {
-                         type_name,
-                         ident,
-                         ref children,
-                         byte_offset,
-                     }) => {
+                    type_name,
+                    ident,
+                    ref children,
+                    byte_offset,
+                }) => {
                     if type_name == self.type_name {
                         return Some((type_name, ident, children, byte_offset));
                     } else {
@@ -232,7 +228,6 @@ impl<'a> Iterator for DataTreeFilterInternalIter<'a> {
     }
 }
 
-
 /// An iterator over the children of a `DataTree` node that filters out the
 /// children that aren't internal nodes and that don't match a specified
 /// type name.
@@ -252,10 +247,10 @@ impl<'a> Iterator for DataTreeFilterLeafIter<'a> {
                 }
                 Some(&DataTree::Leaf {
-                         type_name,
-                         contents,
-                         byte_offset,
-                     }) => {
+                    type_name,
+                    contents,
+                    byte_offset,
+                }) => {
                     if type_name == self.type_name {
                         return Some((type_name, contents, byte_offset));
                     } else {
@@ -271,7 +266,6 @@ impl<'a> Iterator for DataTreeFilterLeafIter<'a> {
     }
 }
 
-
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
 pub enum ParseError {
     MissingOpener(usize),
@@ -285,9 +279,6 @@ pub enum ParseError {
     Other((usize, &'static str)),
 }
 
-
-
 // ================================================================
 
-
 #[derive(Debug, PartialEq, Eq)]
@@ -386,7 +377,6 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> {
     }
 }
 
-
 fn parse_leaf_content(source_text: (usize, &str)) -> (&str, (usize, &str)) {
     let mut si = 1;
     let mut escaped = false;
@@ -407,13 +397,12 @@ fn parse_leaf_content(source_text: (usize, &str)) -> (&str, (usize, &str)) {
         si = source_text.1.len();
     }
 
-    return (&source_text.1[0..si], (
-        source_text.0 + si,
-        &source_text.1[si..],
-    ));
+    return (
+        &source_text.1[0..si],
+        (source_text.0 + si, &source_text.1[si..]),
+    );
 }
 
-
 fn next_token<'a>(source_text: (usize, &'a str)) -> (Token<'a>, (usize, &'a str)) {
     let text1 = skip_ws_and_comments(source_text);
 
@@ -480,13 +469,12 @@ fn next_token<'a>(source_text: (usize, &'a str)) -> (Token<'a>, (usize, &'a str)
                     si = text1.1.len();
                 }
 
-                return (Token::TypeName(&text1.1[0..si]), (
-                    text1.0 + si,
-                    &text1.1[si..],
-                ));
-
+                return (
+                    Token::TypeName(&text1.1[0..si]),
+                    (text1.0 + si, &text1.1[si..]),
+                );
            }
         }
     }
    } else {
        return (Token::End, text1);
@@ -576,9 +564,6 @@ fn skip_ws_and_comments(text: (usize, &str)) -> (usize, &str) {
     return (offset, remaining_text);
 }
 
-
-
 // ================================================================
 
-
 #[cfg(test)]
@@ -623,10 +608,10 @@ mod tests {
     fn tokenize_5() {
         let input = (0, " $hi\\ t\\#he\\[re ");
 
-        assert_eq!(next_token(input), (
-            Token::Ident("$hi\\ t\\#he\\[re"),
-            (15, " "),
-        ));
+        assert_eq!(
+            next_token(input),
+            (Token::Ident("$hi\\ t\\#he\\[re"), (15, " "),)
+        );
     }
 
     #[test]
@@ -657,18 +642,24 @@ mod tests {
         let (token7, input8) = next_token(input7);
         let (token8, input9) = next_token(input8);
 
-        assert_eq!((token1, input2), (Token::TypeName("Thing"), (
-            5,
-            " $yar { # A comment\n\tThing2 []\n}",
-        )));
-        assert_eq!((token2, input3), (Token::Ident("$yar"), (
-            10,
-            " { # A comment\n\tThing2 []\n}",
-        )));
-        assert_eq!((token3, input4), (Token::OpenInner, (
-            12,
-            " # A comment\n\tThing2 []\n}",
-        )));
+        assert_eq!(
+            (token1, input2),
+            (
+                Token::TypeName("Thing"),
+                (5, " $yar { # A comment\n\tThing2 []\n}",)
+            )
+        );
+        assert_eq!(
+            (token2, input3),
+            (
+                Token::Ident("$yar"),
+                (10, " { # A comment\n\tThing2 []\n}",)
+            )
+        );
+        assert_eq!(
+            (token3, input4),
+            (Token::OpenInner, (12, " # A comment\n\tThing2 []\n}",))
+        );
         assert_eq!(
             (token4, input5),
             (Token::TypeName("Thing2"), (32, " []\n}"))

@@ -9,29 +9,28 @@ use nom::IResult;
 
 use mem_arena::MemArena;
 
 use camera::Camera;
-use color::{XYZ, rec709_e_to_xyz};
+use color::{rec709_e_to_xyz, XYZ};
 use light::WorldLightSource;
 use math::Matrix4x4;
 use renderer::Renderer;
 use scene::Scene;
 use scene::World;
-use super::basics::{ws_u32, ws_f32};
+use super::basics::{ws_f32, ws_u32};
 use super::DataTree;
 use super::psy_assembly::parse_assembly;
 use super::psy_light::parse_distant_disk_light;
 
-
 #[derive(Debug)]
 pub enum PsyParseError {
     // The first usize for all errors is their byte offset
     // into the psy content where they occured.
     UnknownError(usize),
     UnknownVariant(usize, &'static str), // Error message
     ExpectedInternalNode(usize, &'static str), // Error message
     ExpectedLeafNode(usize, &'static str), // Error message
     MissingNode(usize, &'static str), // Error message
     IncorrectLeafData(usize, &'static str), // Error message
     WrongNodeCount(usize, &'static str, usize), // Error message, sections found
     InstancedMissingData(usize, &'static str, String), // Error message, data name
 }
@@ -43,7 +42,7 @@ impl PsyParseError {
                 let line = line_count_to_byte_offset(psy_content, offset);
                 println!(
                     "Line {}: Unknown parse error. If you get this message, please report \
-                        it to the developers so they can improve the error messages.",
+                     it to the developers so they can improve the error messages.",
                     line
                 );
             }
@@ -90,7 +89,6 @@ fn line_count_to_byte_offset(text: &str, offset: usize) -> usize {
     text[..offset].matches('\n').count() + 1
 }
 
-
 /// Takes in a `DataTree` representing a Scene node and returns
 pub fn parse_scene<'a>(
     arena: &'a MemArena,
@@ -102,7 +100,7 @@ pub fn parse_scene<'a>(
         return Err(PsyParseError::WrongNodeCount(
             tree.byte_offset(),
             "Scene should have precisely one Output \
-                section.",
+             section.",
            count,
        ));
    }
@@ -111,7 +109,7 @@ pub fn parse_scene<'a>(
        return Err(PsyParseError::WrongNodeCount(
            tree.byte_offset(),
            "Scene should have precisely one \
-                RenderSettings section.",
+             RenderSettings section.",
            count,
        ));
    }
@@ -120,7 +118,7 @@ pub fn parse_scene<'a>(
        return Err(PsyParseError::WrongNodeCount(
            tree.byte_offset(),
            "Scene should have precisely one Camera \
-                section.",
+             section.",
            count,
        ));
    }
@@ -137,7 +135,7 @@ pub fn parse_scene<'a>(
        return Err(PsyParseError::WrongNodeCount(
            tree.byte_offset(),
            "Scene should have precisely one Root Assembly \
-                section.",
+             section.",
            count,
        ));
    }
@@ -199,9 +197,6 @@ pub fn parse_scene<'a>(
     return Ok(renderer);
 }
 
-
-
-
 fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
     if let DataTree::Internal { ref children, .. } = *tree {
         let mut found_path = false;
@@ -213,21 +208,22 @@ fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
                     type_name,
                     contents,
                     byte_offset,
-                } if type_name == "Path" => {
+                } if type_name == "Path" =>
+                {
                     // Trim and validate
                     let tc = contents.trim();
                     if tc.chars().count() < 2 {
                         return Err(PsyParseError::IncorrectLeafData(
                             byte_offset,
                             "File path format is \
-                                incorrect.",
+                             incorrect.",
                        ));
                    }
                    if tc.chars().nth(0).unwrap() != '"' || tc.chars().last().unwrap() != '"' {
                        return Err(PsyParseError::IncorrectLeafData(
                            byte_offset,
                            "File paths must be \
-                                surrounded by quotes.",
+                             surrounded by quotes.",
                        ));
                    }
                    let len = tc.len();
@@ -255,14 +251,11 @@ fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
        return Err(PsyParseError::ExpectedInternalNode(
            tree.byte_offset(),
            "Output section should be an internal \
-                node, containing at least a Path.",
+             node, containing at least a Path.",
        ));
    };
 }
 
-
-
-
 fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyParseError> {
     if let DataTree::Internal { ref children, .. } = *tree {
         let mut found_res = false;
@@ -278,7 +271,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
                     type_name,
                     contents,
                     byte_offset,
-                } if type_name == "Resolution" => {
+                } if type_name == "Resolution" =>
+                {
                     if let IResult::Done(_, (w, h)) =
                         closure!(terminated!(tuple!(ws_u32, ws_u32), nom::eof))(contents.as_bytes())
{ {
@ -299,7 +293,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "SamplesPerPixel" => { } if type_name == "SamplesPerPixel" =>
{
if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) { if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) {
found_spp = true; found_spp = true;
spp = n; spp = n;
@ -308,8 +303,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
return Err(PsyParseError::IncorrectLeafData( return Err(PsyParseError::IncorrectLeafData(
byte_offset, byte_offset,
"SamplesPerPixel should be \ "SamplesPerPixel should be \
an integer specified in \ an integer specified in \
the form '[samples]'.", the form '[samples]'.",
)); ));
} }
} }
@ -319,7 +314,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Seed" => { } if type_name == "Seed" =>
{
if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) { if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) {
seed = n; seed = n;
} else { } else {
@ -327,8 +323,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
return Err(PsyParseError::IncorrectLeafData( return Err(PsyParseError::IncorrectLeafData(
byte_offset, byte_offset,
"Seed should be an integer \ "Seed should be an integer \
specified in the form \ specified in the form \
'[samples]'.", '[samples]'.",
)); ));
} }
} }
@ -343,22 +339,19 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
return Err(PsyParseError::MissingNode( return Err(PsyParseError::MissingNode(
tree.byte_offset(), tree.byte_offset(),
"RenderSettings must have both Resolution and \ "RenderSettings must have both Resolution and \
SamplesPerPixel specified.", SamplesPerPixel specified.",
)); ));
} }
} else { } else {
return Err(PsyParseError::ExpectedInternalNode( return Err(PsyParseError::ExpectedInternalNode(
tree.byte_offset(), tree.byte_offset(),
"RenderSettings section should be an \ "RenderSettings section should be an \
internal node, containing at least \ internal node, containing at least \
Resolution and SamplesPerPixel.", Resolution and SamplesPerPixel.",
)); ));
}; };
} }
fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a>, PsyParseError> { fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a>, PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree { if let DataTree::Internal { ref children, .. } = *tree {
let mut mats = Vec::new(); let mut mats = Vec::new();
@ -374,7 +367,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Fov" => { } if type_name == "Fov" =>
{
if let IResult::Done(_, fov) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, fov) = ws_f32(contents.as_bytes()) {
fovs.push(fov * (f32::consts::PI / 180.0)); fovs.push(fov * (f32::consts::PI / 180.0));
} else { } else {
@ -382,8 +376,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::IncorrectLeafData( return Err(PsyParseError::IncorrectLeafData(
byte_offset, byte_offset,
"Fov should be a decimal \ "Fov should be a decimal \
number specified in the \ number specified in the \
form '[fov]'.", form '[fov]'.",
)); ));
} }
} }
@ -393,7 +387,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "FocalDistance" => { } if type_name == "FocalDistance" =>
{
if let IResult::Done(_, fd) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, fd) = ws_f32(contents.as_bytes()) {
focus_distances.push(fd); focus_distances.push(fd);
} else { } else {
@ -401,8 +396,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::IncorrectLeafData( return Err(PsyParseError::IncorrectLeafData(
byte_offset, byte_offset,
"FocalDistance should be a \ "FocalDistance should be a \
decimal number specified \ decimal number specified \
in the form '[fov]'.", in the form '[fov]'.",
)); ));
} }
} }
@ -412,7 +407,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "ApertureRadius" => { } if type_name == "ApertureRadius" =>
{
if let IResult::Done(_, ar) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, ar) = ws_f32(contents.as_bytes()) {
aperture_radii.push(ar); aperture_radii.push(ar);
} else { } else {
@ -420,8 +416,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::IncorrectLeafData( return Err(PsyParseError::IncorrectLeafData(
byte_offset, byte_offset,
"ApertureRadius should be a \ "ApertureRadius should be a \
decimal number specified \ decimal number specified \
in the form '[fov]'.", in the form '[fov]'.",
)); ));
} }
} }
@ -431,7 +427,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Transform" => { } if type_name == "Transform" =>
{
if let Ok(mat) = parse_matrix(contents) { if let Ok(mat) = parse_matrix(contents) {
mats.push(mat); mats.push(mat);
} else { } else {
@ -455,15 +452,12 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::ExpectedInternalNode( return Err(PsyParseError::ExpectedInternalNode(
tree.byte_offset(), tree.byte_offset(),
"Camera section should be an internal \ "Camera section should be an internal \
node, containing at least Fov and \ node, containing at least Fov and \
Transform.", Transform.",
)); ));
} }
} }
fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>, PsyParseError> { fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>, PsyParseError> {
if tree.is_internal() { if tree.is_internal() {
let background_color; let background_color;
@ -487,7 +481,7 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::WrongNodeCount( return Err(PsyParseError::WrongNodeCount(
bgs.byte_offset(), bgs.byte_offset(),
"BackgroundShader should have \ "BackgroundShader should have \
precisely one Type specified.", precisely one Type specified.",
bgs.iter_children_with_type("Type").count(), bgs.iter_children_with_type("Type").count(),
)); ));
} }
@ -499,17 +493,17 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::ExpectedLeafNode( return Err(PsyParseError::ExpectedLeafNode(
bgs.byte_offset(), bgs.byte_offset(),
"BackgroundShader's Type should be a \ "BackgroundShader's Type should be a \
leaf node.", leaf node.",
)); ));
} }
}; };
match bgs_type { match bgs_type {
"Color" => { "Color" => {
if let Some(&DataTree::Leaf { if let Some(&DataTree::Leaf {
contents, contents,
byte_offset, byte_offset,
.. ..
}) = bgs.iter_children_with_type("Color").nth(0) }) = bgs.iter_children_with_type("Color").nth(0)
{ {
if let IResult::Done(_, color) = if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.trim().as_bytes()) closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.trim().as_bytes())
@ -521,15 +515,15 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::IncorrectLeafData( return Err(PsyParseError::IncorrectLeafData(
byte_offset, byte_offset,
"Color should be specified \ "Color should be specified \
with three decimal numbers \ with three decimal numbers \
in the form '[R G B]'.", in the form '[R G B]'.",
)); ));
} }
} else { } else {
return Err(PsyParseError::MissingNode( return Err(PsyParseError::MissingNode(
bgs.byte_offset(), bgs.byte_offset(),
"BackgroundShader's Type is Color, \ "BackgroundShader's Type is Color, \
but no Color is specified.", but no Color is specified.",
)); ));
} }
} }
@ -538,7 +532,7 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::UnknownVariant( return Err(PsyParseError::UnknownVariant(
bgs.byte_offset(), bgs.byte_offset(),
"The specified BackgroundShader Type \ "The specified BackgroundShader Type \
isn't a recognized type.", isn't a recognized type.",
)) ))
} }
} }
@ -563,38 +557,34 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::ExpectedInternalNode( return Err(PsyParseError::ExpectedInternalNode(
tree.byte_offset(), tree.byte_offset(),
"World section should be an internal \ "World section should be an internal \
node, containing at least a \ node, containing at least a \
BackgroundShader.", BackgroundShader.",
)); ));
} }
} }
pub fn parse_matrix(contents: &str) -> Result<Matrix4x4, PsyParseError> { pub fn parse_matrix(contents: &str) -> Result<Matrix4x4, PsyParseError> {
if let IResult::Done(_, ns) = if let IResult::Done(_, ns) = closure!(terminated!(
closure!(terminated!( tuple!(
tuple!( ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32,
ws_f32, ws_f32
ws_f32 ),
), nom::eof
nom::eof ))(contents.as_bytes())
))(contents.as_bytes())
{ {
return Ok(Matrix4x4::new_from_values( return Ok(Matrix4x4::new_from_values(
ns.0, ns.0,
@ -623,6 +613,6 @@ pub fn make_transform_format_error(byte_offset: usize) -> PsyParseError {
PsyParseError::IncorrectLeafData( PsyParseError::IncorrectLeafData(
byte_offset, byte_offset,
"Transform should be sixteen integers specified in \ "Transform should be sixteen integers specified in \
the form '[# # # # # # # # # # # # # # # #]'.", the form '[# # # # # # # # # # # # # # # #]'.",
) )
} }

View File

@ -7,12 +7,11 @@ use mem_arena::MemArena;
use scene::{Assembly, AssemblyBuilder, Object}; use scene::{Assembly, AssemblyBuilder, Object};
use super::DataTree; use super::DataTree;
use super::psy_light::{parse_sphere_light, parse_rectangle_light}; use super::psy_light::{parse_rectangle_light, parse_sphere_light};
use super::psy_mesh_surface::parse_mesh_surface; use super::psy_mesh_surface::parse_mesh_surface;
use super::psy_surface_shader::parse_surface_shader; use super::psy_surface_shader::parse_surface_shader;
use super::psy::{parse_matrix, PsyParseError}; use super::psy::{parse_matrix, PsyParseError};
pub fn parse_assembly<'a>( pub fn parse_assembly<'a>(
arena: &'a MemArena, arena: &'a MemArena,
tree: &'a DataTree, tree: &'a DataTree,
@ -24,7 +23,10 @@ pub fn parse_assembly<'a>(
match child.type_name() { match child.type_name() {
// Sub-Assembly // Sub-Assembly
"Assembly" => { "Assembly" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child { if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_assembly(ident, parse_assembly(arena, child)?); builder.add_assembly(ident, parse_assembly(arena, child)?);
} else { } else {
return Err(PsyParseError::UnknownError(child.byte_offset())); return Err(PsyParseError::UnknownError(child.byte_offset()));
@ -75,9 +77,9 @@ pub fn parse_assembly<'a>(
return Err(PsyParseError::InstancedMissingData( return Err(PsyParseError::InstancedMissingData(
child.iter_leaf_children_with_type("Data").nth(0).unwrap().2, child.iter_leaf_children_with_type("Data").nth(0).unwrap().2,
"Attempted to add \ "Attempted to add \
instance for data with \ instance for data with \
a name that doesn't \ a name that doesn't \
exist.", exist.",
name.to_string(), name.to_string(),
)); ));
} }
@ -85,13 +87,16 @@ pub fn parse_assembly<'a>(
// SurfaceShader // SurfaceShader
"SurfaceShader" => { "SurfaceShader" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child { if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_surface_shader(ident, parse_surface_shader(arena, child)?); builder.add_surface_shader(ident, parse_surface_shader(arena, child)?);
} else { } else {
// TODO: error condition of some kind, because no ident // TODO: error condition of some kind, because no ident
panic!( panic!(
"SurfaceShader encountered that was a leaf, but SurfaceShaders cannot \ "SurfaceShader encountered that was a leaf, but SurfaceShaders cannot \
be a leaf: {}", be a leaf: {}",
child.byte_offset() child.byte_offset()
); );
} }
@ -99,7 +104,10 @@ pub fn parse_assembly<'a>(
// MeshSurface // MeshSurface
"MeshSurface" => { "MeshSurface" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child { if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_object( builder.add_object(
ident, ident,
Object::Surface(arena.alloc(parse_mesh_surface(arena, child)?)), Object::Surface(arena.alloc(parse_mesh_surface(arena, child)?)),
@ -108,7 +116,7 @@ pub fn parse_assembly<'a>(
// TODO: error condition of some kind, because no ident // TODO: error condition of some kind, because no ident
panic!( panic!(
"MeshSurface encountered that was a leaf, but MeshSurfaces cannot \ "MeshSurface encountered that was a leaf, but MeshSurfaces cannot \
be a leaf: {}", be a leaf: {}",
child.byte_offset() child.byte_offset()
); );
} }
@ -116,7 +124,10 @@ pub fn parse_assembly<'a>(
// Sphere Light // Sphere Light
"SphereLight" => { "SphereLight" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child { if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_object( builder.add_object(
ident, ident,
Object::SurfaceLight(arena.alloc(parse_sphere_light(arena, child)?)), Object::SurfaceLight(arena.alloc(parse_sphere_light(arena, child)?)),
@ -129,12 +140,13 @@ pub fn parse_assembly<'a>(
// Rectangle Light // Rectangle Light
"RectangleLight" => { "RectangleLight" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child { if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_object( builder.add_object(
ident, ident,
Object::SurfaceLight( Object::SurfaceLight(arena.alloc(parse_rectangle_light(arena, child)?)),
arena.alloc(parse_rectangle_light(arena, child)?),
),
); );
} else { } else {
// No ident // No ident
@ -144,27 +156,25 @@ pub fn parse_assembly<'a>(
_ => { _ => {
// TODO: some kind of error, because not a known type name // TODO: some kind of error, because not a known type name
} } // // Bilinear Patch
// "BilinearPatch" => {
// // Bilinear Patch // assembly->add_object(child.name, parse_bilinear_patch(child));
// "BilinearPatch" => { // }
// assembly->add_object(child.name, parse_bilinear_patch(child)); //
// } // // Bicubic Patch
// // else if (child.type == "BicubicPatch") {
// // Bicubic Patch // assembly->add_object(child.name, parse_bicubic_patch(child));
// else if (child.type == "BicubicPatch") { // }
// assembly->add_object(child.name, parse_bicubic_patch(child)); //
// } // // Subdivision surface
// // else if (child.type == "SubdivisionSurface") {
// // Subdivision surface // assembly->add_object(child.name, parse_subdivision_surface(child));
// else if (child.type == "SubdivisionSurface") { // }
// assembly->add_object(child.name, parse_subdivision_surface(child)); //
// } // // Sphere
// // else if (child.type == "Sphere") {
// // Sphere // assembly->add_object(child.name, parse_sphere(child));
// else if (child.type == "Sphere") { // }
// assembly->add_object(child.name, parse_sphere(child));
// }
} }
} }
} else { } else {

View File

@ -7,14 +7,13 @@ use nom::IResult;
use mem_arena::MemArena; use mem_arena::MemArena;
use math::Vector; use math::Vector;
use color::{XYZ, rec709_e_to_xyz}; use color::{rec709_e_to_xyz, XYZ};
use light::{DistantDiskLight, SphereLight, RectangleLight}; use light::{DistantDiskLight, RectangleLight, SphereLight};
use super::basics::ws_f32; use super::basics::ws_f32;
use super::DataTree; use super::DataTree;
use super::psy::PsyParseError; use super::psy::PsyParseError;
pub fn parse_distant_disk_light<'a>( pub fn parse_distant_disk_light<'a>(
arena: &'a MemArena, arena: &'a MemArena,
tree: &'a DataTree, tree: &'a DataTree,
@ -32,7 +31,8 @@ pub fn parse_distant_disk_light<'a>(
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Radius" => { } if type_name == "Radius" =>
{
if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) {
radii.push(radius); radii.push(radius);
} else { } else {
@ -46,7 +46,8 @@ pub fn parse_distant_disk_light<'a>(
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Direction" => { } if type_name == "Direction" =>
{
if let IResult::Done(_, direction) = if let IResult::Done(_, direction) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{ {
@ -62,7 +63,8 @@ pub fn parse_distant_disk_light<'a>(
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Color" => { } if type_name == "Color" =>
{
if let IResult::Done(_, color) = if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{ {
@ -86,7 +88,6 @@ pub fn parse_distant_disk_light<'a>(
} }
} }
pub fn parse_sphere_light<'a>( pub fn parse_sphere_light<'a>(
arena: &'a MemArena, arena: &'a MemArena,
tree: &'a DataTree, tree: &'a DataTree,
@ -103,7 +104,8 @@ pub fn parse_sphere_light<'a>(
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Radius" => { } if type_name == "Radius" =>
{
if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) { if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) {
radii.push(radius); radii.push(radius);
} else { } else {
@ -117,7 +119,8 @@ pub fn parse_sphere_light<'a>(
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Color" => { } if type_name == "Color" =>
{
if let IResult::Done(_, color) = if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{ {
@ -157,7 +160,8 @@ pub fn parse_rectangle_light<'a>(
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Dimensions" => { } if type_name == "Dimensions" =>
{
if let IResult::Done(_, radius) = if let IResult::Done(_, radius) =
closure!(tuple!(ws_f32, ws_f32))(contents.as_bytes()) closure!(tuple!(ws_f32, ws_f32))(contents.as_bytes())
{ {
@ -173,7 +177,8 @@ pub fn parse_rectangle_light<'a>(
type_name, type_name,
contents, contents,
byte_offset, byte_offset,
} if type_name == "Color" => { } if type_name == "Color" =>
{
if let IResult::Done(_, color) = if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes()) closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{ {

View File

@ -6,14 +6,13 @@ use nom::IResult;
use mem_arena::MemArena; use mem_arena::MemArena;
use math::{Point, Normal}; use math::{Normal, Point};
use surface::triangle_mesh::TriangleMesh; use surface::triangle_mesh::TriangleMesh;
use super::basics::{ws_usize, ws_f32}; use super::basics::{ws_usize, ws_f32};
use super::DataTree; use super::DataTree;
use super::psy::PsyParseError; use super::psy::PsyParseError;
// pub struct TriangleMesh { // pub struct TriangleMesh {
// time_samples: usize, // time_samples: usize,
// geo: Vec<(Point, Point, Point)>, // geo: Vec<(Point, Point, Point)>,
@ -61,8 +60,7 @@ pub fn parse_mesh_surface<'a>(
// Collect verts for this time sample // Collect verts for this time sample
let mut tnormals = Vec::new(); let mut tnormals = Vec::new();
while let IResult::Done(remaining, nor) = while let IResult::Done(remaining, nor) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text)
closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text)
{ {
raw_text = remaining; raw_text = remaining;

View File

@ -6,14 +6,13 @@ use nom::IResult;
use mem_arena::MemArena; use mem_arena::MemArena;
use color::{XYZ, rec709_e_to_xyz}; use color::{rec709_e_to_xyz, XYZ};
use shading::{SurfaceShader, SimpleSurfaceShader}; use shading::{SimpleSurfaceShader, SurfaceShader};
use super::basics::ws_f32; use super::basics::ws_f32;
use super::DataTree; use super::DataTree;
use super::psy::PsyParseError; use super::psy::PsyParseError;
// pub struct TriangleMesh { // pub struct TriangleMesh {
// time_samples: usize, // time_samples: usize,
// geo: Vec<(Point, Point, Point)>, // geo: Vec<(Point, Point, Point)>,

View File

@ -3,8 +3,7 @@
use std; use std;
use float4::Float4; use float4::Float4;
use math::{Vector, Point, Matrix4x4}; use math::{Matrix4x4, Point, Vector};
const OCCLUSION_FLAG: u32 = 1; const OCCLUSION_FLAG: u32 = 1;
const DONE_FLAG: u32 = 1 << 1; const DONE_FLAG: u32 = 1 << 1;
@ -52,7 +51,6 @@ impl Ray {
} }
} }
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct AccelRay { pub struct AccelRay {
pub orig: Point, pub orig: Point,
@ -67,7 +65,9 @@ impl AccelRay {
pub fn new(ray: &Ray, id: u32) -> AccelRay { pub fn new(ray: &Ray, id: u32) -> AccelRay {
AccelRay { AccelRay {
orig: ray.orig, orig: ray.orig,
dir_inv: Vector { co: Float4::new(1.0, 1.0, 1.0, 1.0) / ray.dir.co }, dir_inv: Vector {
co: Float4::new(1.0, 1.0, 1.0, 1.0) / ray.dir.co,
},
max_t: ray.max_t, max_t: ray.max_t,
time: ray.time, time: ray.time,
flags: ray.flags, flags: ray.flags,
@ -77,12 +77,16 @@ impl AccelRay {
pub fn update_from_world_ray(&mut self, wr: &Ray) { pub fn update_from_world_ray(&mut self, wr: &Ray) {
self.orig = wr.orig; self.orig = wr.orig;
self.dir_inv = Vector { co: Float4::new(1.0, 1.0, 1.0, 1.0) / wr.dir.co }; self.dir_inv = Vector {
co: Float4::new(1.0, 1.0, 1.0, 1.0) / wr.dir.co,
};
} }
pub fn update_from_xformed_world_ray(&mut self, wr: &Ray, mat: &Matrix4x4) { pub fn update_from_xformed_world_ray(&mut self, wr: &Ray, mat: &Matrix4x4) {
self.orig = wr.orig * *mat; self.orig = wr.orig * *mat;
self.dir_inv = Vector { co: Float4::new(1.0, 1.0, 1.0, 1.0) / (wr.dir * *mat).co }; self.dir_inv = Vector {
co: Float4::new(1.0, 1.0, 1.0, 1.0) / (wr.dir * *mat).co,
};
} }
pub fn is_occlusion(&self) -> bool { pub fn is_occlusion(&self) -> bool {

View File

@ -3,16 +3,16 @@ use std::cell::Cell;
use std::cmp; use std::cmp;
use std::cmp::min; use std::cmp::min;
use std::io::{self, Write}; use std::io::{self, Write};
use std::sync::{RwLock, Mutex}; use std::sync::{Mutex, RwLock};
use crossbeam::sync::MsQueue; use crossbeam::sync::MsQueue;
use scoped_threadpool::Pool; use scoped_threadpool::Pool;
use halton; use halton;
use accel::{ACCEL_TRAV_TIME, ACCEL_NODE_RAY_TESTS}; use accel::{ACCEL_NODE_RAY_TESTS, ACCEL_TRAV_TIME};
use algorithm::partition_pair; use algorithm::partition_pair;
use color::{Color, XYZ, SpectralSample, map_0_1_to_wavelength}; use color::{Color, SpectralSample, map_0_1_to_wavelength, XYZ};
use float4::Float4; use float4::Float4;
use fp_utils::robust_ray_origin; use fp_utils::robust_ray_origin;
use hash::hash_u32; use hash::hash_u32;
@ -27,7 +27,6 @@ use timer::Timer;
use tracer::Tracer; use tracer::Tracer;
use transform_stack::TransformStack; use transform_stack::TransformStack;
#[derive(Debug)] #[derive(Debug)]
pub struct Renderer<'a> { pub struct Renderer<'a> {
pub output_file: String, pub output_file: String,
@ -184,9 +183,7 @@ impl<'a> Renderer<'a> {
}); });
// Clear percentage progress print // Clear percentage progress print
print!( print!("\r \r",);
"\r \r",
);
// Return the rendered image and stats // Return the rendered image and stats
return (image, *collective_stats.read().unwrap()); return (image, *collective_stats.read().unwrap());
@ -353,7 +350,6 @@ impl<'a> Renderer<'a> {
} }
} }
#[derive(Debug)] #[derive(Debug)]
enum LightPathEvent { enum LightPathEvent {
CameraRay, CameraRay,
@ -410,7 +406,6 @@ impl LightPath {
pending_color_addition: Float4::splat(0.0), pending_color_addition: Float4::splat(0.0),
color: Float4::splat(0.0), color: Float4::splat(0.0),
}, },
scene.camera.generate_ray( scene.camera.generate_ray(
image_plane_co.0, image_plane_co.0,
image_plane_co.1, image_plane_co.1,
@ -438,8 +433,7 @@ impl LightPath {
match self.event { match self.event {
//-------------------------------------------------------------------- //--------------------------------------------------------------------
// Result of Camera or bounce ray, prepare next bounce and light rays // Result of Camera or bounce ray, prepare next bounce and light rays
LightPathEvent::CameraRay | LightPathEvent::CameraRay | LightPathEvent::BounceRay => {
LightPathEvent::BounceRay => {
if let surface::SurfaceIntersection::Hit { if let surface::SurfaceIntersection::Hit {
intersection_data: ref idata, intersection_data: ref idata,
ref closure, ref closure,
@ -482,8 +476,8 @@ impl LightPath {
self.time, self.time,
isect, isect,
); );
let found_light = if light_info.is_none() || light_info.pdf() <= 0.0 || let found_light = if light_info.is_none() || light_info.pdf() <= 0.0
light_info.selection_pdf() <= 0.0 || light_info.selection_pdf() <= 0.0
{ {
false false
} else { } else {
@ -564,9 +558,9 @@ impl LightPath {
// Calculate and store the light that will be contributed // Calculate and store the light that will be contributed
// to the film plane if the light is not in shadow. // to the film plane if the light is not in shadow.
let light_mis_pdf = power_heuristic(light_pdf, closure_pdf); let light_mis_pdf = power_heuristic(light_pdf, closure_pdf);
self.pending_color_addition = light_info.color().e * attenuation.e * self.pending_color_addition = light_info.color().e * attenuation.e
self.light_attenuation / * self.light_attenuation
(light_mis_pdf * light_sel_pdf); / (light_mis_pdf * light_sel_pdf);
*ray = shadow_ray; *ray = shadow_ray;
@ -630,8 +624,8 @@ impl LightPath {
.world .world
.background_color .background_color
.to_spectral_sample(self.wavelength) .to_spectral_sample(self.wavelength)
.e * self.light_attenuation / .e * self.light_attenuation
self.closure_sample_pdf; / self.closure_sample_pdf;
return false; return false;
} }
} }
@ -672,7 +666,6 @@ fn get_sample(dimension: u32, i: u32) -> f32 {
} }
} }
#[derive(Debug)] #[derive(Debug)]
struct BucketJob { struct BucketJob {
x: u32, x: u32,

View File

@ -1,6 +1,7 @@
mod monte_carlo; mod monte_carlo;
pub use self::monte_carlo::{square_to_circle, cosine_sample_hemisphere, uniform_sample_hemisphere,
                            uniform_sample_sphere, uniform_sample_cone, uniform_sample_cone_pdf,
                            uniform_sample_triangle, triangle_surface_area,
                            spherical_triangle_solid_angle, uniform_sample_spherical_triangle};
pub use self::monte_carlo::{cosine_sample_hemisphere, spherical_triangle_solid_angle,
                            square_to_circle, triangle_surface_area, uniform_sample_cone,
                            uniform_sample_cone_pdf, uniform_sample_hemisphere,
                            uniform_sample_sphere, uniform_sample_spherical_triangle,
                            uniform_sample_triangle};

View File

@ -4,8 +4,7 @@ use std::f32::consts::FRAC_PI_4 as QPI_32;
use std::f32::consts::PI as PI_32; use std::f32::consts::PI as PI_32;
use std::f64::consts::PI as PI_64; use std::f64::consts::PI as PI_64;
use math::{Vector, Point, cross, dot}; use math::{cross, dot, Point, Vector};
/// Maps the unit square to the unit circle. /// Maps the unit square to the unit circle.
/// NOTE: x and y should be distributed within [-1, 1], /// NOTE: x and y should be distributed within [-1, 1],
@ -194,8 +193,8 @@ pub fn uniform_sample_spherical_triangle(
let q_bottom = ((v * s) + (u * t)) * sin_va; let q_bottom = ((v * s) + (u * t)) * sin_va;
let q = q_top / q_bottom; let q = q_top / q_bottom;
let vc_2 = (va * q as f32) + let vc_2 =
((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32); (va * q as f32) + ((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32);
let z = 1.0 - (j * (1.0 - dot(vc_2, vb))); let z = 1.0 - (j * (1.0 - dot(vc_2, vb)));

View File

@ -4,7 +4,7 @@ use mem_arena::MemArena;
use accel::{LightAccel, LightTree}; use accel::{LightAccel, LightTree};
use accel::BVH4; use accel::BVH4;
use bbox::{BBox, transform_bbox_slice_from}; use bbox::{transform_bbox_slice_from, BBox};
use boundable::Boundable; use boundable::Boundable;
use color::SpectralSample; use color::SpectralSample;
use lerp::lerp_slice; use lerp::lerp_slice;
@ -14,7 +14,6 @@ use surface::{Surface, SurfaceIntersection};
use shading::SurfaceShader; use shading::SurfaceShader;
use transform_stack::TransformStack; use transform_stack::TransformStack;
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct Assembly<'a> { pub struct Assembly<'a> {
// Instance list // Instance list
@ -59,20 +58,17 @@ impl<'a> Assembly<'a> {
} else { } else {
Matrix4x4::new() Matrix4x4::new()
}; };
if let Some((light_i, sel_pdf, whittled_n)) = if let Some((light_i, sel_pdf, whittled_n)) = self.light_accel.select(
self.light_accel.select( idata.incoming * sel_xform,
idata.incoming * sel_xform, idata.pos * sel_xform,
idata.pos * sel_xform, idata.nor * sel_xform,
idata.nor * sel_xform, idata.nor_g * sel_xform,
idata.nor_g * sel_xform, closure.as_surface_closure(),
closure.as_surface_closure(), time,
time, n,
n, ) {
)
{
let inst = self.light_instances[light_i]; let inst = self.light_instances[light_i];
match inst.instance_type { match inst.instance_type {
InstanceType::Object => { InstanceType::Object => {
match self.objects[inst.data_index] { match self.objects[inst.data_index] {
Object::SurfaceLight(light) => { Object::SurfaceLight(light) => {
@ -151,7 +147,6 @@ impl<'a> Boundable for Assembly<'a> {
} }
} }
#[derive(Debug)] #[derive(Debug)]
pub struct AssemblyBuilder<'a> { pub struct AssemblyBuilder<'a> {
arena: &'a MemArena, arena: &'a MemArena,
@ -173,7 +168,6 @@ pub struct AssemblyBuilder<'a> {
assembly_map: HashMap<String, usize>, // map Name -> Index assembly_map: HashMap<String, usize>, // map Name -> Index
} }
impl<'a> AssemblyBuilder<'a> { impl<'a> AssemblyBuilder<'a> {
pub fn new(arena: &'a MemArena) -> AssemblyBuilder<'a> { pub fn new(arena: &'a MemArena) -> AssemblyBuilder<'a> {
AssemblyBuilder { AssemblyBuilder {
@ -196,10 +190,8 @@ impl<'a> AssemblyBuilder<'a> {
} }
// Add shader // Add shader
self.surface_shader_map.insert( self.surface_shader_map
name.to_string(), .insert(name.to_string(), self.surface_shaders.len());
self.surface_shaders.len(),
);
self.surface_shaders.push(shader); self.surface_shaders.push(shader);
} }
@ -219,15 +211,13 @@ impl<'a> AssemblyBuilder<'a> {
if self.name_exists(name) { if self.name_exists(name) {
panic!( panic!(
"Attempted to add assembly to another assembly with a name that already \ "Attempted to add assembly to another assembly with a name that already \
exists." exists."
); );
} }
// Add assembly // Add assembly
self.assembly_map.insert( self.assembly_map
name.to_string(), .insert(name.to_string(), self.assemblies.len());
self.assemblies.len(),
);
self.assemblies.push(asmb); self.assemblies.push(asmb);
} }
@ -244,7 +234,11 @@ impl<'a> AssemblyBuilder<'a> {
// Map zero-length transforms to None // Map zero-length transforms to None
let xforms = if let Some(xf) = xforms { let xforms = if let Some(xf) = xforms {
if !xf.is_empty() { Some(xf) } else { None } if !xf.is_empty() {
Some(xf)
} else {
None
}
} else { } else {
None None
}; };
@ -255,30 +249,26 @@ impl<'a> AssemblyBuilder<'a> {
instance_type: InstanceType::Object, instance_type: InstanceType::Object,
data_index: self.object_map[name], data_index: self.object_map[name],
surface_shader_index: surface_shader_name.map(|name| { surface_shader_index: surface_shader_name.map(|name| {
*self.surface_shader_map.get(name).expect(&format!( *self.surface_shader_map
"Unknown surface shader '{}'.", .get(name)
name .expect(&format!("Unknown surface shader '{}'.", name))
))
}), }),
id: self.instances.len(), id: self.instances.len(),
transform_indices: xforms.map( transform_indices: xforms
|xf| (self.xforms.len(), self.xforms.len() + xf.len()), .map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
),
} }
} else { } else {
Instance { Instance {
instance_type: InstanceType::Assembly, instance_type: InstanceType::Assembly,
data_index: self.assembly_map[name], data_index: self.assembly_map[name],
surface_shader_index: surface_shader_name.map(|name| { surface_shader_index: surface_shader_name.map(|name| {
*self.surface_shader_map.get(name).expect(&format!( *self.surface_shader_map
"Unknown surface shader '{}'.", .get(name)
name .expect(&format!("Unknown surface shader '{}'.", name))
))
}), }),
id: self.instances.len(), id: self.instances.len(),
transform_indices: xforms.map( transform_indices: xforms
|xf| (self.xforms.len(), self.xforms.len() + xf.len()), .map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
),
} }
}; };
@ -337,11 +327,9 @@ impl<'a> AssemblyBuilder<'a> {
} }
} }
InstanceType::Assembly => { InstanceType::Assembly => self.assemblies[inst.data_index]
self.assemblies[inst.data_index] .light_accel
.light_accel .approximate_energy(),
.approximate_energy()
}
}; };
(bounds, energy) (bounds, energy)
}); });
@ -358,7 +346,6 @@ impl<'a> AssemblyBuilder<'a> {
} }
} }
/// Returns a pair of vectors with the bounds of all instances. /// Returns a pair of vectors with the bounds of all instances.
/// This is used for building the assembly's BVH4. /// This is used for building the assembly's BVH4.
fn instance_bounds(&self) -> (Vec<usize>, Vec<BBox>) { fn instance_bounds(&self) -> (Vec<usize>, Vec<BBox>) {
@ -405,15 +392,12 @@ impl<'a> AssemblyBuilder<'a> {
} }
} }
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub enum Object<'a> { pub enum Object<'a> {
Surface(&'a Surface), Surface(&'a Surface),
SurfaceLight(&'a SurfaceLight), SurfaceLight(&'a SurfaceLight),
} }
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct Instance { pub struct Instance {
pub instance_type: InstanceType, pub instance_type: InstanceType,

View File

@ -2,6 +2,6 @@ mod assembly;
mod scene; mod scene;
mod world; mod world;
pub use self::assembly::{Assembly, AssemblyBuilder, Object, InstanceType}; pub use self::assembly::{Assembly, AssemblyBuilder, InstanceType, Object};
pub use self::scene::{Scene, SceneLightSample}; pub use self::scene::{Scene, SceneLightSample};
pub use self::world::World; pub use self::world::World;

View File

@ -2,14 +2,13 @@ use accel::LightAccel;
use algorithm::weighted_choice; use algorithm::weighted_choice;
use camera::Camera; use camera::Camera;
use color::SpectralSample; use color::SpectralSample;
use math::{Vector, Normal, Point}; use math::{Normal, Point, Vector};
use surface::SurfaceIntersection; use surface::SurfaceIntersection;
use transform_stack::TransformStack; use transform_stack::TransformStack;
use super::Assembly; use super::Assembly;
use super::World; use super::World;
#[derive(Debug)] #[derive(Debug)]
pub struct Scene<'a> { pub struct Scene<'a> {
pub name: Option<String>, pub name: Option<String>,
@ -35,9 +34,11 @@ impl<'a> Scene<'a> {
// Calculate relative probabilities of traversing into world lights // Calculate relative probabilities of traversing into world lights
// or local lights. // or local lights.
let wl_energy = if self.world.lights.iter().fold(0.0, |energy, light| { let wl_energy = if self.world
energy + light.approximate_energy() .lights
}) <= 0.0 .iter()
.fold(0.0, |energy, light| energy + light.approximate_energy())
<= 0.0
{ {
0.0 0.0
} else { } else {
@ -73,14 +74,8 @@ impl<'a> Scene<'a> {
let n = (n - wl_prob) / (1.0 - wl_prob); let n = (n - wl_prob) / (1.0 - wl_prob);
if let Some((ss, sgeo, pdf, spdf)) = if let Some((ss, sgeo, pdf, spdf)) =
self.root.sample_lights( self.root
xform_stack, .sample_lights(xform_stack, n, uvw, wavelength, time, intr)
n,
uvw,
wavelength,
time,
intr,
)
{ {
return SceneLightSample::Surface { return SceneLightSample::Surface {
color: ss, color: ss,
@ -96,7 +91,6 @@ impl<'a> Scene<'a> {
} }
} }
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub enum SceneLightSample { pub enum SceneLightSample {
None, None,

View File

@ -2,8 +2,8 @@ pub mod surface_closure;
use std::fmt::Debug; use std::fmt::Debug;
use color::{XYZ, Color}; use color::{Color, XYZ};
use self::surface_closure::{SurfaceClosureUnion, EmitClosure, LambertClosure, GTRClosure}; use self::surface_closure::{EmitClosure, GTRClosure, LambertClosure, SurfaceClosureUnion};
use surface::SurfaceIntersectionData; use surface::SurfaceIntersectionData;
/// Trait for surface shaders. /// Trait for surface shaders.
@ -31,8 +31,12 @@ pub trait SurfaceShader: Debug + Sync {
/// building. /// building.
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub enum SimpleSurfaceShader { pub enum SimpleSurfaceShader {
Emit { color: XYZ }, Emit {
Lambert { color: XYZ }, color: XYZ,
},
Lambert {
color: XYZ,
},
GTR { GTR {
color: XYZ, color: XYZ,
roughness: f32, roughness: f32,
@ -51,29 +55,23 @@ impl SurfaceShader for SimpleSurfaceShader {
let _ = (data, time); // Silence "unused" compiler warning let _ = (data, time); // Silence "unused" compiler warning
match *self { match *self {
SimpleSurfaceShader::Emit { color } => { SimpleSurfaceShader::Emit { color } => SurfaceClosureUnion::EmitClosure(
SurfaceClosureUnion::EmitClosure( EmitClosure::new(color.to_spectral_sample(wavelength)),
EmitClosure::new(color.to_spectral_sample(wavelength)), ),
) SimpleSurfaceShader::Lambert { color } => SurfaceClosureUnion::LambertClosure(
} LambertClosure::new(color.to_spectral_sample(wavelength)),
SimpleSurfaceShader::Lambert { color } => { ),
SurfaceClosureUnion::LambertClosure(
LambertClosure::new(color.to_spectral_sample(wavelength)),
)
}
SimpleSurfaceShader::GTR { SimpleSurfaceShader::GTR {
color, color,
roughness, roughness,
tail_shape, tail_shape,
fresnel, fresnel,
} => { } => SurfaceClosureUnion::GTRClosure(GTRClosure::new(
SurfaceClosureUnion::GTRClosure(GTRClosure::new( color.to_spectral_sample(wavelength),
color.to_spectral_sample(wavelength), roughness,
roughness, tail_shape,
tail_shape, fresnel,
fresnel, )),
))
}
} }
} }
} }

View File

@ -3,11 +3,10 @@
use std::f32::consts::PI as PI_32; use std::f32::consts::PI as PI_32;
use color::SpectralSample; use color::SpectralSample;
use math::{Vector, Normal, dot, clamp, zup_to_vec}; use math::{clamp, dot, zup_to_vec, Normal, Vector};
use sampling::cosine_sample_hemisphere; use sampling::cosine_sample_hemisphere;
use lerp::lerp; use lerp::lerp;
const INV_PI: f32 = 1.0 / PI_32; const INV_PI: f32 = 1.0 / PI_32;
const H_PI: f32 = PI_32 / 2.0; const H_PI: f32 = PI_32 / 2.0;
@ -90,7 +89,6 @@ pub trait SurfaceClosure {
) -> f32; ) -> f32;
} }
/// Utility function that calculates the fresnel reflection factor of a given /// Utility function that calculates the fresnel reflection factor of a given
/// incoming ray against a surface with the given ior outside/inside ratio. /// incoming ray against a surface with the given ior outside/inside ratio.
/// ///
@ -114,7 +112,6 @@ fn dielectric_fresnel(ior_ratio: f32, c: f32) -> f32 {
0.5 * f3 * f6 0.5 * f3 * f6
} }
/// Schlick's approximation of the fresnel reflection factor. /// Schlick's approximation of the fresnel reflection factor.
/// ///
/// Same interface as `dielectric_fresnel()`, above. /// Same interface as `dielectric_fresnel()`, above.
@ -128,7 +125,6 @@ fn schlick_fresnel(ior_ratio: f32, c: f32) -> f32 {
f2 + ((1.0 - f2) * c1 * c2 * c2) f2 + ((1.0 - f2) * c1 * c2 * c2)
} }
/// Utility function that calculates the fresnel reflection factor of a given /// Utility function that calculates the fresnel reflection factor of a given
/// incoming ray against a surface with the given normal-reflectance factor. /// incoming ray against a surface with the given normal-reflectance factor.
/// ///
@ -154,7 +150,6 @@ fn dielectric_fresnel_from_fac(fresnel_fac: f32, c: f32) -> f32 {
dielectric_fresnel(ior_ratio, c) dielectric_fresnel(ior_ratio, c)
} }
/// Schlick's approximation version of `dielectric_fresnel_from_fac()` above. /// Schlick's approximation version of `dielectric_fresnel_from_fac()` above.
#[allow(dead_code)] #[allow(dead_code)]
fn schlick_fresnel_from_fac(frensel_fac: f32, c: f32) -> f32 { fn schlick_fresnel_from_fac(frensel_fac: f32, c: f32) -> f32 {
@ -163,7 +158,6 @@ fn schlick_fresnel_from_fac(frensel_fac: f32, c: f32) -> f32 {
frensel_fac + ((1.0 - frensel_fac) * c1 * c2 * c2) frensel_fac + ((1.0 - frensel_fac) * c1 * c2 * c2)
} }
/// Emit closure. /// Emit closure.
/// ///
/// NOTE: this needs to be handled specially by the integrator! It does not /// NOTE: this needs to be handled specially by the integrator! It does not
@ -228,7 +222,6 @@ impl SurfaceClosure for EmitClosure {
} }
} }
/// Lambertian surface closure /// Lambertian surface closure
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct LambertClosure { pub struct LambertClosure {
@ -368,7 +361,6 @@ impl SurfaceClosure for LambertClosure {
} }
} }
/// The GTR microfacet BRDF from the Disney Principled BRDF paper. /// The GTR microfacet BRDF from the Disney Principled BRDF paper.
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct GTRClosure { pub struct GTRClosure {
@ -438,8 +430,8 @@ impl GTRClosure {
let roughness2 = self.roughness * self.roughness; let roughness2 = self.roughness * self.roughness;
// Calculate top half of equation // Calculate top half of equation
let top = 1.0 - let top = 1.0
((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u) - ((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u)
.powf(1.0 / (1.0 - self.tail_shape)); .powf(1.0 / (1.0 - self.tail_shape));
// Calculate bottom half of equation // Calculate bottom half of equation
@ -470,7 +462,6 @@ impl SurfaceClosure for GTRClosure {
self.roughness == 0.0 self.roughness == 0.0
} }
fn sample( fn sample(
&self, &self,
inc: Vector, inc: Vector,
@ -505,7 +496,6 @@ impl SurfaceClosure for GTRClosure {
} }
} }
fn evaluate(&self, inc: Vector, out: Vector, nor: Normal, nor_g: Normal) -> SpectralSample { fn evaluate(&self, inc: Vector, out: Vector, nor: Normal, nor_g: Normal) -> SpectralSample {
// Calculate needed vectors, normalized // Calculate needed vectors, normalized
let aa = -inc.normalized(); // Vector pointing to where "in" came from let aa = -inc.normalized(); // Vector pointing to where "in" came from
@ -605,7 +595,6 @@ impl SurfaceClosure for GTRClosure {
} }
} }
fn sample_pdf(&self, inc: Vector, out: Vector, nor: Normal, nor_g: Normal) -> f32 { fn sample_pdf(&self, inc: Vector, out: Vector, nor: Normal, nor_g: Normal) -> f32 {
// Calculate needed vectors, normalized // Calculate needed vectors, normalized
let aa = -inc.normalized(); // Vector pointing to where "in" came from let aa = -inc.normalized(); // Vector pointing to where "in" came from
@ -630,7 +619,6 @@ impl SurfaceClosure for GTRClosure {
self.dist(nh, self.roughness) * INV_PI self.dist(nh, self.roughness) * INV_PI
} }
fn estimate_eval_over_sphere_light( fn estimate_eval_over_sphere_light(
&self, &self,
inc: Vector, inc: Vector,

View File

@ -6,12 +6,11 @@ pub mod triangle_mesh;
use std::fmt::Debug; use std::fmt::Debug;
use boundable::Boundable; use boundable::Boundable;
use math::{Point, Vector, Normal, Matrix4x4}; use math::{Matrix4x4, Normal, Point, Vector};
use ray::{Ray, AccelRay}; use ray::{AccelRay, Ray};
use shading::surface_closure::SurfaceClosureUnion; use shading::surface_closure::SurfaceClosureUnion;
use shading::SurfaceShader; use shading::SurfaceShader;
pub trait Surface: Boundable + Debug + Sync { pub trait Surface: Boundable + Debug + Sync {
fn intersect_rays( fn intersect_rays(
&self, &self,
@ -23,7 +22,6 @@ pub trait Surface: Boundable + Debug + Sync {
); );
} }
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub enum SurfaceIntersection { pub enum SurfaceIntersection {
Miss, Miss,
@ -34,17 +32,16 @@ pub enum SurfaceIntersection {
}, },
} }
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct SurfaceIntersectionData { pub struct SurfaceIntersectionData {
pub incoming: Vector, // Direction of the incoming ray pub incoming: Vector, // Direction of the incoming ray
pub pos: Point, // Position of the intersection pub pos: Point, // Position of the intersection
pub pos_err: f32, // Error magnitude of the intersection position. Imagine pub pos_err: f32, // Error magnitude of the intersection position. Imagine
// a cube centered around `pos` with dimensions of `2 * pos_err`. // a cube centered around `pos` with dimensions of `2 * pos_err`.
pub nor: Normal, // Shading normal pub nor: Normal, // Shading normal
pub nor_g: Normal, // True geometric normal pub nor_g: Normal, // True geometric normal
pub local_space: Matrix4x4, // Matrix from global space to local space pub local_space: Matrix4x4, // Matrix from global space to local space
pub t: f32, // Ray t-value at the intersection point pub t: f32, // Ray t-value at the intersection point
pub uv: (f32, f32), // 2d surface parameters pub uv: (f32, f32), // 2d surface parameters
pub sample_pdf: f32, // The PDF of getting this point by explicitly sampling the surface pub sample_pdf: f32, // The PDF of getting this point by explicitly sampling the surface
} }

View File

@ -4,7 +4,6 @@ use fp_utils::fp_gamma;
use math::Point; use math::Point;
use ray::Ray; use ray::Ray;
/// Intersects `ray` with `tri`, returning `Some((t, b0, b1, b2))`, or `None` /// Intersects `ray` with `tri`, returning `Some((t, b0, b1, b2))`, or `None`
/// if no intersection. /// if no intersection.
/// ///
@ -83,8 +82,8 @@ pub fn intersect_ray(ray: &Ray, tri: (Point, Point, Point)) -> Option<(f32, f32,
let t_scaled = (e0 * p0z) + (e1 * p1z) + (e2 * p2z); let t_scaled = (e0 * p0z) + (e1 * p1z) + (e2 * p2z);
// Check if the hitpoint t is within ray min/max t. // Check if the hitpoint t is within ray min/max t.
if (det > 0.0 && (t_scaled <= 0.0 || t_scaled > (ray.max_t * det))) || if (det > 0.0 && (t_scaled <= 0.0 || t_scaled > (ray.max_t * det)))
(det < 0.0 && (t_scaled >= 0.0 || t_scaled < (ray.max_t * det))) || (det < 0.0 && (t_scaled >= 0.0 || t_scaled < (ray.max_t * det)))
{ {
return None; return None;
} }
@ -115,8 +114,8 @@ pub fn intersect_ray(ray: &Ray, tri: (Point, Point, Point)) -> Option<(f32, f32,
// Calculate delta t // Calculate delta t
let max_e = max_abs_3(e0, e1, e2); let max_e = max_abs_3(e0, e1, e2);
let dt = 3.0 * ((fp_gamma(3) * max_e * max_zt) + (de * max_zt + dz * max_e)) * let dt =
inv_det.abs(); 3.0 * ((fp_gamma(3) * max_e * max_zt) + (de * max_zt + dz * max_e)) * inv_det.abs();
// Finally, do the check // Finally, do the check
if t <= dt { if t <= dt {
@ -133,12 +132,13 @@ pub fn intersect_ray(ray: &Ray, tri: (Point, Point, Point)) -> Option<(f32, f32,
/// ///
/// Returns the point and the error magnitude of the point. /// Returns the point and the error magnitude of the point.
pub fn surface_point(tri: (Point, Point, Point), bary: (f32, f32, f32)) -> (Point, f32) { pub fn surface_point(tri: (Point, Point, Point), bary: (f32, f32, f32)) -> (Point, f32) {
let pos = ((tri.0.into_vector() * bary.0) + (tri.1.into_vector() * bary.1) + let pos = ((tri.0.into_vector() * bary.0) + (tri.1.into_vector() * bary.1)
(tri.2.into_vector() * bary.2)) + (tri.2.into_vector() * bary.2))
.into_point(); .into_point();
let pos_err = (((tri.0.into_vector().abs() * bary.0) + (tri.1.into_vector().abs() * bary.1) + let pos_err = (((tri.0.into_vector().abs() * bary.0) + (tri.1.into_vector().abs() * bary.1)
(tri.2.into_vector().abs() * bary.2)) * fp_gamma(7)).co + (tri.2.into_vector().abs() * bary.2)) * fp_gamma(7))
.co
.h_max(); .h_max();
(pos, pos_err) (pos, pos_err)

View File

@ -6,14 +6,13 @@ use accel::BVH4;
use bbox::BBox; use bbox::BBox;
use boundable::Boundable; use boundable::Boundable;
use lerp::lerp_slice; use lerp::lerp_slice;
use math::{Point, Normal, Matrix4x4, dot, cross}; use math::{cross, dot, Matrix4x4, Normal, Point};
use ray::{Ray, AccelRay}; use ray::{AccelRay, Ray};
use shading::SurfaceShader; use shading::SurfaceShader;
use super::{Surface, SurfaceIntersection, SurfaceIntersectionData}; use super::{Surface, SurfaceIntersection, SurfaceIntersectionData};
use super::triangle; use super::triangle;
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct TriangleMesh<'a> { pub struct TriangleMesh<'a> {
time_sample_count: usize, time_sample_count: usize,
@ -94,8 +93,8 @@ impl<'a> TriangleMesh<'a> {
// Build BVH // Build BVH
let accel = BVH4::from_objects(arena, &mut indices[..], 3, |tri| { let accel = BVH4::from_objects(arena, &mut indices[..], 3, |tri| {
&bounds[(tri.3 as usize * time_sample_count).. &bounds
((tri.3 as usize + 1) * time_sample_count)] [(tri.3 as usize * time_sample_count)..((tri.3 as usize + 1) * time_sample_count)]
}); });
TriangleMesh { TriangleMesh {
@ -114,7 +113,6 @@ impl<'a> Boundable for TriangleMesh<'a> {
} }
} }
impl<'a> Surface for TriangleMesh<'a> { impl<'a> Surface for TriangleMesh<'a> {
fn intersect_rays( fn intersect_rays(
&self, &self,
@ -131,27 +129,25 @@ impl<'a> Surface for TriangleMesh<'a> {
Matrix4x4::new() Matrix4x4::new()
}; };
self.accel.traverse( self.accel
&mut accel_rays[..], .traverse(&mut accel_rays[..], self.indices, |tri_indices, rs| {
self.indices,
|tri_indices, rs| {
for r in rs { for r in rs {
let wr = &wrays[r.id as usize]; let wr = &wrays[r.id as usize];
// Get triangle // Get triangle
let tri = { let tri = {
let p0_slice = &self.vertices[(tri_indices.0 as usize * let p0_slice = &self.vertices[(tri_indices.0 as usize
self.time_sample_count).. * self.time_sample_count)
((tri_indices.0 as usize + 1) * ..((tri_indices.0 as usize + 1)
self.time_sample_count)]; * self.time_sample_count)];
let p1_slice = &self.vertices[(tri_indices.1 as usize * let p1_slice = &self.vertices[(tri_indices.1 as usize
self.time_sample_count).. * self.time_sample_count)
((tri_indices.1 as usize + 1) * ..((tri_indices.1 as usize + 1)
self.time_sample_count)]; * self.time_sample_count)];
let p2_slice = &self.vertices[(tri_indices.2 as usize * let p2_slice = &self.vertices[(tri_indices.2 as usize
self.time_sample_count).. * self.time_sample_count)
((tri_indices.2 as usize + 1) * ..((tri_indices.2 as usize + 1)
self.time_sample_count)]; * self.time_sample_count)];
let p0 = lerp_slice(p0_slice, wr.time); let p0 = lerp_slice(p0_slice, wr.time);
let p1 = lerp_slice(p1_slice, wr.time); let p1 = lerp_slice(p1_slice, wr.time);
@ -166,18 +162,20 @@ impl<'a> Surface for TriangleMesh<'a> {
if space.len() > 1 { if space.len() > 1 {
// Per-ray transform, for motion blur // Per-ray transform, for motion blur
let mat_space = lerp_slice(space, wr.time).inverse(); let mat_space = lerp_slice(space, wr.time).inverse();
(mat_space, ( (
tri.0 * mat_space, mat_space,
tri.1 * mat_space, (tri.0 * mat_space, tri.1 * mat_space, tri.2 * mat_space),
tri.2 * mat_space, )
))
} else { } else {
// Same transform for all rays // Same transform for all rays
(static_mat_space, ( (
tri.0 * static_mat_space, static_mat_space,
tri.1 * static_mat_space, (
tri.2 * static_mat_space, tri.0 * static_mat_space,
)) tri.1 * static_mat_space,
tri.2 * static_mat_space,
),
)
} }
} else { } else {
// No transforms // No transforms
@ -199,18 +197,18 @@ impl<'a> Surface for TriangleMesh<'a> {
// Calculate interpolated surface normal, if any // Calculate interpolated surface normal, if any
let shading_normal = if let Some(normals) = self.normals { let shading_normal = if let Some(normals) = self.normals {
let n0_slice = &normals[(tri_indices.0 as usize * let n0_slice = &normals[(tri_indices.0 as usize
self.time_sample_count).. * self.time_sample_count)
((tri_indices.0 as usize + 1) * ..((tri_indices.0 as usize + 1)
self.time_sample_count)]; * self.time_sample_count)];
let n1_slice = &normals[(tri_indices.1 as usize * let n1_slice = &normals[(tri_indices.1 as usize
self.time_sample_count).. * self.time_sample_count)
((tri_indices.1 as usize + 1) * ..((tri_indices.1 as usize + 1)
self.time_sample_count)]; * self.time_sample_count)];
let n2_slice = &normals[(tri_indices.2 as usize * let n2_slice = &normals[(tri_indices.2 as usize
self.time_sample_count).. * self.time_sample_count)
((tri_indices.2 as usize + 1) * ..((tri_indices.2 as usize + 1)
self.time_sample_count)]; * self.time_sample_count)];
let n0 = lerp_slice(n0_slice, wr.time).normalized(); let n0 = lerp_slice(n0_slice, wr.time).normalized();
let n1 = lerp_slice(n1_slice, wr.time).normalized(); let n1 = lerp_slice(n1_slice, wr.time).normalized();
@ -252,7 +250,6 @@ impl<'a> Surface for TriangleMesh<'a> {
} }
} }
} }
}, });
);
} }
} }

View File

@ -5,7 +5,6 @@ use std::time::Duration;
use time; use time;
#[derive(Copy, Clone)] #[derive(Copy, Clone)]
pub struct Timer { pub struct Timer {
last_time: u64, last_time: u64,
@ -13,7 +12,9 @@ pub struct Timer {
impl Timer { impl Timer {
pub fn new() -> Timer { pub fn new() -> Timer {
Timer { last_time: time::precise_time_ns() } Timer {
last_time: time::precise_time_ns(),
}
} }
/// Marks a new tick time and returns the time elapsed in seconds since /// Marks a new tick time and returns the time elapsed in seconds since

View File

@ -2,13 +2,12 @@ use std::iter;
use algorithm::partition; use algorithm::partition;
use lerp::lerp_slice; use lerp::lerp_slice;
use ray::{Ray, AccelRay}; use ray::{AccelRay, Ray};
use scene::{Assembly, Object, InstanceType}; use scene::{Assembly, InstanceType, Object};
use surface::SurfaceIntersection; use surface::SurfaceIntersection;
use transform_stack::TransformStack; use transform_stack::TransformStack;
use shading::{SurfaceShader, SimpleSurfaceShader}; use shading::{SimpleSurfaceShader, SurfaceShader};
use color::{XYZ, rec709_to_xyz}; use color::{rec709_to_xyz, XYZ};
pub struct Tracer<'a> { pub struct Tracer<'a> {
rays: Vec<AccelRay>, rays: Vec<AccelRay>,
@ -31,9 +30,11 @@ impl<'a> Tracer<'a> {
self.rays.clear(); self.rays.clear();
self.rays.reserve(wrays.len()); self.rays.reserve(wrays.len());
let mut ids = 0..(wrays.len() as u32); let mut ids = 0..(wrays.len() as u32);
self.rays.extend(wrays.iter().map( self.rays.extend(
|wr| AccelRay::new(wr, ids.next().unwrap()), wrays
)); .iter()
.map(|wr| AccelRay::new(wr, ids.next().unwrap())),
);
self.inner.trace(wrays, &mut self.rays[..]) self.inner.trace(wrays, &mut self.rays[..])
} }
@ -50,12 +51,8 @@ impl<'a> TracerInner<'a> {
// Ready the isects // Ready the isects
self.isects.clear(); self.isects.clear();
self.isects.reserve(wrays.len()); self.isects.reserve(wrays.len());
self.isects.extend( self.isects
iter::repeat(SurfaceIntersection::Miss).take( .extend(iter::repeat(SurfaceIntersection::Miss).take(wrays.len()));
wrays
.len(),
),
);
let mut ray_sets = split_rays_by_direction(&mut rays[..]); let mut ray_sets = split_rays_by_direction(&mut rays[..]);
for ray_set in ray_sets.iter_mut().filter(|ray_set| !ray_set.is_empty()) { for ray_set in ray_sets.iter_mut().filter(|ray_set| !ray_set.is_empty()) {
@ -71,10 +68,9 @@ impl<'a> TracerInner<'a> {
wrays: &[Ray], wrays: &[Ray],
accel_rays: &mut [AccelRay], accel_rays: &mut [AccelRay],
) { ) {
assembly.object_accel.traverse( assembly
&mut accel_rays[..], .object_accel
&assembly.instances[..], .traverse(&mut accel_rays[..], &assembly.instances[..], |inst, rs| {
|inst, rs| {
// Transform rays if needed // Transform rays if needed
if let Some((xstart, xend)) = inst.transform_indices { if let Some((xstart, xend)) = inst.transform_indices {
// Push transforms to stack // Push transforms to stack
@ -130,9 +126,8 @@ impl<'a> TracerInner<'a> {
InstanceType::Object => { InstanceType::Object => {
self.trace_object( self.trace_object(
&assembly.objects[inst.data_index], &assembly.objects[inst.data_index],
inst.surface_shader_index.map( inst.surface_shader_index
|i| assembly.surface_shaders[i], .map(|i| assembly.surface_shaders[i]),
),
wrays, wrays,
ray_set, ray_set,
); );
@ -172,8 +167,7 @@ impl<'a> TracerInner<'a> {
} }
} }
} }
}, });
);
} }
fn trace_object<'b>( fn trace_object<'b>(
@ -217,7 +211,6 @@ impl<'a> TracerInner<'a> {
} }
} }
fn split_rays_by_direction(rays: &mut [AccelRay]) -> [&mut [AccelRay]; 8] { fn split_rays_by_direction(rays: &mut [AccelRay]) -> [&mut [AccelRay]; 8] {
// | | | | | | | | | // | | | | | | | | |
// s1 s2 s3 s4 s5 s6 s7 // s1 s2 s3 s4 s5 s6 s7
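`split_rays_by_direction` partitions the rays into the eight direction octants sketched in the comment above (seven split points, eight contiguous sub-slices), so each traversal batch sees rays with a consistent sign pattern. The split is presumably keyed on the sign bits of the ray direction, along these lines (an illustrative sketch, not the crate's exact layout):
// Sketch: an octant key from the sign bits of a direction. The real function
// produces its eight sub-slices via repeated in-place partitions (s1..s7 above)
// rather than sorting by this key.
fn direction_octant(dir: (f32, f32, f32)) -> usize {
    ((dir.0 < 0.0) as usize)
        | (((dir.1 < 0.0) as usize) << 1)
        | (((dir.2 < 0.0) as usize) << 2)
}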

View File

@ -3,7 +3,6 @@ use std::cmp;
use algorithm::merge_slices_to; use algorithm::merge_slices_to;
use math::Matrix4x4; use math::Matrix4x4;
pub struct TransformStack { pub struct TransformStack {
stack: Vec<Matrix4x4>, stack: Vec<Matrix4x4>,
stack_indices: Vec<usize>, stack_indices: Vec<usize>,

View File

@ -34,10 +34,8 @@ fn main() {
perm.swap(0, 2); perm.swap(0, 2);
perm.swap(1, 3); perm.swap(1, 3);
} }
traversal_table[raydir].push( traversal_table[raydir]
perm[0] + (perm[1] << 2) + (perm[2] << 4) + .push(perm[0] + (perm[1] << 2) + (perm[2] << 4) + (perm[3] << 6));
(perm[3] << 6),
);
} }
} }
} }
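Each table entry above packs a permutation of a node's four children into a single byte, two bits per child. A small sketch of the matching unpack step (illustrative only; the BVH4 traversal that consumes the table may decode it differently):
// Sketch: decode a packed traversal byte into four 2-bit child indices.
fn unpack_traversal(code: u8) -> [usize; 4] {
    [
        (code & 0b11) as usize,
        ((code >> 2) & 0b11) as usize,
        ((code >> 4) & 0b11) as usize,
        ((code >> 6) & 0b11) as usize,
    ]
}
// e.g. perm = [2, 0, 3, 1] packs to 2 + (0 << 2) + (3 << 4) + (1 << 6) = 114,
// and unpack_traversal(114) recovers [2, 0, 3, 1].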

View File

@ -19,9 +19,9 @@ include!(concat!(env!("OUT_DIR"), "/table_inc.rs"));
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub enum SplitAxes { pub enum SplitAxes {
Full((u8, u8, u8)), // top, left, right Full((u8, u8, u8)), // top, left, right
Left((u8, u8)), // top, left Left((u8, u8)), // top, left
Right((u8, u8)), // top, right Right((u8, u8)), // top, right
TopOnly(u8), // top TopOnly(u8), // top
} }
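The tuple fields record which axis each of the collapsed binary splits was made on, as the variant comments indicate: the top-level split plus, where present, the splits of the left and right children. A hedged construction example (treating the `u8` values as axis indices 0 = x, 1 = y, 2 = z is an assumption here):
// Sketch: constructing SplitAxes values, assuming axes are numbered 0 = x, 1 = y, 2 = z.
fn split_axes_examples() -> (SplitAxes, SplitAxes) {
    let full = SplitAxes::Full((0, 1, 2));  // top split on X, left child on Y, right child on Z
    let top_left = SplitAxes::Left((0, 1)); // right child was a leaf: only top and left recorded
    (full, top_left)
}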
/// Calculates the traversal code for a BVH4 node based on the splits and /// Calculates the traversal code for a BVH4 node based on the splits and

View File

@ -3,7 +3,6 @@ use std::fs::File;
use std::io::Write; use std::io::Write;
use std::path::Path; use std::path::Path;
#[derive(Copy, Clone)] #[derive(Copy, Clone)]
struct Chromaticities { struct Chromaticities {
r: (f64, f64), r: (f64, f64),
@ -12,7 +11,6 @@ struct Chromaticities {
w: (f64, f64), w: (f64, f64),
} }
fn main() { fn main() {
let out_dir = env::var("OUT_DIR").unwrap(); let out_dir = env::var("OUT_DIR").unwrap();
@ -77,10 +75,8 @@ fn main() {
} }
} }
/// Generates conversion functions for the given rgb to xyz transform matrix. /// Generates conversion functions for the given rgb to xyz transform matrix.
fn write_conversion_functions(space_name: &str, to_xyz: [[f64; 3]; 3], f: &mut File) { fn write_conversion_functions(space_name: &str, to_xyz: [[f64; 3]; 3], f: &mut File) {
f.write_all( f.write_all(
format!( format!(
r#" r#"
@ -185,7 +181,6 @@ pub fn xyz_to_{}_e(xyz: (f32, f32, f32)) -> (f32, f32, f32) {{
).unwrap(); ).unwrap();
} }
/// Port of the RGBtoXYZ function from the ACES CTL reference implementation. /// Port of the RGBtoXYZ function from the ACES CTL reference implementation.
/// See lib/IlmCtlMath/CtlColorSpace.cpp in the CTL reference implementation. /// See lib/IlmCtlMath/CtlColorSpace.cpp in the CTL reference implementation.
/// ///
@ -200,20 +195,20 @@ fn rgb_to_xyz(chroma: Chromaticities, y: f64) -> [[f64; 3]; 3] {
let z = (1.0 - chroma.w.0 - chroma.w.1) * y / chroma.w.1; let z = (1.0 - chroma.w.0 - chroma.w.1) * y / chroma.w.1;
// Scale factors for matrix rows // Scale factors for matrix rows
let d = chroma.r.0 * (chroma.b.1 - chroma.g.1) + chroma.b.0 * (chroma.g.1 - chroma.r.1) + let d = chroma.r.0 * (chroma.b.1 - chroma.g.1) + chroma.b.0 * (chroma.g.1 - chroma.r.1)
chroma.g.0 * (chroma.r.1 - chroma.b.1); + chroma.g.0 * (chroma.r.1 - chroma.b.1);
let sr = (x * (chroma.b.1 - chroma.g.1) - let sr = (x * (chroma.b.1 - chroma.g.1)
chroma.g.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) + - chroma.g.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z))
chroma.b.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))) / d; + chroma.b.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))) / d;
let sg = (x * (chroma.r.1 - chroma.b.1) + let sg = (x * (chroma.r.1 - chroma.b.1)
chroma.r.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) - + chroma.r.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z))
chroma.b.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d; - chroma.b.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
let sb = (x * (chroma.g.1 - chroma.r.1) - let sb = (x * (chroma.g.1 - chroma.r.1)
chroma.r.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z)) + - chroma.r.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))
chroma.g.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d; + chroma.g.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
// Assemble the matrix // Assemble the matrix
let mut mat = [[0.0; 3]; 3]; let mut mat = [[0.0; 3]; 3];
@ -233,7 +228,6 @@ fn rgb_to_xyz(chroma: Chromaticities, y: f64) -> [[f64; 3]; 3] {
mat mat
} }
/// Chromatically adapts a matrix from `rgb_to_xyz` to a whitepoint of E. /// Chromatically adapts a matrix from `rgb_to_xyz` to a whitepoint of E.
/// ///
/// In other words, makes it so that RGB (1,1,1) maps to XYZ (1,1,1). /// In other words, makes it so that RGB (1,1,1) maps to XYZ (1,1,1).
@ -259,7 +253,6 @@ fn adapt_to_e(mat: [[f64; 3]; 3], y: f64) -> [[f64; 3]; 3] {
mat2 mat2
} }
/// Calculates the inverse of the given 3x3 matrix. /// Calculates the inverse of the given 3x3 matrix.
/// ///
/// Ported to Rust from `gjInverse()` in IlmBase's Imath/ImathMatrix.h /// Ported to Rust from `gjInverse()` in IlmBase's Imath/ImathMatrix.h
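`rgb_to_xyz` assembles the 3x3 RGB-to-XYZ matrix from the primaries' chromaticities: the whitepoint is lifted to XYZ at luminance `y`, and the scale factors `sr`, `sg`, `sb` are solved so that the weighted primaries reproduce that whitepoint. A usage sketch with the standard Rec.709 primaries and D65 whitepoint (the chromaticity values are the published ones; `Chromaticities` and `rgb_to_xyz` are the items defined in this build script):
// Sketch: Rec.709 / sRGB primaries with a D65 whitepoint, luminance 1.0.
fn rec709_to_xyz_matrix() -> [[f64; 3]; 3] {
    let rec709 = Chromaticities {
        r: (0.640, 0.330),
        g: (0.300, 0.600),
        b: (0.150, 0.060),
        w: (0.3127, 0.3290),
    };
    // By construction RGB (1, 1, 1) lands on the D65 whitepoint in XYZ;
    // adapt_to_e() (below) then adapts the matrix so (1, 1, 1) maps to XYZ (1, 1, 1).
    rgb_to_xyz(rec709, 1.0)
}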

View File

@ -27,7 +27,6 @@ use std::fs::File;
use std::io::Write; use std::io::Write;
use std::path::Path; use std::path::Path;
/// How many components to generate. /// How many components to generate.
const NUM_DIMENSIONS: usize = 128; const NUM_DIMENSIONS: usize = 128;
@ -111,8 +110,7 @@ pub fn sample(dimension: u32, index: u32) -> f32 {{
format!( format!(
r#" r#"
{} => halton{}(index),"#, {} => halton{}(index),"#,
i, i, primes[i]
primes[i]
).as_bytes(), ).as_bytes(),
).unwrap(); ).unwrap();
} }
@ -127,7 +125,6 @@ pub fn sample(dimension: u32, index: u32) -> f32 {{
).as_bytes(), ).as_bytes(),
).unwrap(); ).unwrap();
// Write the special-cased first dimension // Write the special-cased first dimension
f.write_all( f.write_all(
format!( format!(
@ -196,9 +193,7 @@ fn halton{}(index: u32) -> f32 {{
format!( format!(
r#" r#"
return (unsafe{{*PERM{}.get_unchecked((index % {}) as usize)}} as u32 * {} +"#, return (unsafe{{*PERM{}.get_unchecked((index % {}) as usize)}} as u32 * {} +"#,
base, base, pow_base, power
pow_base,
power
).as_bytes(), ).as_bytes(),
).unwrap(); ).unwrap();
@ -211,10 +206,7 @@ fn halton{}(index: u32) -> f32 {{
format!( format!(
r#" r#"
unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32 * {} +"#, unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32 * {} +"#,
base, base, div, pow_base, power
div,
pow_base,
power
).as_bytes(), ).as_bytes(),
).unwrap(); ).unwrap();
} }
@ -235,7 +227,6 @@ fn halton{}(index: u32) -> f32 {{
} }
} }
/// Check primality. Not optimized, since it's not performance-critical. /// Check primality. Not optimized, since it's not performance-critical.
fn is_prime(p: usize) -> bool { fn is_prime(p: usize) -> bool {
for i in 2..p { for i in 2..p {
@ -271,10 +262,12 @@ fn get_faure_permutation(faure: &Vec<Vec<usize>>, b: usize) -> Vec<usize> {
let c = b / 2; let c = b / 2;
return (0..b) return (0..b)
.map(|i| if i < c { .map(|i| {
2 * faure[c][i] if i < c {
} else { 2 * faure[c][i]
2 * faure[c][i - c] + 1 } else {
2 * faure[c][i - c] + 1
}
}) })
.collect(); .collect();
} }
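The generated `halton*` functions evaluate a permuted radical inverse: the digits of `index` in a prime base are scrambled with the Faure permutations built above and mirrored about the radix point. For orientation, an unscrambled radical inverse in an arbitrary base (a reference sketch, not the generated code):
// Sketch: plain radical inverse of `index` in `base`, with no digit permutation.
fn radical_inverse(mut index: u32, base: u32) -> f32 {
    let inv_base = 1.0f64 / base as f64;
    let mut inv = inv_base;
    let mut result = 0.0f64;
    while index > 0 {
        result += (index % base) as f64 * inv; // next digit, weighted by base^-(k+1)
        index /= base;
        inv *= inv_base;
    }
    result as f32
}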

View File

@ -23,7 +23,6 @@ pub fn dot<T: DotProduct>(a: T, b: T) -> f32 {
a.dot(b) a.dot(b)
} }
/// Trait for calculating cross products. /// Trait for calculating cross products.
pub trait CrossProduct { pub trait CrossProduct {
#[inline] #[inline]

View File

@ -7,14 +7,12 @@ use float4::Float4;
use super::Point; use super::Point;
/// A 4x4 matrix, used for transforms /// A 4x4 matrix, used for transforms
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct Matrix4x4 { pub struct Matrix4x4 {
pub values: [Float4; 4], pub values: [Float4; 4],
} }
impl Matrix4x4 { impl Matrix4x4 {
/// Creates a new identity matrix /// Creates a new identity matrix
#[inline] #[inline]
@ -143,7 +141,6 @@ impl Matrix4x4 {
} }
} }
/// Returns the inverse of the Matrix /// Returns the inverse of the Matrix
#[inline] #[inline]
pub fn inverse(&self) -> Matrix4x4 { pub fn inverse(&self) -> Matrix4x4 {
@ -169,31 +166,44 @@ impl Matrix4x4 {
values: { values: {
[ [
Float4::new( Float4::new(
((self[1].get_1() * c5) - (self[1].get_2() * c4) + (self[1].get_3() * c3)) * invdet, ((self[1].get_1() * c5) - (self[1].get_2() * c4) + (self[1].get_3() * c3))
((-self[0].get_1() * c5) + (self[0].get_2() * c4) - (self[0].get_3() * c3)) * invdet, * invdet,
((self[3].get_1() * s5) - (self[3].get_2() * s4) + (self[3].get_3() * s3)) * invdet, ((-self[0].get_1() * c5) + (self[0].get_2() * c4) - (self[0].get_3() * c3))
((-self[2].get_1() * s5) + (self[2].get_2() * s4) - (self[2].get_3() * s3)) * invdet, * invdet,
((self[3].get_1() * s5) - (self[3].get_2() * s4) + (self[3].get_3() * s3))
* invdet,
((-self[2].get_1() * s5) + (self[2].get_2() * s4) - (self[2].get_3() * s3))
* invdet,
), ),
Float4::new( Float4::new(
((-self[1].get_0() * c5) + (self[1].get_2() * c2) - (self[1].get_3() * c1)) * invdet, ((-self[1].get_0() * c5) + (self[1].get_2() * c2) - (self[1].get_3() * c1))
((self[0].get_0() * c5) - (self[0].get_2() * c2) + (self[0].get_3() * c1)) * invdet, * invdet,
((-self[3].get_0() * s5) + (self[3].get_2() * s2) - (self[3].get_3() * s1)) * invdet, ((self[0].get_0() * c5) - (self[0].get_2() * c2) + (self[0].get_3() * c1))
((self[2].get_0() * s5) - (self[2].get_2() * s2) + (self[2].get_3() * s1)) * invdet, * invdet,
((-self[3].get_0() * s5) + (self[3].get_2() * s2) - (self[3].get_3() * s1))
* invdet,
((self[2].get_0() * s5) - (self[2].get_2() * s2) + (self[2].get_3() * s1))
* invdet,
), ),
Float4::new( Float4::new(
((self[1].get_0() * c4) - (self[1].get_1() * c2) + (self[1].get_3() * c0)) * invdet, ((self[1].get_0() * c4) - (self[1].get_1() * c2) + (self[1].get_3() * c0))
((-self[0].get_0() * c4) + (self[0].get_1() * c2) - (self[0].get_3() * c0)) * invdet, * invdet,
((self[3].get_0() * s4) - (self[3].get_1() * s2) + (self[3].get_3() * s0)) * invdet, ((-self[0].get_0() * c4) + (self[0].get_1() * c2) - (self[0].get_3() * c0))
((-self[2].get_0() * s4) + (self[2].get_1() * s2) - (self[2].get_3() * s0)) * invdet, * invdet,
((self[3].get_0() * s4) - (self[3].get_1() * s2) + (self[3].get_3() * s0))
* invdet,
((-self[2].get_0() * s4) + (self[2].get_1() * s2) - (self[2].get_3() * s0))
* invdet,
), ),
Float4::new( Float4::new(
((-self[1].get_0() * c3) + (self[1].get_1() * c1) - (self[1].get_2() * c0)) * invdet, ((-self[1].get_0() * c3) + (self[1].get_1() * c1) - (self[1].get_2() * c0))
((self[0].get_0() * c3) - (self[0].get_1() * c1) + (self[0].get_2() * c0)) * invdet, * invdet,
((-self[3].get_0() * s3) + (self[3].get_1() * s1) - (self[3].get_2() * s0)) * invdet, ((self[0].get_0() * c3) - (self[0].get_1() * c1) + (self[0].get_2() * c0))
((self[2].get_0() * s3) - (self[2].get_1() * s1) + (self[2].get_2() * s0)) * invdet, * invdet,
((-self[3].get_0() * s3) + (self[3].get_1() * s1) - (self[3].get_2() * s0))
* invdet,
((self[2].get_0() * s3) - (self[2].get_1() * s1) + (self[2].get_2() * s0))
* invdet,
), ),
] ]
}, },
@ -201,7 +211,6 @@ impl Matrix4x4 {
} }
} }
impl Index<usize> for Matrix4x4 { impl Index<usize> for Matrix4x4 {
type Output = Float4; type Output = Float4;
@ -211,7 +220,6 @@ impl Index<usize> for Matrix4x4 {
} }
} }
impl IndexMut<usize> for Matrix4x4 { impl IndexMut<usize> for Matrix4x4 {
#[inline(always)] #[inline(always)]
fn index_mut<'a>(&'a mut self, _index: usize) -> &'a mut Float4 { fn index_mut<'a>(&'a mut self, _index: usize) -> &'a mut Float4 {
@ -219,7 +227,6 @@ impl IndexMut<usize> for Matrix4x4 {
} }
} }
impl PartialEq for Matrix4x4 { impl PartialEq for Matrix4x4 {
#[inline] #[inline]
fn eq(&self, other: &Matrix4x4) -> bool { fn eq(&self, other: &Matrix4x4) -> bool {
@ -235,7 +242,6 @@ impl PartialEq for Matrix4x4 {
} }
} }
/// Multiply two matrices together /// Multiply two matrices together
impl Mul<Matrix4x4> for Matrix4x4 { impl Mul<Matrix4x4> for Matrix4x4 {
type Output = Matrix4x4; type Output = Matrix4x4;
@ -251,21 +257,18 @@ impl Mul<Matrix4x4> for Matrix4x4 {
(m[2] * other[0]).h_sum(), (m[2] * other[0]).h_sum(),
(m[3] * other[0]).h_sum(), (m[3] * other[0]).h_sum(),
), ),
Float4::new( Float4::new(
(m[0] * other[1]).h_sum(), (m[0] * other[1]).h_sum(),
(m[1] * other[1]).h_sum(), (m[1] * other[1]).h_sum(),
(m[2] * other[1]).h_sum(), (m[2] * other[1]).h_sum(),
(m[3] * other[1]).h_sum(), (m[3] * other[1]).h_sum(),
), ),
Float4::new( Float4::new(
(m[0] * other[2]).h_sum(), (m[0] * other[2]).h_sum(),
(m[1] * other[2]).h_sum(), (m[1] * other[2]).h_sum(),
(m[2] * other[2]).h_sum(), (m[2] * other[2]).h_sum(),
(m[3] * other[2]).h_sum(), (m[3] * other[2]).h_sum(),
), ),
Float4::new( Float4::new(
(m[0] * other[3]).h_sum(), (m[0] * other[3]).h_sum(),
(m[1] * other[3]).h_sum(), (m[1] * other[3]).h_sum(),
@ -277,10 +280,6 @@ impl Mul<Matrix4x4> for Matrix4x4 {
} }
} }
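A small usage sketch of the `Matrix4x4` operations shown above, written in the style of the tests module below (it assumes `Matrix4x4::new()` is the identity constructor described by its doc comment, and the tests' `use super::*;` context):
#[test]
fn matrix_identity_sketch() {
    // Assumption: new() builds the identity matrix, per its doc comment.
    let ident = Matrix4x4::new();
    let product = ident * ident; // Mul: each entry is a row/column h_sum()
    assert!(product == ident);   // PartialEq compares the four Float4 rows
    let _inv = ident.inverse();  // cofactor-based inverse; identity maps to itself
}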
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -1,14 +1,13 @@
#![allow(dead_code)] #![allow(dead_code)]
use std::cmp::PartialEq; use std::cmp::PartialEq;
use std::ops::{Add, Sub, Mul, Div, Neg}; use std::ops::{Add, Div, Mul, Neg, Sub};
use float4::Float4; use float4::Float4;
use super::{DotProduct, CrossProduct}; use super::{CrossProduct, DotProduct};
use super::{Matrix4x4, Vector}; use super::{Matrix4x4, Vector};
/// A surface normal in 3d homogeneous space. /// A surface normal in 3d homogeneous space.
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct Normal { pub struct Normal {
@ -18,7 +17,9 @@ pub struct Normal {
impl Normal { impl Normal {
#[inline(always)] #[inline(always)]
pub fn new(x: f32, y: f32, z: f32) -> Normal { pub fn new(x: f32, y: f32, z: f32) -> Normal {
Normal { co: Float4::new(x, y, z, 0.0) } Normal {
co: Float4::new(x, y, z, 0.0),
}
} }
#[inline(always)] #[inline(always)]
@ -82,7 +83,6 @@ impl Normal {
} }
} }
impl PartialEq for Normal { impl PartialEq for Normal {
#[inline(always)] #[inline(always)]
fn eq(&self, other: &Normal) -> bool { fn eq(&self, other: &Normal) -> bool {
@ -90,33 +90,36 @@ impl PartialEq for Normal {
} }
} }
impl Add for Normal { impl Add for Normal {
type Output = Normal; type Output = Normal;
#[inline(always)] #[inline(always)]
fn add(self, other: Normal) -> Normal { fn add(self, other: Normal) -> Normal {
Normal { co: self.co + other.co } Normal {
co: self.co + other.co,
}
} }
} }
impl Sub for Normal { impl Sub for Normal {
type Output = Normal; type Output = Normal;
#[inline(always)] #[inline(always)]
fn sub(self, other: Normal) -> Normal { fn sub(self, other: Normal) -> Normal {
Normal { co: self.co - other.co } Normal {
co: self.co - other.co,
}
} }
} }
impl Mul<f32> for Normal { impl Mul<f32> for Normal {
type Output = Normal; type Output = Normal;
#[inline(always)] #[inline(always)]
fn mul(self, other: f32) -> Normal { fn mul(self, other: f32) -> Normal {
Normal { co: self.co * other } Normal {
co: self.co * other,
}
} }
} }
@ -137,17 +140,17 @@ impl Mul<Matrix4x4> for Normal {
} }
} }
impl Div<f32> for Normal { impl Div<f32> for Normal {
type Output = Normal; type Output = Normal;
#[inline(always)] #[inline(always)]
fn div(self, other: f32) -> Normal { fn div(self, other: f32) -> Normal {
Normal { co: self.co / other } Normal {
co: self.co / other,
}
} }
} }
impl Neg for Normal { impl Neg for Normal {
type Output = Normal; type Output = Normal;
@ -157,7 +160,6 @@ impl Neg for Normal {
} }
} }
impl DotProduct for Normal { impl DotProduct for Normal {
#[inline(always)] #[inline(always)]
fn dot(self, other: Normal) -> f32 { fn dot(self, other: Normal) -> f32 {
@ -165,7 +167,6 @@ impl DotProduct for Normal {
} }
} }
impl CrossProduct for Normal { impl CrossProduct for Normal {
#[inline] #[inline]
fn cross(self, other: Normal) -> Normal { fn cross(self, other: Normal) -> Normal {
@ -180,11 +181,10 @@ impl CrossProduct for Normal {
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use super::super::{Matrix4x4, CrossProduct, DotProduct}; use super::super::{CrossProduct, DotProduct, Matrix4x4};
#[test] #[test]
fn add() { fn add() {

View File

@ -1,14 +1,13 @@
#![allow(dead_code)] #![allow(dead_code)]
use std::cmp::PartialEq; use std::cmp::PartialEq;
use std::ops::{Add, Sub, Mul}; use std::ops::{Add, Mul, Sub};
use float4::Float4; use float4::Float4;
use super::Matrix4x4; use super::Matrix4x4;
use super::Vector; use super::Vector;
/// A position in 3d homogeneous space. /// A position in 3d homogeneous space.
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct Point { pub struct Point {
@ -18,14 +17,18 @@ pub struct Point {
impl Point { impl Point {
#[inline(always)] #[inline(always)]
pub fn new(x: f32, y: f32, z: f32) -> Point { pub fn new(x: f32, y: f32, z: f32) -> Point {
Point { co: Float4::new(x, y, z, 1.0) } Point {
co: Float4::new(x, y, z, 1.0),
}
} }
/// Returns the point in standardized coordinates, where the /// Returns the point in standardized coordinates, where the
/// fourth homogeneous component has been normalized to 1.0. /// fourth homogeneous component has been normalized to 1.0.
#[inline(always)] #[inline(always)]
pub fn norm(&self) -> Point { pub fn norm(&self) -> Point {
Point { co: self.co / self.co.get_3() } Point {
co: self.co / self.co.get_3(),
}
} }
#[inline(always)] #[inline(always)]
@ -33,7 +36,9 @@ impl Point {
let n1 = self.norm(); let n1 = self.norm();
let n2 = other.norm(); let n2 = other.norm();
Point { co: n1.co.v_min(n2.co) } Point {
co: n1.co.v_min(n2.co),
}
} }
#[inline(always)] #[inline(always)]
@ -41,7 +46,9 @@ impl Point {
let n1 = self.norm(); let n1 = self.norm();
let n2 = other.norm(); let n2 = other.norm();
Point { co: n1.co.v_max(n2.co) } Point {
co: n1.co.v_max(n2.co),
}
} }
#[inline(always)] #[inline(always)]
@ -90,7 +97,6 @@ impl Point {
} }
} }
impl PartialEq for Point { impl PartialEq for Point {
#[inline(always)] #[inline(always)]
fn eq(&self, other: &Point) -> bool { fn eq(&self, other: &Point) -> bool {
@ -98,23 +104,25 @@ impl PartialEq for Point {
} }
} }
impl Add<Vector> for Point { impl Add<Vector> for Point {
type Output = Point; type Output = Point;
#[inline(always)] #[inline(always)]
fn add(self, other: Vector) -> Point { fn add(self, other: Vector) -> Point {
Point { co: self.co + other.co } Point {
co: self.co + other.co,
}
} }
} }
impl Sub for Point { impl Sub for Point {
type Output = Vector; type Output = Vector;
#[inline(always)] #[inline(always)]
fn sub(self, other: Point) -> Vector { fn sub(self, other: Point) -> Vector {
Vector { co: self.norm().co - other.norm().co } Vector {
co: self.norm().co - other.norm().co,
}
} }
} }
@ -123,7 +131,9 @@ impl Sub<Vector> for Point {
#[inline(always)] #[inline(always)]
fn sub(self, other: Vector) -> Point { fn sub(self, other: Vector) -> Point {
Point { co: self.co - other.co } Point {
co: self.co - other.co,
}
} }
} }
@ -143,11 +153,10 @@ impl Mul<Matrix4x4> for Point {
} }
} }
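A usage sketch of the homogeneous-point operations above: `Point - Point` yields a `Vector`, `Point + Vector` yields a `Point`, and `norm()` divides through by the fourth component (assumes the tests' `use super::*;` context shown below):
#[test]
fn point_ops_sketch() {
    let a = Point::new(1.0, 2.0, 3.0);
    let b = Point::new(4.0, 6.0, 8.0);
    let d = b - a;            // Sub: Point - Point -> Vector (3, 4, 5)
    assert_eq!(a + d, b);     // Add<Vector>: Point + Vector -> Point
    assert_eq!(a.norm(), a);  // w is already 1.0, so norm() changes nothing
}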
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use super::super::{Vector, Matrix4x4}; use super::super::{Matrix4x4, Vector};
#[test] #[test]
fn norm() { fn norm() {

View File

@ -1,13 +1,12 @@
#![allow(dead_code)] #![allow(dead_code)]
use std::cmp::PartialEq; use std::cmp::PartialEq;
use std::ops::{Add, Sub, Mul, Div, Neg}; use std::ops::{Add, Div, Mul, Neg, Sub};
use float4::Float4; use float4::Float4;
use super::{DotProduct, CrossProduct}; use super::{CrossProduct, DotProduct};
use super::{Matrix4x4, Point, Normal}; use super::{Matrix4x4, Normal, Point};
/// A direction vector in 3d homogeneous space. /// A direction vector in 3d homogeneous space.
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
@ -18,7 +17,9 @@ pub struct Vector {
impl Vector { impl Vector {
#[inline(always)] #[inline(always)]
pub fn new(x: f32, y: f32, z: f32) -> Vector { pub fn new(x: f32, y: f32, z: f32) -> Vector {
Vector { co: Float4::new(x, y, z, 0.0) } Vector {
co: Float4::new(x, y, z, 0.0),
}
} }
#[inline(always)] #[inline(always)]
@ -92,7 +93,6 @@ impl Vector {
} }
} }
impl PartialEq for Vector { impl PartialEq for Vector {
#[inline(always)] #[inline(always)]
fn eq(&self, other: &Vector) -> bool { fn eq(&self, other: &Vector) -> bool {
@ -100,37 +100,39 @@ impl PartialEq for Vector {
} }
} }
impl Add for Vector { impl Add for Vector {
type Output = Vector; type Output = Vector;
#[inline(always)] #[inline(always)]
fn add(self, other: Vector) -> Vector { fn add(self, other: Vector) -> Vector {
Vector { co: self.co + other.co } Vector {
co: self.co + other.co,
}
} }
} }
impl Sub for Vector { impl Sub for Vector {
type Output = Vector; type Output = Vector;
#[inline(always)] #[inline(always)]
fn sub(self, other: Vector) -> Vector { fn sub(self, other: Vector) -> Vector {
Vector { co: self.co - other.co } Vector {
co: self.co - other.co,
}
} }
} }
impl Mul<f32> for Vector { impl Mul<f32> for Vector {
type Output = Vector; type Output = Vector;
#[inline(always)] #[inline(always)]
fn mul(self, other: f32) -> Vector { fn mul(self, other: f32) -> Vector {
Vector { co: self.co * other } Vector {
co: self.co * other,
}
} }
} }
impl Mul<Matrix4x4> for Vector { impl Mul<Matrix4x4> for Vector {
type Output = Vector; type Output = Vector;
@ -147,17 +149,17 @@ impl Mul<Matrix4x4> for Vector {
} }
} }
impl Div<f32> for Vector { impl Div<f32> for Vector {
type Output = Vector; type Output = Vector;
#[inline(always)] #[inline(always)]
fn div(self, other: f32) -> Vector { fn div(self, other: f32) -> Vector {
Vector { co: self.co / other } Vector {
co: self.co / other,
}
} }
} }
impl Neg for Vector { impl Neg for Vector {
type Output = Vector; type Output = Vector;
@ -167,7 +169,6 @@ impl Neg for Vector {
} }
} }
impl DotProduct for Vector { impl DotProduct for Vector {
#[inline(always)] #[inline(always)]
fn dot(self, other: Vector) -> f32 { fn dot(self, other: Vector) -> f32 {
@ -175,7 +176,6 @@ impl DotProduct for Vector {
} }
} }
impl CrossProduct for Vector { impl CrossProduct for Vector {
#[inline] #[inline]
fn cross(self, other: Vector) -> Vector { fn cross(self, other: Vector) -> Vector {
@ -190,11 +190,10 @@ impl CrossProduct for Vector {
} }
} }
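And an equivalent sketch for `Vector`, exercising the `DotProduct` and `CrossProduct` impls above (assumes the tests' imports shown below, which bring both traits into scope):
#[test]
fn vector_ops_sketch() {
    let x = Vector::new(1.0, 0.0, 0.0);
    let y = Vector::new(0.0, 1.0, 0.0);
    assert_eq!(x.dot(y), 0.0);                             // orthogonal axes
    assert_eq!(x.cross(y), Vector::new(0.0, 0.0, 1.0));    // right-handed cross product
    assert_eq!((x + y) * 2.0, Vector::new(2.0, 2.0, 0.0)); // Add and Mul<f32>
}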
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use super::super::{Matrix4x4, CrossProduct, DotProduct}; use super::super::{CrossProduct, DotProduct, Matrix4x4};
#[test] #[test]
fn add() { fn add() {

View File

@ -1,6 +1,6 @@
use std::slice; use std::slice;
use std::cell::{Cell, RefCell}; use std::cell::{Cell, RefCell};
use std::mem::{size_of, align_of}; use std::mem::{align_of, size_of};
use std::cmp::max; use std::cmp::max;
const GROWTH_FRACTION: usize = 8; // 1/N (smaller number leads to bigger allocations) const GROWTH_FRACTION: usize = 8; // 1/N (smaller number leads to bigger allocations)
@ -261,10 +261,8 @@ impl MemArena {
unsafe fn alloc_raw(&self, size: usize, alignment: usize) -> *mut u8 { unsafe fn alloc_raw(&self, size: usize, alignment: usize) -> *mut u8 {
assert!(alignment > 0); assert!(alignment > 0);
self.stat_space_allocated.set( self.stat_space_allocated
self.stat_space_allocated.get() + .set(self.stat_space_allocated.get() + size); // Update stats
size,
); // Update stats
let mut blocks = self.blocks.borrow_mut(); let mut blocks = self.blocks.borrow_mut();
@ -302,21 +300,22 @@ impl MemArena {
}; };
let waste_percentage = { let waste_percentage = {
let w1 = ((blocks[0].capacity() - blocks[0].len()) * 100) / let w1 =
blocks[0].capacity(); ((blocks[0].capacity() - blocks[0].len()) * 100) / blocks[0].capacity();
let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get()) * let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get())
100) / * 100) / self.stat_space_occupied.get();
self.stat_space_occupied.get(); if w1 < w2 {
if w1 < w2 { w1 } else { w2 } w1
} else {
w2
}
}; };
// If it's a "large allocation", give it its own memory block. // If it's a "large allocation", give it its own memory block.
if (size + alignment) > next_size || waste_percentage > self.max_waste_percentage { if (size + alignment) > next_size || waste_percentage > self.max_waste_percentage {
// Update stats // Update stats
self.stat_space_occupied.set( self.stat_space_occupied
self.stat_space_occupied.get() + size + alignment - .set(self.stat_space_occupied.get() + size + alignment - 1);
1,
);
blocks.push(Vec::with_capacity(size + alignment - 1)); blocks.push(Vec::with_capacity(size + alignment - 1));
blocks.last_mut().unwrap().set_len(size + alignment - 1); blocks.last_mut().unwrap().set_len(size + alignment - 1);
@ -330,10 +329,8 @@ impl MemArena {
// Otherwise create a new shared block. // Otherwise create a new shared block.
else { else {
// Update stats // Update stats
self.stat_space_occupied.set( self.stat_space_occupied
self.stat_space_occupied.get() + .set(self.stat_space_occupied.get() + next_size);
next_size,
);
blocks.push(Vec::with_capacity(next_size)); blocks.push(Vec::with_capacity(next_size));
let block_count = blocks.len(); let block_count = blocks.len();
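The waste check above decides whether a new allocation gets its own block or the arena grows a shared one: it compares `size + alignment` against the next block size and against how much of the arena is currently sitting unused. A back-of-the-envelope sketch of the two percentages with made-up numbers (not values from the crate):
// Sketch: head block of 1024 bytes with 896 in use; 4096 bytes occupied
// arena-wide versus 3000 actually handed out to callers.
fn waste_percentage_sketch() -> usize {
    let w1: usize = ((1024 - 896) * 100) / 1024;  // 12: percent of the head block that is free
    let w2: usize = ((4096 - 3000) * 100) / 4096; // 26: percent of the arena that is overhead
    if w1 < w2 { w1 } else { w2 }                 // the real code keeps the smaller figure
}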

View File

@ -23,7 +23,7 @@
mod matrices; mod matrices;
pub use matrices::NUM_DIMENSIONS; pub use matrices::NUM_DIMENSIONS;
use matrices::{SIZE, MATRICES}; use matrices::{MATRICES, SIZE};
/// Compute one component of the Sobol'-sequence, where the component /// Compute one component of the Sobol'-sequence, where the component
/// corresponds to the dimension parameter, and the index specifies /// corresponds to the dimension parameter, and the index specifies