Run new rustfmt on codebase.

Nathan Vegdahl 2018-03-04 13:00:55 -08:00
parent f39589ab72
commit 97d3304149
56 changed files with 719 additions and 837 deletions
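
Note: the hunks below are formatting-only changes from a newer rustfmt, plus a regenerated Cargo.lock. The exact invocation is not recorded in the commit; a reformat like this is typically applied with `cargo fmt` (or `rustfmt` on each file) across the workspace. The short Rust sketch that follows is not taken from the codebase; it is a hypothetical illustration of the main patterns the new rustfmt settles on throughout the diff: binary operators starting continuation lines, `Some(x) => match ...` arms without an extra nested block, and multi-line struct literals with one field per line.

// Hypothetical sketch (not from this codebase) of the style the new rustfmt
// produces in the hunks below.

struct Bounds {
    min: (f32, f32),
    max: (f32, f32),
}

// Binary operators now begin continuation lines instead of ending them.
fn ray_code(sign_is_neg: [bool; 3]) -> usize {
    sign_is_neg[0] as usize + ((sign_is_neg[1] as usize) << 1)
        + ((sign_is_neg[2] as usize) << 2)
}

// `Some(x) => match ...` is written without an extra nested block, and
// struct-literal fields are broken onto their own lines.
fn describe(b: Option<&Bounds>) -> String {
    match b {
        None => "empty".to_string(),
        Some(b) => match (b.min, b.max) {
            ((x0, y0), (x1, y1)) => format!("({}, {}) .. ({}, {})", x0, y0, x1, y1),
        },
    }
}

fn main() {
    let b = Bounds {
        min: (0.0, 0.0),
        max: (1.0, 1.0),
    };
    println!("{} {}", ray_code([true, false, true]), describe(Some(&b)));
}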

---- Cargo.lock (generated) ----

@ -1,7 +1,3 @@
[root]
name = "spectra_xyz"
version = "0.1.0"
[[package]]
name = "ansi_term"
version = "0.9.0"
@ -207,6 +203,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "sobol"
version = "0.1.0"
[[package]]
name = "spectra_xyz"
version = "0.1.0"
[[package]]
name = "strsim"
version = "0.6.0"

---- (file name not shown) ----

@ -15,7 +15,6 @@ use super::bvh_base::{BVHBase, BVHBaseNode, BVH_MAX_DEPTH};
use super::ACCEL_TRAV_TIME;
use super::ACCEL_NODE_RAY_TESTS;
#[derive(Copy, Clone, Debug)]
pub struct BVH<'a> {
root: Option<&'a BVHNode<'a>>,
@ -175,10 +174,8 @@ impl<'a> BVH<'a> {
} => {
let mut node = unsafe { arena.alloc_uninitialized_with_alignment::<BVHNode>(32) };
let bounds = arena.copy_slice_with_alignment(
&base.bounds[bounds_range.0..bounds_range.1],
32,
);
let bounds = arena
.copy_slice_with_alignment(&base.bounds[bounds_range.0..bounds_range.1], 32);
let child1 = BVH::construct_from_base(arena, base, children_indices.0);
let child2 = BVH::construct_from_base(arena, base, children_indices.1);
@ -219,20 +216,18 @@ impl<'a> Boundable for BVH<'a> {
fn bounds(&self) -> &[BBox] {
match self.root {
None => &DEGENERATE_BOUNDS[..],
Some(root) => {
match *root {
BVHNode::Internal {
bounds_start,
bounds_len,
..
} |
BVHNode::Leaf {
bounds_start,
bounds_len,
..
} => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
Some(root) => match *root {
BVHNode::Internal {
bounds_start,
bounds_len,
..
}
}
| BVHNode::Leaf {
bounds_start,
bounds_len,
..
} => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
},
}
}
}

---- (file name not shown) ----

@ -11,12 +11,11 @@ use lerp::lerp_slice;
use ray::AccelRay;
use timer::Timer;
use bvh_order::{TRAVERSAL_TABLE, SplitAxes, calc_traversal_code};
use bvh_order::{calc_traversal_code, SplitAxes, TRAVERSAL_TABLE};
use super::bvh_base::{BVHBase, BVHBaseNode, BVH_MAX_DEPTH};
use super::ACCEL_TRAV_TIME;
use super::ACCEL_NODE_RAY_TESTS;
#[derive(Copy, Clone, Debug)]
pub struct BVH4<'a> {
root: Option<&'a BVH4Node<'a>>,
@ -88,8 +87,8 @@ impl<'a> BVH4<'a> {
rays[0].dir_inv.y() < 0.0,
rays[0].dir_inv.z() < 0.0,
];
let ray_code = ray_sign_is_neg[0] as usize + ((ray_sign_is_neg[1] as usize) << 1) +
((ray_sign_is_neg[2] as usize) << 2);
let ray_code = ray_sign_is_neg[0] as usize + ((ray_sign_is_neg[1] as usize) << 1)
+ ((ray_sign_is_neg[2] as usize) << 2);
&TRAVERSAL_TABLE[ray_code]
};
@ -271,10 +270,8 @@ impl<'a> BVH4<'a> {
}
// Copy bounds
let bounds = arena.copy_slice_with_alignment(
&base.bounds[bounds_range.0..bounds_range.1],
32,
);
let bounds = arena
.copy_slice_with_alignment(&base.bounds[bounds_range.0..bounds_range.1], 32);
// Build children
let mut children_mem = unsafe {
@ -317,20 +314,18 @@ impl<'a> Boundable for BVH4<'a> {
fn bounds(&self) -> &[BBox] {
match self.root {
None => &DEGENERATE_BOUNDS[..],
Some(root) => {
match *root {
BVH4Node::Inner {
bounds_start,
bounds_len,
..
} |
BVH4Node::Leaf {
bounds_start,
bounds_len,
..
} => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
Some(root) => match *root {
BVH4Node::Inner {
bounds_start,
bounds_len,
..
}
}
| BVH4Node::Leaf {
bounds_start,
bounds_len,
..
} => unsafe { std::slice::from_raw_parts(bounds_start, bounds_len as usize) },
},
}
}
}

---- (file name not shown) ----

@ -5,8 +5,7 @@ use bbox::BBox;
use lerp::lerp_slice;
use math::log2_64;
use super::objects_split::{sah_split, median_split};
use super::objects_split::{median_split, sah_split};
pub const BVH_MAX_DEPTH: usize = 42;
@ -41,8 +40,9 @@ pub enum BVHBaseNode {
impl BVHBaseNode {
pub fn bounds_range(&self) -> (usize, usize) {
match *self {
BVHBaseNode::Internal { bounds_range, .. } |
BVHBaseNode::Leaf { bounds_range, .. } => bounds_range,
BVHBaseNode::Internal { bounds_range, .. } | BVHBaseNode::Leaf { bounds_range, .. } => {
bounds_range
}
}
}
}
@ -119,13 +119,13 @@ impl BVHBase {
// We make sure that it's worth having multiple time samples, and if not
// we reduce to the union of the time samples.
self.acc_bounds(objects, bounder);
let union_bounds = self.bounds_cache.iter().fold(
BBox::new(),
|b1, b2| (b1 | *b2),
);
let average_area = self.bounds_cache.iter().fold(0.0, |area, bb| {
area + bb.surface_area()
}) / self.bounds_cache.len() as f32;
let union_bounds = self.bounds_cache
.iter()
.fold(BBox::new(), |b1, b2| (b1 | *b2));
let average_area = self.bounds_cache
.iter()
.fold(0.0, |area, bb| area + bb.surface_area())
/ self.bounds_cache.len() as f32;
if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) {
self.bounds.push(union_bounds);
} else {
@ -195,8 +195,8 @@ impl BVHBase {
// We make sure that it's worth having multiple time samples, and if not
// we reduce to the union of the time samples.
let union_bounds = merged.iter().fold(BBox::new(), |b1, b2| (b1 | *b2));
let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area()) /
merged.len() as f32;
let average_area = merged.iter().fold(0.0, |area, bb| area + bb.surface_area())
/ merged.len() as f32;
if union_bounds.surface_area() <= (average_area * USE_UNION_FACTOR) {
self.bounds.push(union_bounds);
} else {
@ -204,7 +204,6 @@ impl BVHBase {
}
}
// Set node
self.nodes[me] = BVHBaseNode::Internal {
bounds_range: (bi, self.bounds.len()),

---- (file name not shown) ----

@ -1,7 +1,7 @@
use mem_arena::MemArena;
use bbox::BBox;
use math::{Vector, Point, Normal};
use math::{Normal, Point, Vector};
use shading::surface_closure::SurfaceClosure;
use super::LightAccel;

---- (file name not shown) ----

@ -3,18 +3,17 @@ use mem_arena::MemArena;
use algorithm::merge_slices_append;
use bbox::BBox;
use lerp::lerp_slice;
use math::{Vector, Point, Normal};
use math::{Normal, Point, Vector};
use shading::surface_closure::SurfaceClosure;
use super::LightAccel;
use super::objects_split::sah_split;
const ARITY_LOG2: usize = 3; // Determines how much to collapse the binary tree,
// implicitly defining the light tree's arity. 1 = no collapsing, leave as binary
// tree.
// implicitly defining the light tree's arity. 1 = no collapsing, leave as binary
// tree.
const ARITY: usize = 1 << ARITY_LOG2; // Arity of the final tree
#[derive(Copy, Clone, Debug)]
pub struct LightTree<'a> {
root: Option<&'a Node<'a>>,
@ -38,15 +37,13 @@ enum Node<'a> {
impl<'a> Node<'a> {
fn bounds(&self) -> &'a [BBox] {
match *self {
Node::Inner { bounds, .. } |
Node::Leaf { bounds, .. } => bounds,
Node::Inner { bounds, .. } | Node::Leaf { bounds, .. } => bounds,
}
}
fn energy(&self) -> f32 {
match *self {
Node::Inner { energy, .. } |
Node::Leaf { energy, .. } => energy,
Node::Inner { energy, .. } | Node::Leaf { energy, .. } => energy,
}
}
@ -127,7 +124,6 @@ impl<'a> LightTree<'a> {
}
}
impl<'a> LightAccel for LightTree<'a> {
fn select(
&self,
@ -210,7 +206,6 @@ impl<'a> LightAccel for LightTree<'a> {
}
}
struct LightTreeBuilder {
nodes: Vec<BuilderNode>,
bounds: Vec<BBox>,

---- (file name not shown) ----

@ -7,10 +7,10 @@ mod objects_split;
use std::cell::Cell;
use math::{Vector, Point, Normal};
use math::{Normal, Point, Vector};
use shading::surface_closure::SurfaceClosure;
pub use self::bvh::{BVH, BVHNode};
pub use self::bvh::{BVHNode, BVH};
pub use self::bvh4::{BVH4, BVH4Node};
pub use self::light_tree::LightTree;
pub use self::light_array::LightArray;

---- (file name not shown) ----

@ -8,14 +8,12 @@ use halton;
use algorithm::{partition, quick_select};
use bbox::BBox;
use lerp::lerp_slice;
use math::{Vector, dot};
use math::{dot, Vector};
use sampling::uniform_sample_hemisphere;
const SAH_BIN_COUNT: usize = 13; // Prime numbers work best, for some reason
const SPLIT_PLANE_COUNT: usize = 5;
/// Takes a slice of boundable objects and partitions them based on the Surface
/// Area Heuristic, but using arbitrarily oriented planes.
///
@ -66,8 +64,8 @@ where
// Build SAH bins
let sah_bins = {
let mut sah_bins = [[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1];
SPLIT_PLANE_COUNT];
let mut sah_bins =
[[(BBox::new(), BBox::new(), 0, 0); SAH_BIN_COUNT - 1]; SPLIT_PLANE_COUNT];
for obj in objects.iter() {
let tb = lerp_slice(bounder(obj), 0.5);
let centroid = tb.center().into_vector();
@ -148,7 +146,6 @@ where
(split_i, approx_axis)
}
/// Takes a slice of boundable objects and partitions them based on the Surface
/// Area Heuristic.
///
@ -288,7 +285,6 @@ where
(split_i, split_axis)
}
/// Takes a slice of boundable objects and partitions them based on the median heuristic.
///
/// Returns the index of the partition boundary and the axis that it split on
@ -321,7 +317,11 @@ where
let place = {
let place = objects.len() / 2;
if place > 0 { place } else { 1 }
if place > 0 {
place
} else {
1
}
};
quick_select(objects, place, |a, b| {
let tb_a = lerp_slice(bounder(a), 0.5);

---- (file name not shown) ----

@ -5,8 +5,7 @@ use std::cmp;
use std::cmp::Ordering;
use hash::hash_u64;
use lerp::{Lerp, lerp_slice};
use lerp::{lerp_slice, Lerp};
/// Selects an item from a slice based on a weighting function and a
/// number (n) between 0.0 and 1.0. Returns the index of the selected
@ -33,7 +32,6 @@ where
unreachable!()
}
/// Partitions a slice in-place with the given unary predicate, returning
/// the index of the first element for which the predicate evaluates
/// false.
@ -129,7 +127,6 @@ where
}
}
/// Partitions two slices in-place in concert based on the given unary
/// predicate, returning the index of the first element for which the
/// predicate evaluates false.
@ -167,8 +164,7 @@ where
((a1 as usize) - start) / std::mem::size_of::<A>(),
&mut *a1,
&mut *a2,
)
{
) {
break;
}
a1 = a1.offset(1);
@ -185,8 +181,7 @@ where
((b1 as usize) - start) / std::mem::size_of::<A>(),
&mut *b1,
&mut *b2,
)
{
) {
break;
}
}
@ -214,11 +209,10 @@ where
let i = left + (hash_u64(right as u64, seed) as usize % (right - left));
slc.swap(i, right - 1);
let ii = left +
{
let (val, list) = (&mut slc[left..right]).split_last_mut().unwrap();
partition(list, |n| order(n, val) == Ordering::Less)
};
let ii = left + {
let (val, list) = (&mut slc[left..right]).split_last_mut().unwrap();
partition(list, |n| order(n, val) == Ordering::Less)
};
slc.swap(ii, right - 1);
if ii == n {
@ -276,12 +270,10 @@ where
if slice1.is_empty() || slice2.is_empty() {
return;
} else if slice1.len() == slice2.len() {
for (xfo, (xf1, xf2)) in
Iterator::zip(
slice_out.iter_mut(),
Iterator::zip(slice1.iter(), slice2.iter()),
)
{
for (xfo, (xf1, xf2)) in Iterator::zip(
slice_out.iter_mut(),
Iterator::zip(slice1.iter(), slice2.iter()),
) {
*xfo = merge(xf1, xf2);
}
} else if slice1.len() > slice2.len() {
@ -305,12 +297,14 @@ mod tests {
use super::*;
fn quick_select_ints(list: &mut [i32], i: usize) {
quick_select(list, i, |a, b| if a < b {
Ordering::Less
} else if a == b {
Ordering::Equal
} else {
Ordering::Greater
quick_select(list, i, |a, b| {
if a < b {
Ordering::Less
} else if a == b {
Ordering::Equal
} else {
Ordering::Greater
}
});
}

---- (file name not shown) ----

@ -5,10 +5,9 @@ use std::iter::Iterator;
use std::ops::{BitOr, BitOrAssign};
use lerp::{lerp, lerp_slice, Lerp};
use math::{Point, Matrix4x4, fast_minf32};
use math::{Matrix4x4, Point, fast_minf32};
use ray::AccelRay;
const BBOX_MAXT_ADJUST: f32 = 1.00000024;
/// A 3D axis-aligned bounding box.
@ -98,15 +97,18 @@ impl BBox {
}
}
/// Union of two `BBox`es.
impl BitOr for BBox {
type Output = BBox;
fn bitor(self, rhs: BBox) -> BBox {
BBox::from_points(
Point { co: self.min.co.v_min(rhs.min.co) },
Point { co: self.max.co.v_max(rhs.max.co) },
Point {
co: self.min.co.v_min(rhs.min.co),
},
Point {
co: self.max.co.v_max(rhs.max.co),
},
)
}
}
@ -123,8 +125,12 @@ impl BitOr<Point> for BBox {
fn bitor(self, rhs: Point) -> BBox {
BBox::from_points(
Point { co: self.min.co.v_min(rhs.co) },
Point { co: self.max.co.v_max(rhs.co) },
Point {
co: self.min.co.v_min(rhs.co),
},
Point {
co: self.max.co.v_max(rhs.co),
},
)
}
}
@ -135,7 +141,6 @@ impl BitOrAssign<Point> for BBox {
}
}
impl Lerp for BBox {
fn lerp(self, other: BBox, alpha: f32) -> BBox {
BBox {
@ -145,7 +150,6 @@ impl Lerp for BBox {
}
}
pub fn transform_bbox_slice_from(bbs_in: &[BBox], xforms: &[Matrix4x4], bbs_out: &mut Vec<BBox>) {
bbs_out.clear();

---- (file name not shown) ----

@ -2,7 +2,6 @@
use bbox::BBox;
pub trait Boundable {
fn bounds(&self) -> &[BBox];
}

---- (file name not shown) ----

@ -3,11 +3,10 @@
use mem_arena::MemArena;
use lerp::lerp_slice;
use math::{Vector, Point, Matrix4x4};
use math::{Matrix4x4, Point, Vector};
use ray::Ray;
use sampling::square_to_circle;
#[derive(Copy, Clone, Debug)]
pub struct Camera<'a> {
transforms: &'a [Matrix4x4],
@ -36,12 +35,12 @@ impl<'a> Camera<'a> {
if aperture_radii.is_empty() && !focus_distances.is_empty() {
println!(
"WARNING: camera has aperture radius but no focus distance. Disabling \
focal blur."
focal blur."
);
} else if !aperture_radii.is_empty() && focus_distances.is_empty() {
println!(
"WARNING: camera has focus distance but no aperture radius. Disabling \
focal blur."
focal blur."
);
}
}

---- (file name not shown) ----

@ -1,4 +1,4 @@
use std::ops::{Add, AddAssign, Mul, MulAssign, Div, DivAssign};
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign};
use spectra_xyz::{spectrum_xyz_to_p, EQUAL_ENERGY_REFLECTANCE};
@ -6,8 +6,7 @@ use float4::Float4;
use lerp::Lerp;
use math::fast_exp;
pub use color_util::{xyz_to_rec709, xyz_to_rec709_e, rec709_to_xyz, rec709_e_to_xyz};
pub use color_util::{rec709_e_to_xyz, rec709_to_xyz, xyz_to_rec709, xyz_to_rec709_e};
// Minimum and maximum wavelength of light we care about, in nanometers
const WL_MIN: f32 = 380.0;
@ -38,7 +37,11 @@ pub trait Color {
fn nth_wavelength(hero_wavelength: f32, n: usize) -> f32 {
let wl = hero_wavelength + (WL_RANGE_Q * n as f32);
if wl > WL_MAX { wl - WL_RANGE } else { wl }
if wl > WL_MAX {
wl - WL_RANGE
} else {
wl
}
}
//----------------------------------------------------------------
@ -78,7 +81,11 @@ impl SpectralSample {
/// Returns the nth wavelength
fn wl_n(&self, n: usize) -> f32 {
let wl = self.hero_wavelength + (WL_RANGE_Q * n as f32);
if wl > WL_MAX { wl - WL_RANGE } else { wl }
if wl > WL_MAX {
wl - WL_RANGE
} else {
wl
}
}
}
@ -278,8 +285,8 @@ pub fn x_1931(wavelength: f32) -> f32 {
let t1 = (wavelength - 442.0) * (if wavelength < 442.0 { 0.0624 } else { 0.0374 });
let t2 = (wavelength - 599.8) * (if wavelength < 599.8 { 0.0264 } else { 0.0323 });
let t3 = (wavelength - 501.1) * (if wavelength < 501.1 { 0.0490 } else { 0.0382 });
(0.362 * fast_exp(-0.5 * t1 * t1)) + (1.056 * fast_exp(-0.5 * t2 * t2)) -
(0.065 * fast_exp(-0.5 * t3 * t3))
(0.362 * fast_exp(-0.5 * t1 * t1)) + (1.056 * fast_exp(-0.5 * t2 * t2))
- (0.065 * fast_exp(-0.5 * t3 * t3))
}
pub fn y_1931(wavelength: f32) -> f32 {

---- (file name not shown) ----

@ -3,7 +3,7 @@
//! This is based on the work in section 3.9 of "Physically Based Rendering:
//! From Theory to Implementation" 3rd edition by Pharr et al.
use math::{Point, Vector, Normal, dot};
use math::{dot, Normal, Point, Vector};
#[inline(always)]
pub fn fp_gamma(n: u32) -> f32 {
@ -12,7 +12,6 @@ pub fn fp_gamma(n: u32) -> f32 {
(e * n as f32) / (1.0 - (e * n as f32))
}
pub fn increment_ulp(v: f32) -> f32 {
// Handle special cases
if (v.is_infinite() && v > 0.0) || v.is_nan() {
@ -30,7 +29,6 @@ pub fn increment_ulp(v: f32) -> f32 {
}
}
pub fn decrement_ulp(v: f32) -> f32 {
// Handle special cases
if (v.is_infinite() && v < 0.0) || v.is_nan() {
@ -53,7 +51,11 @@ pub fn robust_ray_origin(pos: Point, pos_err: f32, nor: Normal, ray_dir: Vector)
// direction as ray_dir.
let nor = {
let nor = nor.into_vector();
if dot(nor, ray_dir) >= 0.0 { nor } else { -nor }
if dot(nor, ray_dir) >= 0.0 {
nor
} else {
-nor
}
};
// Calculate offset point
@ -83,7 +85,6 @@ pub fn robust_ray_origin(pos: Point, pos_err: f32, nor: Normal, ray_dir: Vector)
Point::new(x, y, z)
}
#[inline(always)]
fn f32_to_bits(v: f32) -> u32 {
use std::mem::transmute_copy;
@ -96,7 +97,6 @@ fn bits_to_f32(bits: u32) -> f32 {
unsafe { transmute_copy::<u32, f32>(&bits) }
}
#[cfg(test)]
mod tests {
use super::*;

---- (file name not shown) ----

@ -39,7 +39,6 @@ pub fn xy2d(x: u32, y: u32) -> u32 {
d
}
/// Convert hilbert curve index to (x,y).
///
/// d: The hilbert curve index.

---- (file name not shown) ----

@ -14,8 +14,7 @@ use half::f16;
use png_encode_mini;
use openexr;
use color::{XYZ, xyz_to_rec709_e};
use color::{xyz_to_rec709_e, XYZ};
#[derive(Debug)]
pub struct Image {
@ -260,8 +259,8 @@ impl<'a> Drop for Bucket<'a> {
// Find matching bucket and remove it
let i = bucket_list.iter().position(|bucket| {
(bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 &&
(bucket.1).0 == self.max.0 && (bucket.1).1 == self.max.1
(bucket.0).0 == self.min.0 && (bucket.0).1 == self.min.1 && (bucket.1).0 == self.max.0
&& (bucket.1).1 == self.max.1
});
bucket_list.swap_remove(i.unwrap());
}

---- (file name not shown) ----

@ -8,7 +8,6 @@ pub trait Lerp {
fn lerp(self, other: Self, alpha: f32) -> Self;
}
/// Interpolates between two instances of a Lerp types.
pub fn lerp<T: Lerp>(a: T, b: T, alpha: f32) -> T {
debug_assert!(alpha >= 0.0);
@ -17,7 +16,6 @@ pub fn lerp<T: Lerp>(a: T, b: T, alpha: f32) -> T {
a.lerp(b, alpha)
}
/// Interpolates a slice of data as if each adjecent pair of elements
/// represent a linear segment.
pub fn lerp_slice<T: Lerp + Copy>(s: &[T], alpha: f32) -> T {
@ -58,7 +56,6 @@ where
}
}
impl Lerp for f32 {
fn lerp(self, other: f32, alpha: f32) -> f32 {
(self * (1.0 - alpha)) + (other * alpha)
@ -103,23 +100,22 @@ impl Lerp for Normal {
}
}
impl Lerp for Point {
fn lerp(self, other: Point, alpha: f32) -> Point {
let s = self.norm();
let o = other.norm();
Point { co: (s.co * (1.0 - alpha)) + (o.co * alpha) }
Point {
co: (s.co * (1.0 - alpha)) + (o.co * alpha),
}
}
}
impl Lerp for Vector {
fn lerp(self, other: Vector, alpha: f32) -> Vector {
(self * (1.0 - alpha)) + (other * alpha)
}
}
#[cfg(test)]
mod tests {
use super::*;

---- (file name not shown) ----

@ -2,9 +2,9 @@ use std::f64::consts::PI as PI_64;
use mem_arena::MemArena;
use color::{XYZ, SpectralSample, Color};
use color::{Color, SpectralSample, XYZ};
use lerp::lerp_slice;
use math::{Vector, coordinate_system_from_vector};
use math::{coordinate_system_from_vector, Vector};
use sampling::{uniform_sample_cone, uniform_sample_cone_pdf};
use super::WorldLightSource;
@ -87,10 +87,10 @@ impl<'a> WorldLightSource for DistantDiskLight<'a> {
}
fn approximate_energy(&self) -> f32 {
let color: XYZ = self.colors.iter().fold(
XYZ::new(0.0, 0.0, 0.0),
|a, &b| a + b,
) / self.colors.len() as f32;
let color: XYZ = self.colors
.iter()
.fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b)
/ self.colors.len() as f32;
color.y
}
}

---- (file name not shown) ----

@ -5,14 +5,13 @@ mod sphere_light;
use std::fmt::Debug;
use color::SpectralSample;
use math::{Vector, Normal, Point, Matrix4x4};
use math::{Matrix4x4, Normal, Point, Vector};
use surface::Surface;
pub use self::distant_disk_light::DistantDiskLight;
pub use self::rectangle_light::RectangleLight;
pub use self::sphere_light::SphereLight;
/// A finite light source that can be bounded in space.
pub trait SurfaceLight: Surface {
/// Samples the surface given a point to be illuminated.
@ -40,7 +39,6 @@ pub trait SurfaceLight: Surface {
time: f32,
) -> (SpectralSample, (Point, Normal, f32), f32);
/// Returns whether the light has a delta distribution.
///
/// If a light has no chance of a ray hitting it through random process
@ -48,7 +46,6 @@ pub trait SurfaceLight: Surface {
/// lights that only emit in a single direction, etc.
fn is_delta(&self) -> bool;
/// Returns an approximation of the total energy emitted by the surface.
///
/// Note: this does not need to be exact, but it does need to be non-zero
@ -57,7 +54,6 @@ pub trait SurfaceLight: Surface {
fn approximate_energy(&self) -> f32;
}
/// An infinite light source that cannot be bounded in space. E.g.
/// a sun light source.
pub trait WorldLightSource: Debug + Sync {
@ -78,7 +74,6 @@ pub trait WorldLightSource: Debug + Sync {
time: f32,
) -> (SpectralSample, Vector, f32);
/// Returns whether the light has a delta distribution.
///
/// If a light has no chance of a ray hitting it through random process
@ -86,7 +81,6 @@ pub trait WorldLightSource: Debug + Sync {
/// lights that only emit in a single direction, etc.
fn is_delta(&self) -> bool;
/// Returns an approximation of the total energy emitted by the light
/// source.
///

---- (file name not shown) ----

@ -2,22 +2,20 @@ use mem_arena::MemArena;
use bbox::BBox;
use boundable::Boundable;
use color::{XYZ, SpectralSample, Color};
use color::{Color, SpectralSample, XYZ};
use lerp::lerp_slice;
use math::{Vector, Normal, Point, Matrix4x4, cross, dot};
use ray::{Ray, AccelRay};
use sampling::{spherical_triangle_solid_angle, uniform_sample_spherical_triangle,
triangle_surface_area, uniform_sample_triangle};
use shading::surface_closure::{SurfaceClosureUnion, EmitClosure};
use math::{cross, dot, Matrix4x4, Normal, Point, Vector};
use ray::{AccelRay, Ray};
use sampling::{spherical_triangle_solid_angle, triangle_surface_area,
uniform_sample_spherical_triangle, uniform_sample_triangle};
use shading::surface_closure::{EmitClosure, SurfaceClosureUnion};
use shading::SurfaceShader;
use surface::{Surface, SurfaceIntersection, SurfaceIntersectionData, triangle};
use surface::{triangle, Surface, SurfaceIntersection, SurfaceIntersectionData};
use super::SurfaceLight;
const SIMPLE_SAMPLING_THRESHOLD: f32 = 0.01;
#[derive(Copy, Clone, Debug)]
pub struct RectangleLight<'a> {
dimensions: &'a [(f32, f32)],
@ -33,11 +31,9 @@ impl<'a> RectangleLight<'a> {
) -> RectangleLight<'b> {
let bbs: Vec<_> = dimensions
.iter()
.map(|d| {
BBox {
min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0),
max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0),
}
.map(|d| BBox {
min: Point::new(d.0 * -0.5, d.1 * -0.5, 0.0),
max: Point::new(d.0 * 0.5, d.1 * 0.5, 0.0),
})
.collect();
RectangleLight {
@ -87,9 +83,9 @@ impl<'a> RectangleLight<'a> {
// PDF
if (area_1 + area_2) < SIMPLE_SAMPLING_THRESHOLD {
let area = triangle_surface_area(p2, p1, p3) + triangle_surface_area(p4, p1, p3);
(hit_point - arr).length2() /
dot(sample_dir.normalized(), normal.into_vector().normalized()).abs() /
area
(hit_point - arr).length2()
/ dot(sample_dir.normalized(), normal.into_vector().normalized()).abs()
/ area
} else {
1.0 / (area_1 + area_2)
}
@ -188,9 +184,9 @@ impl<'a> SurfaceLight for RectangleLight<'a> {
let shadow_vec = sample_point - arr;
let spectral_sample =
(col * surface_area_inv as f32 * 0.5).to_spectral_sample(wavelength);
let pdf = (sample_point - arr).length2() /
dot(shadow_vec.normalized(), normal.into_vector().normalized()).abs() /
(surface_area_1 + surface_area_2);
let pdf = (sample_point - arr).length2()
/ dot(shadow_vec.normalized(), normal.into_vector().normalized()).abs()
/ (surface_area_1 + surface_area_2);
let point_err = 0.0001; // TODO: this is a hack, do properly.
(spectral_sample, (sample_point, normal, point_err), pdf)
} else {
@ -246,15 +242,14 @@ impl<'a> SurfaceLight for RectangleLight<'a> {
}
fn approximate_energy(&self) -> f32 {
let color: XYZ = self.colors.iter().fold(
XYZ::new(0.0, 0.0, 0.0),
|a, &b| a + b,
) / self.colors.len() as f32;
let color: XYZ = self.colors
.iter()
.fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b)
/ self.colors.len() as f32;
color.y
}
}
impl<'a> Surface for RectangleLight<'a> {
fn intersect_rays(
&self,
@ -313,9 +308,9 @@ impl<'a> Surface for RectangleLight<'a> {
let closure = {
let inv_surface_area = (1.0 / (dim.0 as f64 * dim.1 as f64)) as f32;
let color = lerp_slice(self.colors, r.time).to_spectral_sample(
wr.wavelength,
) * inv_surface_area;
let color = lerp_slice(self.colors, r.time)
.to_spectral_sample(wr.wavelength)
* inv_surface_area;
SurfaceClosureUnion::EmitClosure(EmitClosure::new(color))
};

---- (file name not shown) ----

@ -4,12 +4,12 @@ use mem_arena::MemArena;
use bbox::BBox;
use boundable::Boundable;
use color::{XYZ, SpectralSample, Color};
use color::{Color, SpectralSample, XYZ};
use lerp::lerp_slice;
use math::{Vector, Normal, Point, Matrix4x4, dot, coordinate_system_from_vector};
use ray::{Ray, AccelRay};
use math::{coordinate_system_from_vector, dot, Matrix4x4, Normal, Point, Vector};
use ray::{AccelRay, Ray};
use sampling::{uniform_sample_cone, uniform_sample_cone_pdf, uniform_sample_sphere};
use shading::surface_closure::{SurfaceClosureUnion, EmitClosure};
use shading::surface_closure::{EmitClosure, SurfaceClosureUnion};
use shading::SurfaceShader;
use surface::{Surface, SurfaceIntersection, SurfaceIntersectionData};
@ -32,11 +32,9 @@ impl<'a> SphereLight<'a> {
pub fn new<'b>(arena: &'b MemArena, radii: Vec<f32>, colors: Vec<XYZ>) -> SphereLight<'b> {
let bbs: Vec<_> = radii
.iter()
.map(|r| {
BBox {
min: Point::new(-*r, -*r, -*r),
max: Point::new(*r, *r, *r),
}
.map(|r| BBox {
min: Point::new(-*r, -*r, -*r),
max: Point::new(*r, *r, *r),
})
.collect();
SphereLight {
@ -81,7 +79,6 @@ impl<'a> SphereLight<'a> {
}
}
impl<'a> SurfaceLight for SphereLight<'a> {
fn sample_from_point(
&self,
@ -197,15 +194,14 @@ impl<'a> SurfaceLight for SphereLight<'a> {
}
fn approximate_energy(&self) -> f32 {
let color: XYZ = self.colors.iter().fold(
XYZ::new(0.0, 0.0, 0.0),
|a, &b| a + b,
) / self.colors.len() as f32;
let color: XYZ = self.colors
.iter()
.fold(XYZ::new(0.0, 0.0, 0.0), |a, &b| a + b)
/ self.colors.len() as f32;
color.y
}
}
impl<'a> Surface for SphereLight<'a> {
fn intersect_rays(
&self,
@ -260,7 +256,8 @@ impl<'a> Surface for SphereLight<'a> {
// Get our final parametric values
let mut t0 = q / a;
let mut t1 = if q != 0.0 { c / q } else { r.max_t };
let mut t1 =
if q != 0.0 { c / q } else { r.max_t };
// Swap them so they are ordered right
if t0 > t1 {
@ -323,11 +320,10 @@ impl<'a> Surface for SphereLight<'a> {
};
let closure = {
let inv_surface_area = (1.0 / (4.0 * PI_64 * radius as f64 * radius as f64)) as
f32;
let color = lerp_slice(self.colors, r.time).to_spectral_sample(
wr.wavelength,
) * inv_surface_area;
let inv_surface_area =
(1.0 / (4.0 * PI_64 * radius as f64 * radius as f64)) as f32;
let color = lerp_slice(self.colors, r.time).to_spectral_sample(wr.wavelength)
* inv_surface_area;
SurfaceClosureUnion::EmitClosure(EmitClosure::new(color))
};
@ -344,7 +340,6 @@ impl<'a> Surface for SphereLight<'a> {
}
}
impl<'a> Boundable for SphereLight<'a> {
fn bounds(&self) -> &[BBox] {
self.bounds_

---- (file name not shown) ----

@ -68,16 +68,13 @@ use clap::{App, Arg};
use mem_arena::MemArena;
use parse::{parse_scene, DataTree};
use ray::{Ray, AccelRay};
use ray::{AccelRay, Ray};
use surface::SurfaceIntersection;
use renderer::LightPath;
use bbox::BBox;
use accel::{BVHNode, BVH4Node};
use accel::{BVH4Node, BVHNode};
use timer::Timer;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
fn main() {
@ -104,10 +101,9 @@ fn main() {
.help("Number of samples per pixel")
.takes_value(true)
.validator(|s| {
usize::from_str(&s).and(Ok(())).or(Err(
"must be an integer"
.to_string(),
))
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
@ -115,15 +111,12 @@ fn main() {
.short("b")
.long("spb")
.value_name("N")
.help(
"Target number of samples per bucket (determines bucket size)",
)
.help("Target number of samples per bucket (determines bucket size)")
.takes_value(true)
.validator(|s| {
usize::from_str(&s).and(Ok(())).or(Err(
"must be an integer"
.to_string(),
))
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(
@ -132,15 +125,14 @@ fn main() {
.value_name("X1 Y1 X2 Y2")
.help(
"Only render the image between pixel coordinates (X1, Y1) \
and (X2, Y2). Coordinates are zero-indexed and inclusive.",
and (X2, Y2). Coordinates are zero-indexed and inclusive.",
)
.takes_value(true)
.number_of_values(4)
.validator(|s| {
usize::from_str(&s).and(Ok(())).or(Err(
"must be four integers"
.to_string(),
))
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be four integers".to_string()))
}),
)
.arg(
@ -150,22 +142,25 @@ fn main() {
.value_name("N")
.help(
"Number of threads to render with. Defaults to the number of logical \
cores on the system.",
cores on the system.",
)
.takes_value(true)
.validator(|s| {
usize::from_str(&s).and(Ok(())).or(Err(
"must be an integer"
.to_string(),
))
usize::from_str(&s)
.and(Ok(()))
.or(Err("must be an integer".to_string()))
}),
)
.arg(Arg::with_name("stats").long("stats").help(
"Print additional statistics about rendering",
))
.arg(Arg::with_name("dev").long("dev").help(
"Show useful dev/debug info.",
))
.arg(
Arg::with_name("stats")
.long("stats")
.help("Print additional statistics about rendering"),
)
.arg(
Arg::with_name("dev")
.long("dev")
.help("Show useful dev/debug info."),
)
.arg(
Arg::with_name("serialized_output")
.long("serialized_output")
@ -213,9 +208,7 @@ fn main() {
// Parse data tree of scene file
if !args.is_present("serialized_output") {
println!(
"Parsing scene file...",
);
println!("Parsing scene file...",);
}
t.tick();
let psy_contents = if args.is_present("use_stdin") {
@ -225,9 +218,9 @@ fn main() {
let mut stdin = tmp.lock();
let mut buf = vec![0u8; 4096];
loop {
let count = stdin.read(&mut buf).expect(
"Unexpected end of scene input.",
);
let count = stdin
.read(&mut buf)
.expect("Unexpected end of scene input.");
let start = if input.len() < 11 {
0
} else {
@ -238,8 +231,7 @@ fn main() {
let mut done = false;
let mut trunc_len = 0;
if let nom::IResult::Done(remaining, _) =
take_until!(&input[start..end], "__PSY_EOF__")
if let nom::IResult::Done(remaining, _) = take_until!(&input[start..end], "__PSY_EOF__")
{
done = true;
trunc_len = input.len() - remaining.len();
@ -344,9 +336,9 @@ fn main() {
if !args.is_present("serialized_output") {
println!("Writing image to disk into '{}'...", r.output_file);
if r.output_file.ends_with(".png") {
image.write_png(Path::new(&r.output_file)).expect(
"Failed to write png...",
);
image
.write_png(Path::new(&r.output_file))
.expect("Failed to write png...");
} else if r.output_file.ends_with(".exr") {
image.write_exr(Path::new(&r.output_file));
} else {

---- (file name not shown) ----

@ -2,8 +2,7 @@
use std::f32;
pub use math3d::{Matrix4x4, Normal, Point, Vector, DotProduct, dot, CrossProduct, cross};
pub use math3d::{cross, dot, CrossProduct, DotProduct, Matrix4x4, Normal, Point, Vector};
/// Clamps a value between a min and max.
pub fn clamp<T: PartialOrd>(v: T, lower: T, upper: T) -> T {
@ -18,12 +17,20 @@ pub fn clamp<T: PartialOrd>(v: T, lower: T, upper: T) -> T {
// The stdlib min function is slower than a simple if statement for some reason.
pub fn fast_minf32(a: f32, b: f32) -> f32 {
if a < b { a } else { b }
if a < b {
a
} else {
b
}
}
// The stdlib max function is slower than a simple if statement for some reason.
pub fn fast_maxf32(a: f32, b: f32) -> f32 {
if a > b { a } else { b }
if a > b {
a
} else {
b
}
}
/// Rounds an integer up to the next power of two.
@ -119,7 +126,6 @@ pub fn fast_logit(p: f32, width: f32) -> f32 {
fast_ln(n / (1.0 - n)) * width * (0.6266 / 4.0)
}
//----------------------------------------------------------------
// Adapted to Rust from https://code.google.com/archive/p/fastapprox/
@ -139,9 +145,9 @@ pub fn fast_pow2(p: f32) -> f32 {
let w: i32 = clipp as i32;
let z: f32 = clipp - w as f32 + offset;
let i: u32 = ((1 << 23) as f32 *
(clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z)) as
u32;
let i: u32 = ((1 << 23) as f32
* (clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z))
as u32;
unsafe { transmute_copy::<u32, f32>(&i) }
}
@ -177,11 +183,9 @@ pub fn faster_exp(p: f32) -> f32 {
faster_pow2(f32::consts::LOG2_E * p)
}
// End of adapted code
//----------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;

---- (file name not shown) ----

@ -3,10 +3,9 @@
use std::str;
use nom::{IResult, Needed, digit, multispace};
use nom::{digit, multispace, IResult, Needed};
use nom::IResult::*;
// Parsers for numbers surrounded by whitespace
named!(pub ws_u32<u32>, delimited!(opt!(multispace), u32_utf8, opt!(multispace)));
named!(pub ws_u64<u64>, delimited!(opt!(multispace), u64_utf8, opt!(multispace)));
@ -17,9 +16,6 @@ named!(pub ws_isize<isize>, delimited!(opt!(multispace), isize_utf8, opt!(multis
named!(pub ws_f32<f32>, delimited!(opt!(multispace), f32_utf8, opt!(multispace)));
named!(pub ws_f64<f64>, delimited!(opt!(multispace), f64_utf8, opt!(multispace)));
// ========================================================
named!(pub u32_utf8<u32>, chain!(
@ -131,9 +127,6 @@ fn take_decimal_real(i: &[u8]) -> IResult<&[u8], &[u8]> {
}
}
// ========================================================
#[cfg(test)]

---- (file name not shown) ----

@ -4,7 +4,6 @@ use std::iter::Iterator;
use std::result::Result;
use std::slice;
#[derive(Debug, Eq, PartialEq)]
pub enum DataTree<'a> {
Internal {
@ -21,7 +20,6 @@ pub enum DataTree<'a> {
},
}
impl<'a> DataTree<'a> {
pub fn from_str(source_text: &'a str) -> Result<DataTree<'a>, ParseError> {
let mut items = Vec::new();
@ -49,15 +47,15 @@ impl<'a> DataTree<'a> {
pub fn type_name(&'a self) -> &'a str {
match *self {
DataTree::Internal { type_name, .. } |
DataTree::Leaf { type_name, .. } => type_name,
DataTree::Internal { type_name, .. } | DataTree::Leaf { type_name, .. } => type_name,
}
}
pub fn byte_offset(&'a self) -> usize {
match *self {
DataTree::Internal { byte_offset, .. } |
DataTree::Leaf { byte_offset, .. } => byte_offset,
DataTree::Internal { byte_offset, .. } | DataTree::Leaf { byte_offset, .. } => {
byte_offset
}
}
}
@ -166,7 +164,6 @@ impl<'a> DataTree<'a> {
}
}
/// An iterator over the children of a `DataTree` node that filters out the
/// children not matching a specified type name.
pub struct DataTreeFilterIter<'a> {
@ -192,7 +189,6 @@ impl<'a> Iterator for DataTreeFilterIter<'a> {
}
}
/// An iterator over the children of a `DataTree` node that filters out the
/// children that aren't internal nodes and that don't match a specified
/// type name.
@ -208,11 +204,11 @@ impl<'a> Iterator for DataTreeFilterInternalIter<'a> {
loop {
match self.iter.next() {
Some(&DataTree::Internal {
type_name,
ident,
ref children,
byte_offset,
}) => {
type_name,
ident,
ref children,
byte_offset,
}) => {
if type_name == self.type_name {
return Some((type_name, ident, children, byte_offset));
} else {
@ -232,7 +228,6 @@ impl<'a> Iterator for DataTreeFilterInternalIter<'a> {
}
}
/// An iterator over the children of a `DataTree` node that filters out the
/// children that aren't internal nodes and that don't match a specified
/// type name.
@ -252,10 +247,10 @@ impl<'a> Iterator for DataTreeFilterLeafIter<'a> {
}
Some(&DataTree::Leaf {
type_name,
contents,
byte_offset,
}) => {
type_name,
contents,
byte_offset,
}) => {
if type_name == self.type_name {
return Some((type_name, contents, byte_offset));
} else {
@ -271,7 +266,6 @@ impl<'a> Iterator for DataTreeFilterLeafIter<'a> {
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum ParseError {
MissingOpener(usize),
@ -285,9 +279,6 @@ pub enum ParseError {
Other((usize, &'static str)),
}
// ================================================================
#[derive(Debug, PartialEq, Eq)]
@ -386,7 +377,6 @@ fn parse_node<'a>(source_text: (usize, &'a str)) -> ParseResult<'a> {
}
}
fn parse_leaf_content(source_text: (usize, &str)) -> (&str, (usize, &str)) {
let mut si = 1;
let mut escaped = false;
@ -407,13 +397,12 @@ fn parse_leaf_content(source_text: (usize, &str)) -> (&str, (usize, &str)) {
si = source_text.1.len();
}
return (&source_text.1[0..si], (
source_text.0 + si,
&source_text.1[si..],
));
return (
&source_text.1[0..si],
(source_text.0 + si, &source_text.1[si..]),
);
}
fn next_token<'a>(source_text: (usize, &'a str)) -> (Token<'a>, (usize, &'a str)) {
let text1 = skip_ws_and_comments(source_text);
@ -480,13 +469,12 @@ fn next_token<'a>(source_text: (usize, &'a str)) -> (Token<'a>, (usize, &'a str)
si = text1.1.len();
}
return (Token::TypeName(&text1.1[0..si]), (
text1.0 + si,
&text1.1[si..],
));
return (
Token::TypeName(&text1.1[0..si]),
(text1.0 + si, &text1.1[si..]),
);
}
}
}
} else {
return (Token::End, text1);
@ -576,9 +564,6 @@ fn skip_ws_and_comments(text: (usize, &str)) -> (usize, &str) {
return (offset, remaining_text);
}
// ================================================================
#[cfg(test)]
@ -623,10 +608,10 @@ mod tests {
fn tokenize_5() {
let input = (0, " $hi\\ t\\#he\\[re ");
assert_eq!(next_token(input), (
Token::Ident("$hi\\ t\\#he\\[re"),
(15, " "),
));
assert_eq!(
next_token(input),
(Token::Ident("$hi\\ t\\#he\\[re"), (15, " "),)
);
}
#[test]
@ -657,18 +642,24 @@ mod tests {
let (token7, input8) = next_token(input7);
let (token8, input9) = next_token(input8);
assert_eq!((token1, input2), (Token::TypeName("Thing"), (
5,
" $yar { # A comment\n\tThing2 []\n}",
)));
assert_eq!((token2, input3), (Token::Ident("$yar"), (
10,
" { # A comment\n\tThing2 []\n}",
)));
assert_eq!((token3, input4), (Token::OpenInner, (
12,
" # A comment\n\tThing2 []\n}",
)));
assert_eq!(
(token1, input2),
(
Token::TypeName("Thing"),
(5, " $yar { # A comment\n\tThing2 []\n}",)
)
);
assert_eq!(
(token2, input3),
(
Token::Ident("$yar"),
(10, " { # A comment\n\tThing2 []\n}",)
)
);
assert_eq!(
(token3, input4),
(Token::OpenInner, (12, " # A comment\n\tThing2 []\n}",))
);
assert_eq!(
(token4, input5),
(Token::TypeName("Thing2"), (32, " []\n}"))

---- (file name not shown) ----

@ -9,29 +9,28 @@ use nom::IResult;
use mem_arena::MemArena;
use camera::Camera;
use color::{XYZ, rec709_e_to_xyz};
use color::{rec709_e_to_xyz, XYZ};
use light::WorldLightSource;
use math::Matrix4x4;
use renderer::Renderer;
use scene::Scene;
use scene::World;
use super::basics::{ws_u32, ws_f32};
use super::basics::{ws_f32, ws_u32};
use super::DataTree;
use super::psy_assembly::parse_assembly;
use super::psy_light::parse_distant_disk_light;
#[derive(Debug)]
pub enum PsyParseError {
// The first usize for all errors is their byte offset
// into the psy content where they occured.
UnknownError(usize),
UnknownVariant(usize, &'static str), // Error message
ExpectedInternalNode(usize, &'static str), // Error message
ExpectedLeafNode(usize, &'static str), // Error message
MissingNode(usize, &'static str), // Error message
IncorrectLeafData(usize, &'static str), // Error message
UnknownVariant(usize, &'static str), // Error message
ExpectedInternalNode(usize, &'static str), // Error message
ExpectedLeafNode(usize, &'static str), // Error message
MissingNode(usize, &'static str), // Error message
IncorrectLeafData(usize, &'static str), // Error message
WrongNodeCount(usize, &'static str, usize), // Error message, sections found
InstancedMissingData(usize, &'static str, String), // Error message, data name
}
@ -43,7 +42,7 @@ impl PsyParseError {
let line = line_count_to_byte_offset(psy_content, offset);
println!(
"Line {}: Unknown parse error. If you get this message, please report \
it to the developers so they can improve the error messages.",
it to the developers so they can improve the error messages.",
line
);
}
@ -90,7 +89,6 @@ fn line_count_to_byte_offset(text: &str, offset: usize) -> usize {
text[..offset].matches('\n').count() + 1
}
/// Takes in a `DataTree` representing a Scene node and returns
pub fn parse_scene<'a>(
arena: &'a MemArena,
@ -102,7 +100,7 @@ pub fn parse_scene<'a>(
return Err(PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one Output \
section.",
section.",
count,
));
}
@ -111,7 +109,7 @@ pub fn parse_scene<'a>(
return Err(PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one \
RenderSettings section.",
RenderSettings section.",
count,
));
}
@ -120,7 +118,7 @@ pub fn parse_scene<'a>(
return Err(PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one Camera \
section.",
section.",
count,
));
}
@ -137,7 +135,7 @@ pub fn parse_scene<'a>(
return Err(PsyParseError::WrongNodeCount(
tree.byte_offset(),
"Scene should have precisely one Root Assembly \
section.",
section.",
count,
));
}
@ -199,9 +197,6 @@ pub fn parse_scene<'a>(
return Ok(renderer);
}
fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree {
let mut found_path = false;
@ -213,21 +208,22 @@ fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
type_name,
contents,
byte_offset,
} if type_name == "Path" => {
} if type_name == "Path" =>
{
// Trim and validate
let tc = contents.trim();
if tc.chars().count() < 2 {
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"File path format is \
incorrect.",
incorrect.",
));
}
if tc.chars().nth(0).unwrap() != '"' || tc.chars().last().unwrap() != '"' {
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"File paths must be \
surrounded by quotes.",
surrounded by quotes.",
));
}
let len = tc.len();
@ -255,14 +251,11 @@ fn parse_output_info(tree: &DataTree) -> Result<String, PsyParseError> {
return Err(PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"Output section should be an internal \
node, containing at least a Path.",
node, containing at least a Path.",
));
};
}
fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree {
let mut found_res = false;
@ -278,7 +271,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
type_name,
contents,
byte_offset,
} if type_name == "Resolution" => {
} if type_name == "Resolution" =>
{
if let IResult::Done(_, (w, h)) =
closure!(terminated!(tuple!(ws_u32, ws_u32), nom::eof))(contents.as_bytes())
{
@ -299,7 +293,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
type_name,
contents,
byte_offset,
} if type_name == "SamplesPerPixel" => {
} if type_name == "SamplesPerPixel" =>
{
if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) {
found_spp = true;
spp = n;
@ -308,8 +303,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"SamplesPerPixel should be \
an integer specified in \
the form '[samples]'.",
an integer specified in \
the form '[samples]'.",
));
}
}
@ -319,7 +314,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
type_name,
contents,
byte_offset,
} if type_name == "Seed" => {
} if type_name == "Seed" =>
{
if let IResult::Done(_, n) = ws_u32(contents.as_bytes()) {
seed = n;
} else {
@ -327,8 +323,8 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"Seed should be an integer \
specified in the form \
'[samples]'.",
specified in the form \
'[samples]'.",
));
}
}
@ -343,22 +339,19 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
return Err(PsyParseError::MissingNode(
tree.byte_offset(),
"RenderSettings must have both Resolution and \
SamplesPerPixel specified.",
SamplesPerPixel specified.",
));
}
} else {
return Err(PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"RenderSettings section should be an \
internal node, containing at least \
Resolution and SamplesPerPixel.",
internal node, containing at least \
Resolution and SamplesPerPixel.",
));
};
}
fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a>, PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree {
let mut mats = Vec::new();
@ -374,7 +367,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name,
contents,
byte_offset,
} if type_name == "Fov" => {
} if type_name == "Fov" =>
{
if let IResult::Done(_, fov) = ws_f32(contents.as_bytes()) {
fovs.push(fov * (f32::consts::PI / 180.0));
} else {
@ -382,8 +376,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"Fov should be a decimal \
number specified in the \
form '[fov]'.",
number specified in the \
form '[fov]'.",
));
}
}
@ -393,7 +387,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name,
contents,
byte_offset,
} if type_name == "FocalDistance" => {
} if type_name == "FocalDistance" =>
{
if let IResult::Done(_, fd) = ws_f32(contents.as_bytes()) {
focus_distances.push(fd);
} else {
@ -401,8 +396,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"FocalDistance should be a \
decimal number specified \
in the form '[fov]'.",
decimal number specified \
in the form '[fov]'.",
));
}
}
@ -412,7 +407,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name,
contents,
byte_offset,
} if type_name == "ApertureRadius" => {
} if type_name == "ApertureRadius" =>
{
if let IResult::Done(_, ar) = ws_f32(contents.as_bytes()) {
aperture_radii.push(ar);
} else {
@ -420,8 +416,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"ApertureRadius should be a \
decimal number specified \
in the form '[fov]'.",
decimal number specified \
in the form '[fov]'.",
));
}
}
@ -431,7 +427,8 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
type_name,
contents,
byte_offset,
} if type_name == "Transform" => {
} if type_name == "Transform" =>
{
if let Ok(mat) = parse_matrix(contents) {
mats.push(mat);
} else {
@ -455,15 +452,12 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
return Err(PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"Camera section should be an internal \
node, containing at least Fov and \
Transform.",
node, containing at least Fov and \
Transform.",
));
}
}
fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>, PsyParseError> {
if tree.is_internal() {
let background_color;
@ -487,7 +481,7 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::WrongNodeCount(
bgs.byte_offset(),
"BackgroundShader should have \
precisely one Type specified.",
precisely one Type specified.",
bgs.iter_children_with_type("Type").count(),
));
}
@ -499,17 +493,17 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::ExpectedLeafNode(
bgs.byte_offset(),
"BackgroundShader's Type should be a \
leaf node.",
leaf node.",
));
}
};
match bgs_type {
"Color" => {
if let Some(&DataTree::Leaf {
contents,
byte_offset,
..
}) = bgs.iter_children_with_type("Color").nth(0)
contents,
byte_offset,
..
}) = bgs.iter_children_with_type("Color").nth(0)
{
if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.trim().as_bytes())
@ -521,15 +515,15 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::IncorrectLeafData(
byte_offset,
"Color should be specified \
with three decimal numbers \
in the form '[R G B]'.",
with three decimal numbers \
in the form '[R G B]'.",
));
}
} else {
return Err(PsyParseError::MissingNode(
bgs.byte_offset(),
"BackgroundShader's Type is Color, \
but no Color is specified.",
but no Color is specified.",
));
}
}
@ -538,7 +532,7 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::UnknownVariant(
bgs.byte_offset(),
"The specified BackgroundShader Type \
isn't a recognized type.",
isn't a recognized type.",
))
}
}
@ -563,38 +557,34 @@ fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>,
return Err(PsyParseError::ExpectedInternalNode(
tree.byte_offset(),
"World section should be an internal \
node, containing at least a \
BackgroundShader.",
node, containing at least a \
BackgroundShader.",
));
}
}
pub fn parse_matrix(contents: &str) -> Result<Matrix4x4, PsyParseError> {
if let IResult::Done(_, ns) =
closure!(terminated!(
tuple!(
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32
),
nom::eof
))(contents.as_bytes())
if let IResult::Done(_, ns) = closure!(terminated!(
tuple!(
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32,
ws_f32
),
nom::eof
))(contents.as_bytes())
{
return Ok(Matrix4x4::new_from_values(
ns.0,
@ -623,6 +613,6 @@ pub fn make_transform_format_error(byte_offset: usize) -> PsyParseError {
PsyParseError::IncorrectLeafData(
byte_offset,
"Transform should be sixteen integers specified in \
the form '[# # # # # # # # # # # # # # # #]'.",
the form '[# # # # # # # # # # # # # # # #]'.",
)
}

---- (file name not shown) ----

@ -7,12 +7,11 @@ use mem_arena::MemArena;
use scene::{Assembly, AssemblyBuilder, Object};
use super::DataTree;
use super::psy_light::{parse_sphere_light, parse_rectangle_light};
use super::psy_light::{parse_rectangle_light, parse_sphere_light};
use super::psy_mesh_surface::parse_mesh_surface;
use super::psy_surface_shader::parse_surface_shader;
use super::psy::{parse_matrix, PsyParseError};
pub fn parse_assembly<'a>(
arena: &'a MemArena,
tree: &'a DataTree,
@ -24,7 +23,10 @@ pub fn parse_assembly<'a>(
match child.type_name() {
// Sub-Assembly
"Assembly" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child {
if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_assembly(ident, parse_assembly(arena, child)?);
} else {
return Err(PsyParseError::UnknownError(child.byte_offset()));
@ -75,9 +77,9 @@ pub fn parse_assembly<'a>(
return Err(PsyParseError::InstancedMissingData(
child.iter_leaf_children_with_type("Data").nth(0).unwrap().2,
"Attempted to add \
instance for data with \
a name that doesn't \
exist.",
instance for data with \
a name that doesn't \
exist.",
name.to_string(),
));
}
@ -85,13 +87,16 @@ pub fn parse_assembly<'a>(
// SurfaceShader
"SurfaceShader" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child {
if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_surface_shader(ident, parse_surface_shader(arena, child)?);
} else {
// TODO: error condition of some kind, because no ident
panic!(
"SurfaceShader encountered that was a leaf, but SurfaceShaders cannot \
be a leaf: {}",
be a leaf: {}",
child.byte_offset()
);
}
@ -99,7 +104,10 @@ pub fn parse_assembly<'a>(
// MeshSurface
"MeshSurface" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child {
if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_object(
ident,
Object::Surface(arena.alloc(parse_mesh_surface(arena, child)?)),
@ -108,7 +116,7 @@ pub fn parse_assembly<'a>(
// TODO: error condition of some kind, because no ident
panic!(
"MeshSurface encountered that was a leaf, but MeshSurfaces cannot \
be a leaf: {}",
be a leaf: {}",
child.byte_offset()
);
}
@ -116,7 +124,10 @@ pub fn parse_assembly<'a>(
// Sphere Light
"SphereLight" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child {
if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_object(
ident,
Object::SurfaceLight(arena.alloc(parse_sphere_light(arena, child)?)),
@ -129,12 +140,13 @@ pub fn parse_assembly<'a>(
// Rectangle Light
"RectangleLight" => {
if let DataTree::Internal { ident: Some(ident), .. } = *child {
if let DataTree::Internal {
ident: Some(ident), ..
} = *child
{
builder.add_object(
ident,
Object::SurfaceLight(
arena.alloc(parse_rectangle_light(arena, child)?),
),
Object::SurfaceLight(arena.alloc(parse_rectangle_light(arena, child)?)),
);
} else {
// No ident
@ -144,27 +156,25 @@ pub fn parse_assembly<'a>(
_ => {
// TODO: some kind of error, because not a known type name
}
// // Bilinear Patch
// "BilinearPatch" => {
// assembly->add_object(child.name, parse_bilinear_patch(child));
// }
//
// // Bicubic Patch
// else if (child.type == "BicubicPatch") {
// assembly->add_object(child.name, parse_bicubic_patch(child));
// }
//
// // Subdivision surface
// else if (child.type == "SubdivisionSurface") {
// assembly->add_object(child.name, parse_subdivision_surface(child));
// }
//
// // Sphere
// else if (child.type == "Sphere") {
// assembly->add_object(child.name, parse_sphere(child));
// }
} // // Bilinear Patch
// "BilinearPatch" => {
// assembly->add_object(child.name, parse_bilinear_patch(child));
// }
//
// // Bicubic Patch
// else if (child.type == "BicubicPatch") {
// assembly->add_object(child.name, parse_bicubic_patch(child));
// }
//
// // Subdivision surface
// else if (child.type == "SubdivisionSurface") {
// assembly->add_object(child.name, parse_subdivision_surface(child));
// }
//
// // Sphere
// else if (child.type == "Sphere") {
// assembly->add_object(child.name, parse_sphere(child));
// }
}
}
} else {

---- (file name not shown) ----

@ -7,14 +7,13 @@ use nom::IResult;
use mem_arena::MemArena;
use math::Vector;
use color::{XYZ, rec709_e_to_xyz};
use light::{DistantDiskLight, SphereLight, RectangleLight};
use color::{rec709_e_to_xyz, XYZ};
use light::{DistantDiskLight, RectangleLight, SphereLight};
use super::basics::ws_f32;
use super::DataTree;
use super::psy::PsyParseError;
pub fn parse_distant_disk_light<'a>(
arena: &'a MemArena,
tree: &'a DataTree,
@ -32,7 +31,8 @@ pub fn parse_distant_disk_light<'a>(
type_name,
contents,
byte_offset,
} if type_name == "Radius" => {
} if type_name == "Radius" =>
{
if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) {
radii.push(radius);
} else {
@ -46,7 +46,8 @@ pub fn parse_distant_disk_light<'a>(
type_name,
contents,
byte_offset,
} if type_name == "Direction" => {
} if type_name == "Direction" =>
{
if let IResult::Done(_, direction) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{
@ -62,7 +63,8 @@ pub fn parse_distant_disk_light<'a>(
type_name,
contents,
byte_offset,
} if type_name == "Color" => {
} if type_name == "Color" =>
{
if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{
@ -86,7 +88,6 @@ pub fn parse_distant_disk_light<'a>(
}
}
pub fn parse_sphere_light<'a>(
arena: &'a MemArena,
tree: &'a DataTree,
@ -103,7 +104,8 @@ pub fn parse_sphere_light<'a>(
type_name,
contents,
byte_offset,
} if type_name == "Radius" => {
} if type_name == "Radius" =>
{
if let IResult::Done(_, radius) = ws_f32(contents.as_bytes()) {
radii.push(radius);
} else {
@ -117,7 +119,8 @@ pub fn parse_sphere_light<'a>(
type_name,
contents,
byte_offset,
} if type_name == "Color" => {
} if type_name == "Color" =>
{
if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{
@ -157,7 +160,8 @@ pub fn parse_rectangle_light<'a>(
type_name,
contents,
byte_offset,
} if type_name == "Dimensions" => {
} if type_name == "Dimensions" =>
{
if let IResult::Done(_, radius) =
closure!(tuple!(ws_f32, ws_f32))(contents.as_bytes())
{
@ -173,7 +177,8 @@ pub fn parse_rectangle_light<'a>(
type_name,
contents,
byte_offset,
} if type_name == "Color" => {
} if type_name == "Color" =>
{
if let IResult::Done(_, color) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(contents.as_bytes())
{

---- (file name not shown) ----

@ -6,14 +6,13 @@ use nom::IResult;
use mem_arena::MemArena;
use math::{Point, Normal};
use math::{Normal, Point};
use surface::triangle_mesh::TriangleMesh;
use super::basics::{ws_usize, ws_f32};
use super::DataTree;
use super::psy::PsyParseError;
// pub struct TriangleMesh {
// time_samples: usize,
// geo: Vec<(Point, Point, Point)>,
@ -61,8 +60,7 @@ pub fn parse_mesh_surface<'a>(
// Collect verts for this time sample
let mut tnormals = Vec::new();
while let IResult::Done(remaining, nor) =
closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text)
while let IResult::Done(remaining, nor) = closure!(tuple!(ws_f32, ws_f32, ws_f32))(raw_text)
{
raw_text = remaining;

---- (file name not shown) ----

@ -6,14 +6,13 @@ use nom::IResult;
use mem_arena::MemArena;
use color::{XYZ, rec709_e_to_xyz};
use shading::{SurfaceShader, SimpleSurfaceShader};
use color::{rec709_e_to_xyz, XYZ};
use shading::{SimpleSurfaceShader, SurfaceShader};
use super::basics::ws_f32;
use super::DataTree;
use super::psy::PsyParseError;
// pub struct TriangleMesh {
// time_samples: usize,
// geo: Vec<(Point, Point, Point)>,

---- (file name not shown) ----

@ -3,8 +3,7 @@
use std;
use float4::Float4;
use math::{Vector, Point, Matrix4x4};
use math::{Matrix4x4, Point, Vector};
const OCCLUSION_FLAG: u32 = 1;
const DONE_FLAG: u32 = 1 << 1;
@ -52,7 +51,6 @@ impl Ray {
}
}
#[derive(Debug, Copy, Clone)]
pub struct AccelRay {
pub orig: Point,
@ -67,7 +65,9 @@ impl AccelRay {
pub fn new(ray: &Ray, id: u32) -> AccelRay {
AccelRay {
orig: ray.orig,
dir_inv: Vector { co: Float4::new(1.0, 1.0, 1.0, 1.0) / ray.dir.co },
dir_inv: Vector {
co: Float4::new(1.0, 1.0, 1.0, 1.0) / ray.dir.co,
},
max_t: ray.max_t,
time: ray.time,
flags: ray.flags,
@ -77,12 +77,16 @@ impl AccelRay {
pub fn update_from_world_ray(&mut self, wr: &Ray) {
self.orig = wr.orig;
self.dir_inv = Vector { co: Float4::new(1.0, 1.0, 1.0, 1.0) / wr.dir.co };
self.dir_inv = Vector {
co: Float4::new(1.0, 1.0, 1.0, 1.0) / wr.dir.co,
};
}
pub fn update_from_xformed_world_ray(&mut self, wr: &Ray, mat: &Matrix4x4) {
self.orig = wr.orig * *mat;
self.dir_inv = Vector { co: Float4::new(1.0, 1.0, 1.0, 1.0) / (wr.dir * *mat).co };
self.dir_inv = Vector {
co: Float4::new(1.0, 1.0, 1.0, 1.0) / (wr.dir * *mat).co,
};
}
pub fn is_occlusion(&self) -> bool {

View File

@ -3,16 +3,16 @@ use std::cell::Cell;
use std::cmp;
use std::cmp::min;
use std::io::{self, Write};
use std::sync::{RwLock, Mutex};
use std::sync::{Mutex, RwLock};
use crossbeam::sync::MsQueue;
use scoped_threadpool::Pool;
use halton;
use accel::{ACCEL_TRAV_TIME, ACCEL_NODE_RAY_TESTS};
use accel::{ACCEL_NODE_RAY_TESTS, ACCEL_TRAV_TIME};
use algorithm::partition_pair;
use color::{Color, XYZ, SpectralSample, map_0_1_to_wavelength};
use color::{Color, SpectralSample, map_0_1_to_wavelength, XYZ};
use float4::Float4;
use fp_utils::robust_ray_origin;
use hash::hash_u32;
@ -27,7 +27,6 @@ use timer::Timer;
use tracer::Tracer;
use transform_stack::TransformStack;
#[derive(Debug)]
pub struct Renderer<'a> {
pub output_file: String,
@ -184,9 +183,7 @@ impl<'a> Renderer<'a> {
});
// Clear percentage progress print
print!(
"\r \r",
);
print!("\r \r",);
// Return the rendered image and stats
return (image, *collective_stats.read().unwrap());
@ -353,7 +350,6 @@ impl<'a> Renderer<'a> {
}
}
#[derive(Debug)]
enum LightPathEvent {
CameraRay,
@ -410,7 +406,6 @@ impl LightPath {
pending_color_addition: Float4::splat(0.0),
color: Float4::splat(0.0),
},
scene.camera.generate_ray(
image_plane_co.0,
image_plane_co.1,
@ -438,8 +433,7 @@ impl LightPath {
match self.event {
//--------------------------------------------------------------------
// Result of Camera or bounce ray, prepare next bounce and light rays
LightPathEvent::CameraRay |
LightPathEvent::BounceRay => {
LightPathEvent::CameraRay | LightPathEvent::BounceRay => {
if let surface::SurfaceIntersection::Hit {
intersection_data: ref idata,
ref closure,
@ -482,8 +476,8 @@ impl LightPath {
self.time,
isect,
);
let found_light = if light_info.is_none() || light_info.pdf() <= 0.0 ||
light_info.selection_pdf() <= 0.0
let found_light = if light_info.is_none() || light_info.pdf() <= 0.0
|| light_info.selection_pdf() <= 0.0
{
false
} else {
@ -564,9 +558,9 @@ impl LightPath {
// Calculate and store the light that will be contributed
// to the film plane if the light is not in shadow.
let light_mis_pdf = power_heuristic(light_pdf, closure_pdf);
self.pending_color_addition = light_info.color().e * attenuation.e *
self.light_attenuation /
(light_mis_pdf * light_sel_pdf);
self.pending_color_addition = light_info.color().e * attenuation.e
* self.light_attenuation
/ (light_mis_pdf * light_sel_pdf);
*ray = shadow_ray;
@ -630,8 +624,8 @@ impl LightPath {
.world
.background_color
.to_spectral_sample(self.wavelength)
.e * self.light_attenuation /
self.closure_sample_pdf;
.e * self.light_attenuation
/ self.closure_sample_pdf;
return false;
}
}
@ -672,7 +666,6 @@ fn get_sample(dimension: u32, i: u32) -> f32 {
}
}
#[derive(Debug)]
struct BucketJob {
x: u32,
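
For reference, the `power_heuristic(light_pdf, closure_pdf)` weight in the direct-lighting hunk above is the standard power heuristic (beta = 2) for multiple importance sampling. A minimal standalone sketch; the crate's own function signature may differ:

// Power heuristic with beta = 2: weights a sample drawn from the
// strategy with pdf `f` against a competing strategy with pdf `g`.
fn power_heuristic(f: f32, g: f32) -> f32 {
    let f2 = f * f;
    let g2 = g * g;
    if f2 + g2 > 0.0 {
        f2 / (f2 + g2)
    } else {
        0.0
    }
}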

View File

@ -1,6 +1,7 @@
mod monte_carlo;
pub use self::monte_carlo::{square_to_circle, cosine_sample_hemisphere, uniform_sample_hemisphere,
uniform_sample_sphere, uniform_sample_cone, uniform_sample_cone_pdf,
uniform_sample_triangle, triangle_surface_area,
spherical_triangle_solid_angle, uniform_sample_spherical_triangle};
pub use self::monte_carlo::{cosine_sample_hemisphere, spherical_triangle_solid_angle,
square_to_circle, triangle_surface_area, uniform_sample_cone,
uniform_sample_cone_pdf, uniform_sample_hemisphere,
uniform_sample_sphere, uniform_sample_spherical_triangle,
uniform_sample_triangle};

View File

@ -4,8 +4,7 @@ use std::f32::consts::FRAC_PI_4 as QPI_32;
use std::f32::consts::PI as PI_32;
use std::f64::consts::PI as PI_64;
use math::{Vector, Point, cross, dot};
use math::{cross, dot, Point, Vector};
/// Maps the unit square to the unit circle.
/// NOTE: x and y should be distributed within [-1, 1],
@ -194,8 +193,8 @@ pub fn uniform_sample_spherical_triangle(
let q_bottom = ((v * s) + (u * t)) * sin_va;
let q = q_top / q_bottom;
let vc_2 = (va * q as f32) +
((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32);
let vc_2 =
(va * q as f32) + ((vc - (va * dot(vc, va))).normalized() * (1.0 - (q * q)).sqrt() as f32);
let z = 1.0 - (j * (1.0 - dot(vc_2, vb)));
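
As context for the re-exported `cosine_sample_hemisphere` above: the cosine-weighted distribution can be generated directly from two uniform numbers with the polar mapping. A sketch, independent of the crate's own implementation (which may use `square_to_circle` and different argument ranges):

use std::f32::consts::PI;

// Cosine-weighted hemisphere sample around +z from two uniform
// numbers in [0, 1).
fn cosine_sample_hemisphere(u: f32, v: f32) -> (f32, f32, f32) {
    let r = u.sqrt();
    let theta = 2.0 * PI * v;
    (r * theta.cos(), r * theta.sin(), (1.0 - u).max(0.0).sqrt())
}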

View File

@ -4,7 +4,7 @@ use mem_arena::MemArena;
use accel::{LightAccel, LightTree};
use accel::BVH4;
use bbox::{BBox, transform_bbox_slice_from};
use bbox::{transform_bbox_slice_from, BBox};
use boundable::Boundable;
use color::SpectralSample;
use lerp::lerp_slice;
@ -14,7 +14,6 @@ use surface::{Surface, SurfaceIntersection};
use shading::SurfaceShader;
use transform_stack::TransformStack;
#[derive(Copy, Clone, Debug)]
pub struct Assembly<'a> {
// Instance list
@ -59,20 +58,17 @@ impl<'a> Assembly<'a> {
} else {
Matrix4x4::new()
};
if let Some((light_i, sel_pdf, whittled_n)) =
self.light_accel.select(
idata.incoming * sel_xform,
idata.pos * sel_xform,
idata.nor * sel_xform,
idata.nor_g * sel_xform,
closure.as_surface_closure(),
time,
n,
)
{
if let Some((light_i, sel_pdf, whittled_n)) = self.light_accel.select(
idata.incoming * sel_xform,
idata.pos * sel_xform,
idata.nor * sel_xform,
idata.nor_g * sel_xform,
closure.as_surface_closure(),
time,
n,
) {
let inst = self.light_instances[light_i];
match inst.instance_type {
InstanceType::Object => {
match self.objects[inst.data_index] {
Object::SurfaceLight(light) => {
@ -151,7 +147,6 @@ impl<'a> Boundable for Assembly<'a> {
}
}
#[derive(Debug)]
pub struct AssemblyBuilder<'a> {
arena: &'a MemArena,
@ -173,7 +168,6 @@ pub struct AssemblyBuilder<'a> {
assembly_map: HashMap<String, usize>, // map Name -> Index
}
impl<'a> AssemblyBuilder<'a> {
pub fn new(arena: &'a MemArena) -> AssemblyBuilder<'a> {
AssemblyBuilder {
@ -196,10 +190,8 @@ impl<'a> AssemblyBuilder<'a> {
}
// Add shader
self.surface_shader_map.insert(
name.to_string(),
self.surface_shaders.len(),
);
self.surface_shader_map
.insert(name.to_string(), self.surface_shaders.len());
self.surface_shaders.push(shader);
}
@ -219,15 +211,13 @@ impl<'a> AssemblyBuilder<'a> {
if self.name_exists(name) {
panic!(
"Attempted to add assembly to another assembly with a name that already \
exists."
exists."
);
}
// Add assembly
self.assembly_map.insert(
name.to_string(),
self.assemblies.len(),
);
self.assembly_map
.insert(name.to_string(), self.assemblies.len());
self.assemblies.push(asmb);
}
@ -244,7 +234,11 @@ impl<'a> AssemblyBuilder<'a> {
// Map zero-length transforms to None
let xforms = if let Some(xf) = xforms {
if !xf.is_empty() { Some(xf) } else { None }
if !xf.is_empty() {
Some(xf)
} else {
None
}
} else {
None
};
@ -255,30 +249,26 @@ impl<'a> AssemblyBuilder<'a> {
instance_type: InstanceType::Object,
data_index: self.object_map[name],
surface_shader_index: surface_shader_name.map(|name| {
*self.surface_shader_map.get(name).expect(&format!(
"Unknown surface shader '{}'.",
name
))
*self.surface_shader_map
.get(name)
.expect(&format!("Unknown surface shader '{}'.", name))
}),
id: self.instances.len(),
transform_indices: xforms.map(
|xf| (self.xforms.len(), self.xforms.len() + xf.len()),
),
transform_indices: xforms
.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
}
} else {
Instance {
instance_type: InstanceType::Assembly,
data_index: self.assembly_map[name],
surface_shader_index: surface_shader_name.map(|name| {
*self.surface_shader_map.get(name).expect(&format!(
"Unknown surface shader '{}'.",
name
))
*self.surface_shader_map
.get(name)
.expect(&format!("Unknown surface shader '{}'.", name))
}),
id: self.instances.len(),
transform_indices: xforms.map(
|xf| (self.xforms.len(), self.xforms.len() + xf.len()),
),
transform_indices: xforms
.map(|xf| (self.xforms.len(), self.xforms.len() + xf.len())),
}
};
@ -337,11 +327,9 @@ impl<'a> AssemblyBuilder<'a> {
}
}
InstanceType::Assembly => {
self.assemblies[inst.data_index]
.light_accel
.approximate_energy()
}
InstanceType::Assembly => self.assemblies[inst.data_index]
.light_accel
.approximate_energy(),
};
(bounds, energy)
});
@ -358,7 +346,6 @@ impl<'a> AssemblyBuilder<'a> {
}
}
/// Returns a pair of vectors with the bounds of all instances.
/// This is used for building the assembly's BVH4.
fn instance_bounds(&self) -> (Vec<usize>, Vec<BBox>) {
@ -405,15 +392,12 @@ impl<'a> AssemblyBuilder<'a> {
}
}
#[derive(Copy, Clone, Debug)]
pub enum Object<'a> {
Surface(&'a Surface),
SurfaceLight(&'a SurfaceLight),
}
#[derive(Debug, Copy, Clone)]
pub struct Instance {
pub instance_type: InstanceType,

View File

@ -2,6 +2,6 @@ mod assembly;
mod scene;
mod world;
pub use self::assembly::{Assembly, AssemblyBuilder, Object, InstanceType};
pub use self::assembly::{Assembly, AssemblyBuilder, InstanceType, Object};
pub use self::scene::{Scene, SceneLightSample};
pub use self::world::World;

View File

@ -2,14 +2,13 @@ use accel::LightAccel;
use algorithm::weighted_choice;
use camera::Camera;
use color::SpectralSample;
use math::{Vector, Normal, Point};
use math::{Normal, Point, Vector};
use surface::SurfaceIntersection;
use transform_stack::TransformStack;
use super::Assembly;
use super::World;
#[derive(Debug)]
pub struct Scene<'a> {
pub name: Option<String>,
@ -35,9 +34,11 @@ impl<'a> Scene<'a> {
// Calculate relative probabilities of traversing into world lights
// or local lights.
let wl_energy = if self.world.lights.iter().fold(0.0, |energy, light| {
energy + light.approximate_energy()
}) <= 0.0
let wl_energy = if self.world
.lights
.iter()
.fold(0.0, |energy, light| energy + light.approximate_energy())
<= 0.0
{
0.0
} else {
@ -73,14 +74,8 @@ impl<'a> Scene<'a> {
let n = (n - wl_prob) / (1.0 - wl_prob);
if let Some((ss, sgeo, pdf, spdf)) =
self.root.sample_lights(
xform_stack,
n,
uvw,
wavelength,
time,
intr,
)
self.root
.sample_lights(xform_stack, n, uvw, wavelength, time, intr)
{
return SceneLightSample::Surface {
color: ss,
@ -96,7 +91,6 @@ impl<'a> Scene<'a> {
}
}
#[derive(Debug, Copy, Clone)]
pub enum SceneLightSample {
None,
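
The `(n - wl_prob) / (1.0 - wl_prob)` expression in the hunk above is the usual trick of rescaling a uniform sample after it has been spent on the discrete world-light vs. local-light choice, so the surviving branch still sees a uniform value. A sketch of the pattern; the names are illustrative:

// Makes a discrete choice with probability `p` (0 < p < 1) using `u`,
// then rescales `u` so the taken branch still sees a uniform [0, 1) sample.
fn pick_branch(u: f32, p: f32) -> (bool, f32) {
    if u < p {
        (true, u / p)
    } else {
        (false, (u - p) / (1.0 - p))
    }
}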

View File

@ -2,8 +2,8 @@ pub mod surface_closure;
use std::fmt::Debug;
use color::{XYZ, Color};
use self::surface_closure::{SurfaceClosureUnion, EmitClosure, LambertClosure, GTRClosure};
use color::{Color, XYZ};
use self::surface_closure::{EmitClosure, GTRClosure, LambertClosure, SurfaceClosureUnion};
use surface::SurfaceIntersectionData;
/// Trait for surface shaders.
@ -31,8 +31,12 @@ pub trait SurfaceShader: Debug + Sync {
/// building.
#[derive(Debug, Copy, Clone)]
pub enum SimpleSurfaceShader {
Emit { color: XYZ },
Lambert { color: XYZ },
Emit {
color: XYZ,
},
Lambert {
color: XYZ,
},
GTR {
color: XYZ,
roughness: f32,
@ -51,29 +55,23 @@ impl SurfaceShader for SimpleSurfaceShader {
let _ = (data, time); // Silence "unused" compiler warning
match *self {
SimpleSurfaceShader::Emit { color } => {
SurfaceClosureUnion::EmitClosure(
EmitClosure::new(color.to_spectral_sample(wavelength)),
)
}
SimpleSurfaceShader::Lambert { color } => {
SurfaceClosureUnion::LambertClosure(
LambertClosure::new(color.to_spectral_sample(wavelength)),
)
}
SimpleSurfaceShader::Emit { color } => SurfaceClosureUnion::EmitClosure(
EmitClosure::new(color.to_spectral_sample(wavelength)),
),
SimpleSurfaceShader::Lambert { color } => SurfaceClosureUnion::LambertClosure(
LambertClosure::new(color.to_spectral_sample(wavelength)),
),
SimpleSurfaceShader::GTR {
color,
roughness,
tail_shape,
fresnel,
} => {
SurfaceClosureUnion::GTRClosure(GTRClosure::new(
color.to_spectral_sample(wavelength),
roughness,
tail_shape,
fresnel,
))
}
} => SurfaceClosureUnion::GTRClosure(GTRClosure::new(
color.to_spectral_sample(wavelength),
roughness,
tail_shape,
fresnel,
)),
}
}
}

View File

@ -3,11 +3,10 @@
use std::f32::consts::PI as PI_32;
use color::SpectralSample;
use math::{Vector, Normal, dot, clamp, zup_to_vec};
use math::{clamp, dot, zup_to_vec, Normal, Vector};
use sampling::cosine_sample_hemisphere;
use lerp::lerp;
const INV_PI: f32 = 1.0 / PI_32;
const H_PI: f32 = PI_32 / 2.0;
@ -90,7 +89,6 @@ pub trait SurfaceClosure {
) -> f32;
}
/// Utility function that calculates the fresnel reflection factor of a given
/// incoming ray against a surface with the given ior outside/inside ratio.
///
@ -114,7 +112,6 @@ fn dielectric_fresnel(ior_ratio: f32, c: f32) -> f32 {
0.5 * f3 * f6
}
/// Schlick's approximation of the fresnel reflection factor.
///
/// Same interface as `dielectric_fresnel()`, above.
@ -128,7 +125,6 @@ fn schlick_fresnel(ior_ratio: f32, c: f32) -> f32 {
f2 + ((1.0 - f2) * c1 * c2 * c2)
}
/// Utility function that calculates the fresnel reflection factor of a given
/// incoming ray against a surface with the given normal-reflectance factor.
///
@ -154,7 +150,6 @@ fn dielectric_fresnel_from_fac(fresnel_fac: f32, c: f32) -> f32 {
dielectric_fresnel(ior_ratio, c)
}
/// Schlick's approximation version of `dielectric_fresnel_from_fac()` above.
#[allow(dead_code)]
fn schlick_fresnel_from_fac(frensel_fac: f32, c: f32) -> f32 {
@ -163,7 +158,6 @@ fn schlick_fresnel_from_fac(frensel_fac: f32, c: f32) -> f32 {
frensel_fac + ((1.0 - frensel_fac) * c1 * c2 * c2)
}
/// Emit closure.
///
/// NOTE: this needs to be handled specially by the integrator! It does not
@ -228,7 +222,6 @@ impl SurfaceClosure for EmitClosure {
}
}
/// Lambertian surface closure
#[derive(Debug, Copy, Clone)]
pub struct LambertClosure {
@ -368,7 +361,6 @@ impl SurfaceClosure for LambertClosure {
}
}
/// The GTR microfacet BRDF from the Disney Principled BRDF paper.
#[derive(Debug, Copy, Clone)]
pub struct GTRClosure {
@ -438,8 +430,8 @@ impl GTRClosure {
let roughness2 = self.roughness * self.roughness;
// Calculate top half of equation
let top = 1.0 -
((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u)
let top = 1.0
- ((roughness2.powf(1.0 - self.tail_shape) * (1.0 - u)) + u)
.powf(1.0 / (1.0 - self.tail_shape));
// Calculate bottom half of equation
@ -470,7 +462,6 @@ impl SurfaceClosure for GTRClosure {
self.roughness == 0.0
}
fn sample(
&self,
inc: Vector,
@ -505,7 +496,6 @@ impl SurfaceClosure for GTRClosure {
}
}
fn evaluate(&self, inc: Vector, out: Vector, nor: Normal, nor_g: Normal) -> SpectralSample {
// Calculate needed vectors, normalized
let aa = -inc.normalized(); // Vector pointing to where "in" came from
@ -605,7 +595,6 @@ impl SurfaceClosure for GTRClosure {
}
}
fn sample_pdf(&self, inc: Vector, out: Vector, nor: Normal, nor_g: Normal) -> f32 {
// Calculate needed vectors, normalized
let aa = -inc.normalized(); // Vector pointing to where "in" came from
@ -630,7 +619,6 @@ impl SurfaceClosure for GTRClosure {
self.dist(nh, self.roughness) * INV_PI
}
fn estimate_eval_over_sphere_light(
&self,
inc: Vector,
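
For orientation on the fresnel helpers touched above: Schlick's approximation replaces the full dielectric fresnel with a cheap polynomial in the cosine term. A standalone sketch; the crate's `schlick_fresnel` works from an ior ratio rather than a normal-incidence reflectance:

// Schlick's approximation of fresnel reflectance. `r0` is the reflectance
// at normal incidence, `cos_theta` the cosine between the incoming
// direction and the surface normal.
fn schlick_approx(r0: f32, cos_theta: f32) -> f32 {
    let c = (1.0 - cos_theta).max(0.0);
    r0 + (1.0 - r0) * c * c * c * c * c
}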

View File

@ -6,12 +6,11 @@ pub mod triangle_mesh;
use std::fmt::Debug;
use boundable::Boundable;
use math::{Point, Vector, Normal, Matrix4x4};
use ray::{Ray, AccelRay};
use math::{Matrix4x4, Normal, Point, Vector};
use ray::{AccelRay, Ray};
use shading::surface_closure::SurfaceClosureUnion;
use shading::SurfaceShader;
pub trait Surface: Boundable + Debug + Sync {
fn intersect_rays(
&self,
@ -23,7 +22,6 @@ pub trait Surface: Boundable + Debug + Sync {
);
}
#[derive(Debug, Copy, Clone)]
pub enum SurfaceIntersection {
Miss,
@ -34,17 +32,16 @@ pub enum SurfaceIntersection {
},
}
#[derive(Debug, Copy, Clone)]
pub struct SurfaceIntersectionData {
pub incoming: Vector, // Direction of the incoming ray
pub pos: Point, // Position of the intersection
pub pos_err: f32, // Error magnitude of the intersection position. Imagine
pub pos: Point, // Position of the intersection
pub pos_err: f32, // Error magnitude of the intersection position. Imagine
// a cube centered around `pos` with dimensions of `2 * pos_err`.
pub nor: Normal, // Shading normal
pub nor_g: Normal, // True geometric normal
pub nor: Normal, // Shading normal
pub nor_g: Normal, // True geometric normal
pub local_space: Matrix4x4, // Matrix from global space to local space
pub t: f32, // Ray t-value at the intersection point
pub uv: (f32, f32), // 2d surface parameters
pub sample_pdf: f32, // The PDF of getting this point by explicitly sampling the surface
pub t: f32, // Ray t-value at the intersection point
pub uv: (f32, f32), // 2d surface parameters
pub sample_pdf: f32, // The PDF of getting this point by explicitly sampling the surface
}

View File

@ -4,7 +4,6 @@ use fp_utils::fp_gamma;
use math::Point;
use ray::Ray;
/// Intersects `ray` with `tri`, returning `Some((t, b0, b1, b2))`, or `None`
/// if no intersection.
///
@ -83,8 +82,8 @@ pub fn intersect_ray(ray: &Ray, tri: (Point, Point, Point)) -> Option<(f32, f32,
let t_scaled = (e0 * p0z) + (e1 * p1z) + (e2 * p2z);
// Check if the hitpoint t is within ray min/max t.
if (det > 0.0 && (t_scaled <= 0.0 || t_scaled > (ray.max_t * det))) ||
(det < 0.0 && (t_scaled >= 0.0 || t_scaled < (ray.max_t * det)))
if (det > 0.0 && (t_scaled <= 0.0 || t_scaled > (ray.max_t * det)))
|| (det < 0.0 && (t_scaled >= 0.0 || t_scaled < (ray.max_t * det)))
{
return None;
}
@ -115,8 +114,8 @@ pub fn intersect_ray(ray: &Ray, tri: (Point, Point, Point)) -> Option<(f32, f32,
// Calculate delta t
let max_e = max_abs_3(e0, e1, e2);
let dt = 3.0 * ((fp_gamma(3) * max_e * max_zt) + (de * max_zt + dz * max_e)) *
inv_det.abs();
let dt =
3.0 * ((fp_gamma(3) * max_e * max_zt) + (de * max_zt + dz * max_e)) * inv_det.abs();
// Finally, do the check
if t <= dt {
@ -133,12 +132,13 @@ pub fn intersect_ray(ray: &Ray, tri: (Point, Point, Point)) -> Option<(f32, f32,
///
/// Returns the point and the error magnitude of the point.
pub fn surface_point(tri: (Point, Point, Point), bary: (f32, f32, f32)) -> (Point, f32) {
let pos = ((tri.0.into_vector() * bary.0) + (tri.1.into_vector() * bary.1) +
(tri.2.into_vector() * bary.2))
let pos = ((tri.0.into_vector() * bary.0) + (tri.1.into_vector() * bary.1)
+ (tri.2.into_vector() * bary.2))
.into_point();
let pos_err = (((tri.0.into_vector().abs() * bary.0) + (tri.1.into_vector().abs() * bary.1) +
(tri.2.into_vector().abs() * bary.2)) * fp_gamma(7)).co
let pos_err = (((tri.0.into_vector().abs() * bary.0) + (tri.1.into_vector().abs() * bary.1)
+ (tri.2.into_vector().abs() * bary.2)) * fp_gamma(7))
.co
.h_max();
(pos, pos_err)
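
The `surface_point` reflow above is purely cosmetic; the underlying operation is plain barycentric interpolation plus a floating-point error bound. A minimal sketch of the interpolation itself, with plain arrays instead of the crate's `Point`/`Vector` types:

// Barycentric interpolation of a triangle's vertices with weights
// (b0, b1, b2), where b0 + b1 + b2 == 1.0.
fn barycentric_point(
    tri: ([f32; 3], [f32; 3], [f32; 3]),
    bary: (f32, f32, f32),
) -> [f32; 3] {
    let mut p = [0.0f32; 3];
    for i in 0..3 {
        p[i] = tri.0[i] * bary.0 + tri.1[i] * bary.1 + tri.2[i] * bary.2;
    }
    p
}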

View File

@ -6,14 +6,13 @@ use accel::BVH4;
use bbox::BBox;
use boundable::Boundable;
use lerp::lerp_slice;
use math::{Point, Normal, Matrix4x4, dot, cross};
use ray::{Ray, AccelRay};
use math::{cross, dot, Matrix4x4, Normal, Point};
use ray::{AccelRay, Ray};
use shading::SurfaceShader;
use super::{Surface, SurfaceIntersection, SurfaceIntersectionData};
use super::triangle;
#[derive(Copy, Clone, Debug)]
pub struct TriangleMesh<'a> {
time_sample_count: usize,
@ -94,8 +93,8 @@ impl<'a> TriangleMesh<'a> {
// Build BVH
let accel = BVH4::from_objects(arena, &mut indices[..], 3, |tri| {
&bounds[(tri.3 as usize * time_sample_count)..
((tri.3 as usize + 1) * time_sample_count)]
&bounds
[(tri.3 as usize * time_sample_count)..((tri.3 as usize + 1) * time_sample_count)]
});
TriangleMesh {
@ -114,7 +113,6 @@ impl<'a> Boundable for TriangleMesh<'a> {
}
}
impl<'a> Surface for TriangleMesh<'a> {
fn intersect_rays(
&self,
@ -131,27 +129,25 @@ impl<'a> Surface for TriangleMesh<'a> {
Matrix4x4::new()
};
self.accel.traverse(
&mut accel_rays[..],
self.indices,
|tri_indices, rs| {
self.accel
.traverse(&mut accel_rays[..], self.indices, |tri_indices, rs| {
for r in rs {
let wr = &wrays[r.id as usize];
// Get triangle
let tri = {
let p0_slice = &self.vertices[(tri_indices.0 as usize *
self.time_sample_count)..
((tri_indices.0 as usize + 1) *
self.time_sample_count)];
let p1_slice = &self.vertices[(tri_indices.1 as usize *
self.time_sample_count)..
((tri_indices.1 as usize + 1) *
self.time_sample_count)];
let p2_slice = &self.vertices[(tri_indices.2 as usize *
self.time_sample_count)..
((tri_indices.2 as usize + 1) *
self.time_sample_count)];
let p0_slice = &self.vertices[(tri_indices.0 as usize
* self.time_sample_count)
..((tri_indices.0 as usize + 1)
* self.time_sample_count)];
let p1_slice = &self.vertices[(tri_indices.1 as usize
* self.time_sample_count)
..((tri_indices.1 as usize + 1)
* self.time_sample_count)];
let p2_slice = &self.vertices[(tri_indices.2 as usize
* self.time_sample_count)
..((tri_indices.2 as usize + 1)
* self.time_sample_count)];
let p0 = lerp_slice(p0_slice, wr.time);
let p1 = lerp_slice(p1_slice, wr.time);
@ -166,18 +162,20 @@ impl<'a> Surface for TriangleMesh<'a> {
if space.len() > 1 {
// Per-ray transform, for motion blur
let mat_space = lerp_slice(space, wr.time).inverse();
(mat_space, (
tri.0 * mat_space,
tri.1 * mat_space,
tri.2 * mat_space,
))
(
mat_space,
(tri.0 * mat_space, tri.1 * mat_space, tri.2 * mat_space),
)
} else {
// Same transform for all rays
(static_mat_space, (
tri.0 * static_mat_space,
tri.1 * static_mat_space,
tri.2 * static_mat_space,
))
(
static_mat_space,
(
tri.0 * static_mat_space,
tri.1 * static_mat_space,
tri.2 * static_mat_space,
),
)
}
} else {
// No transforms
@ -199,18 +197,18 @@ impl<'a> Surface for TriangleMesh<'a> {
// Calculate interpolated surface normal, if any
let shading_normal = if let Some(normals) = self.normals {
let n0_slice = &normals[(tri_indices.0 as usize *
self.time_sample_count)..
((tri_indices.0 as usize + 1) *
self.time_sample_count)];
let n1_slice = &normals[(tri_indices.1 as usize *
self.time_sample_count)..
((tri_indices.1 as usize + 1) *
self.time_sample_count)];
let n2_slice = &normals[(tri_indices.2 as usize *
self.time_sample_count)..
((tri_indices.2 as usize + 1) *
self.time_sample_count)];
let n0_slice = &normals[(tri_indices.0 as usize
* self.time_sample_count)
..((tri_indices.0 as usize + 1)
* self.time_sample_count)];
let n1_slice = &normals[(tri_indices.1 as usize
* self.time_sample_count)
..((tri_indices.1 as usize + 1)
* self.time_sample_count)];
let n2_slice = &normals[(tri_indices.2 as usize
* self.time_sample_count)
..((tri_indices.2 as usize + 1)
* self.time_sample_count)];
let n0 = lerp_slice(n0_slice, wr.time).normalized();
let n1 = lerp_slice(n1_slice, wr.time).normalized();
@ -252,7 +250,6 @@ impl<'a> Surface for TriangleMesh<'a> {
}
}
}
},
);
});
}
}

View File

@ -5,7 +5,6 @@ use std::time::Duration;
use time;
#[derive(Copy, Clone)]
pub struct Timer {
last_time: u64,
@ -13,7 +12,9 @@ pub struct Timer {
impl Timer {
pub fn new() -> Timer {
Timer { last_time: time::precise_time_ns() }
Timer {
last_time: time::precise_time_ns(),
}
}
/// Marks a new tick time and returns the time elapsed in seconds since

View File

@ -2,13 +2,12 @@ use std::iter;
use algorithm::partition;
use lerp::lerp_slice;
use ray::{Ray, AccelRay};
use scene::{Assembly, Object, InstanceType};
use ray::{AccelRay, Ray};
use scene::{Assembly, InstanceType, Object};
use surface::SurfaceIntersection;
use transform_stack::TransformStack;
use shading::{SurfaceShader, SimpleSurfaceShader};
use color::{XYZ, rec709_to_xyz};
use shading::{SimpleSurfaceShader, SurfaceShader};
use color::{rec709_to_xyz, XYZ};
pub struct Tracer<'a> {
rays: Vec<AccelRay>,
@ -31,9 +30,11 @@ impl<'a> Tracer<'a> {
self.rays.clear();
self.rays.reserve(wrays.len());
let mut ids = 0..(wrays.len() as u32);
self.rays.extend(wrays.iter().map(
|wr| AccelRay::new(wr, ids.next().unwrap()),
));
self.rays.extend(
wrays
.iter()
.map(|wr| AccelRay::new(wr, ids.next().unwrap())),
);
self.inner.trace(wrays, &mut self.rays[..])
}
@ -50,12 +51,8 @@ impl<'a> TracerInner<'a> {
// Ready the isects
self.isects.clear();
self.isects.reserve(wrays.len());
self.isects.extend(
iter::repeat(SurfaceIntersection::Miss).take(
wrays
.len(),
),
);
self.isects
.extend(iter::repeat(SurfaceIntersection::Miss).take(wrays.len()));
let mut ray_sets = split_rays_by_direction(&mut rays[..]);
for ray_set in ray_sets.iter_mut().filter(|ray_set| !ray_set.is_empty()) {
@ -71,10 +68,9 @@ impl<'a> TracerInner<'a> {
wrays: &[Ray],
accel_rays: &mut [AccelRay],
) {
assembly.object_accel.traverse(
&mut accel_rays[..],
&assembly.instances[..],
|inst, rs| {
assembly
.object_accel
.traverse(&mut accel_rays[..], &assembly.instances[..], |inst, rs| {
// Transform rays if needed
if let Some((xstart, xend)) = inst.transform_indices {
// Push transforms to stack
@ -130,9 +126,8 @@ impl<'a> TracerInner<'a> {
InstanceType::Object => {
self.trace_object(
&assembly.objects[inst.data_index],
inst.surface_shader_index.map(
|i| assembly.surface_shaders[i],
),
inst.surface_shader_index
.map(|i| assembly.surface_shaders[i]),
wrays,
ray_set,
);
@ -172,8 +167,7 @@ impl<'a> TracerInner<'a> {
}
}
}
},
);
});
}
fn trace_object<'b>(
@ -217,7 +211,6 @@ impl<'a> TracerInner<'a> {
}
}
fn split_rays_by_direction(rays: &mut [AccelRay]) -> [&mut [AccelRay]; 8] {
// | | | | | | | | |
// s1 s2 s3 s4 s5 s6 s7
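
`split_rays_by_direction` groups rays into eight buckets by the signs of their direction components. One conventional sign-bit encoding is sketched below; the exact bit order the partitioner uses is not visible in this hunk:

// Packs the sign bits of a ray direction into an octant index in 0..8.
fn direction_octant(dir: (f32, f32, f32)) -> usize {
    ((dir.0 < 0.0) as usize)
        + (((dir.1 < 0.0) as usize) << 1)
        + (((dir.2 < 0.0) as usize) << 2)
}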

View File

@ -3,7 +3,6 @@ use std::cmp;
use algorithm::merge_slices_to;
use math::Matrix4x4;
pub struct TransformStack {
stack: Vec<Matrix4x4>,
stack_indices: Vec<usize>,

View File

@ -34,10 +34,8 @@ fn main() {
perm.swap(0, 2);
perm.swap(1, 3);
}
traversal_table[raydir].push(
perm[0] + (perm[1] << 2) + (perm[2] << 4) +
(perm[3] << 6),
);
traversal_table[raydir]
.push(perm[0] + (perm[1] << 2) + (perm[2] << 4) + (perm[3] << 6));
}
}
}
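
The value pushed into the traversal table above packs four 2-bit child indices into one byte; unpacking at traversal time is the symmetric shift-and-mask. An illustrative sketch (the real unpacking lives elsewhere in the crate):

// Extracts the i-th 2-bit child slot (i in 0..4) from a packed
// traversal code.
fn unpack_child(code: u8, i: usize) -> u8 {
    (code >> (i * 2)) & 0b11
}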

View File

@ -19,9 +19,9 @@ include!(concat!(env!("OUT_DIR"), "/table_inc.rs"));
#[derive(Debug, Copy, Clone)]
pub enum SplitAxes {
Full((u8, u8, u8)), // top, left, right
Left((u8, u8)), // top, left
Right((u8, u8)), // top, right
TopOnly(u8), // top
Left((u8, u8)), // top, left
Right((u8, u8)), // top, right
TopOnly(u8), // top
}
/// Calculates the traversal code for a BVH4 node based on the splits and

View File

@ -3,7 +3,6 @@ use std::fs::File;
use std::io::Write;
use std::path::Path;
#[derive(Copy, Clone)]
struct Chromaticities {
r: (f64, f64),
@ -12,7 +11,6 @@ struct Chromaticities {
w: (f64, f64),
}
fn main() {
let out_dir = env::var("OUT_DIR").unwrap();
@ -77,10 +75,8 @@ fn main() {
}
}
/// Generates conversion functions for the given rgb to xyz transform matrix.
fn write_conversion_functions(space_name: &str, to_xyz: [[f64; 3]; 3], f: &mut File) {
f.write_all(
format!(
r#"
@ -185,7 +181,6 @@ pub fn xyz_to_{}_e(xyz: (f32, f32, f32)) -> (f32, f32, f32) {{
).unwrap();
}
/// Port of the RGBtoXYZ function from the ACES CTL reference implementation.
/// See lib/IlmCtlMath/CtlColorSpace.cpp in the CTL reference implementation.
///
@ -200,20 +195,20 @@ fn rgb_to_xyz(chroma: Chromaticities, y: f64) -> [[f64; 3]; 3] {
let z = (1.0 - chroma.w.0 - chroma.w.1) * y / chroma.w.1;
// Scale factors for matrix rows
let d = chroma.r.0 * (chroma.b.1 - chroma.g.1) + chroma.b.0 * (chroma.g.1 - chroma.r.1) +
chroma.g.0 * (chroma.r.1 - chroma.b.1);
let d = chroma.r.0 * (chroma.b.1 - chroma.g.1) + chroma.b.0 * (chroma.g.1 - chroma.r.1)
+ chroma.g.0 * (chroma.r.1 - chroma.b.1);
let sr = (x * (chroma.b.1 - chroma.g.1) -
chroma.g.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) +
chroma.b.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))) / d;
let sr = (x * (chroma.b.1 - chroma.g.1)
- chroma.g.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z))
+ chroma.b.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))) / d;
let sg = (x * (chroma.r.1 - chroma.b.1) +
chroma.r.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) -
chroma.b.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
let sg = (x * (chroma.r.1 - chroma.b.1)
+ chroma.r.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z))
- chroma.b.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
let sb = (x * (chroma.g.1 - chroma.r.1) -
chroma.r.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z)) +
chroma.g.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
let sb = (x * (chroma.g.1 - chroma.r.1)
- chroma.r.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))
+ chroma.g.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
// Assemble the matrix
let mut mat = [[0.0; 3]; 3];
@ -233,7 +228,6 @@ fn rgb_to_xyz(chroma: Chromaticities, y: f64) -> [[f64; 3]; 3] {
mat
}
/// Chromatically adapts a matrix from `rgb_to_xyz` to a whitepoint of E.
///
/// In other words, makes it so that RGB (1,1,1) maps to XYZ (1,1,1).
@ -259,7 +253,6 @@ fn adapt_to_e(mat: [[f64; 3]; 3], y: f64) -> [[f64; 3]; 3] {
mat2
}
/// Calculates the inverse of the given 3x3 matrix.
///
/// Ported to Rust from `gjInverse()` in IlmBase's Imath/ImathMatrix.h
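
For orientation on what the generated conversion functions do with the matrix from `rgb_to_xyz`: applying it is a single 3x3 multiply. A sketch assuming the row-vector convention of the CTL reference (xyz = rgb * M); the generated code may lay the multiply out differently:

// Multiplies an RGB triple by a 3x3 matrix laid out as in `rgb_to_xyz`,
// assuming the row-vector convention xyz = rgb * M.
fn apply_rgb_to_xyz(m: [[f64; 3]; 3], rgb: (f64, f64, f64)) -> (f64, f64, f64) {
    (
        rgb.0 * m[0][0] + rgb.1 * m[1][0] + rgb.2 * m[2][0],
        rgb.0 * m[0][1] + rgb.1 * m[1][1] + rgb.2 * m[2][1],
        rgb.0 * m[0][2] + rgb.1 * m[1][2] + rgb.2 * m[2][2],
    )
}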

View File

@ -27,7 +27,6 @@ use std::fs::File;
use std::io::Write;
use std::path::Path;
/// How many components to generate.
const NUM_DIMENSIONS: usize = 128;
@ -111,8 +110,7 @@ pub fn sample(dimension: u32, index: u32) -> f32 {{
format!(
r#"
{} => halton{}(index),"#,
i,
primes[i]
i, primes[i]
).as_bytes(),
).unwrap();
}
@ -127,7 +125,6 @@ pub fn sample(dimension: u32, index: u32) -> f32 {{
).as_bytes(),
).unwrap();
// Write the special-cased first dimension
f.write_all(
format!(
@ -196,9 +193,7 @@ fn halton{}(index: u32) -> f32 {{
format!(
r#"
return (unsafe{{*PERM{}.get_unchecked((index % {}) as usize)}} as u32 * {} +"#,
base,
pow_base,
power
base, pow_base, power
).as_bytes(),
).unwrap();;
@ -211,10 +206,7 @@ fn halton{}(index: u32) -> f32 {{
format!(
r#"
unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32 * {} +"#,
base,
div,
pow_base,
power
base, div, pow_base, power
).as_bytes(),
).unwrap();;
}
@ -235,7 +227,6 @@ fn halton{}(index: u32) -> f32 {{
}
}
/// Check primality. Not optimized, since it's not performance-critical.
fn is_prime(p: usize) -> bool {
for i in 2..p {
@ -271,10 +262,12 @@ fn get_faure_permutation(faure: &Vec<Vec<usize>>, b: usize) -> Vec<usize> {
let c = b / 2;
return (0..b)
.map(|i| if i < c {
2 * faure[c][i]
} else {
2 * faure[c][i - c] + 1
.map(|i| {
if i < c {
2 * faure[c][i]
} else {
2 * faure[c][i - c] + 1
}
})
.collect();
}
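
The generated `haltonN()` functions above are permuted radical inverses built from the Faure permutation tables; the unpermuted quantity they are based on can be written directly. A small illustrative sketch, not the generated code itself:

// Plain (unscrambled) radical inverse of `index` in a prime `base`.
fn radical_inverse(base: u32, mut index: u32) -> f32 {
    let inv_base = 1.0 / f64::from(base);
    let mut inv = inv_base;
    let mut result = 0.0f64;
    while index > 0 {
        result += f64::from(index % base) * inv;
        index /= base;
        inv *= inv_base;
    }
    result as f32
}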

View File

@ -23,7 +23,6 @@ pub fn dot<T: DotProduct>(a: T, b: T) -> f32 {
a.dot(b)
}
/// Trait for calculating cross products.
pub trait CrossProduct {
#[inline]

View File

@ -7,14 +7,12 @@ use float4::Float4;
use super::Point;
/// A 4x4 matrix, used for transforms
#[derive(Debug, Copy, Clone)]
pub struct Matrix4x4 {
pub values: [Float4; 4],
}
impl Matrix4x4 {
/// Creates a new identity matrix
#[inline]
@ -143,7 +141,6 @@ impl Matrix4x4 {
}
}
/// Returns the inverse of the Matrix
#[inline]
pub fn inverse(&self) -> Matrix4x4 {
@ -169,31 +166,44 @@ impl Matrix4x4 {
values: {
[
Float4::new(
((self[1].get_1() * c5) - (self[1].get_2() * c4) + (self[1].get_3() * c3)) * invdet,
((-self[0].get_1() * c5) + (self[0].get_2() * c4) - (self[0].get_3() * c3)) * invdet,
((self[3].get_1() * s5) - (self[3].get_2() * s4) + (self[3].get_3() * s3)) * invdet,
((-self[2].get_1() * s5) + (self[2].get_2() * s4) - (self[2].get_3() * s3)) * invdet,
((self[1].get_1() * c5) - (self[1].get_2() * c4) + (self[1].get_3() * c3))
* invdet,
((-self[0].get_1() * c5) + (self[0].get_2() * c4) - (self[0].get_3() * c3))
* invdet,
((self[3].get_1() * s5) - (self[3].get_2() * s4) + (self[3].get_3() * s3))
* invdet,
((-self[2].get_1() * s5) + (self[2].get_2() * s4) - (self[2].get_3() * s3))
* invdet,
),
Float4::new(
((-self[1].get_0() * c5) + (self[1].get_2() * c2) - (self[1].get_3() * c1)) * invdet,
((self[0].get_0() * c5) - (self[0].get_2() * c2) + (self[0].get_3() * c1)) * invdet,
((-self[3].get_0() * s5) + (self[3].get_2() * s2) - (self[3].get_3() * s1)) * invdet,
((self[2].get_0() * s5) - (self[2].get_2() * s2) + (self[2].get_3() * s1)) * invdet,
((-self[1].get_0() * c5) + (self[1].get_2() * c2) - (self[1].get_3() * c1))
* invdet,
((self[0].get_0() * c5) - (self[0].get_2() * c2) + (self[0].get_3() * c1))
* invdet,
((-self[3].get_0() * s5) + (self[3].get_2() * s2) - (self[3].get_3() * s1))
* invdet,
((self[2].get_0() * s5) - (self[2].get_2() * s2) + (self[2].get_3() * s1))
* invdet,
),
Float4::new(
((self[1].get_0() * c4) - (self[1].get_1() * c2) + (self[1].get_3() * c0)) * invdet,
((-self[0].get_0() * c4) + (self[0].get_1() * c2) - (self[0].get_3() * c0)) * invdet,
((self[3].get_0() * s4) - (self[3].get_1() * s2) + (self[3].get_3() * s0)) * invdet,
((-self[2].get_0() * s4) + (self[2].get_1() * s2) - (self[2].get_3() * s0)) * invdet,
((self[1].get_0() * c4) - (self[1].get_1() * c2) + (self[1].get_3() * c0))
* invdet,
((-self[0].get_0() * c4) + (self[0].get_1() * c2) - (self[0].get_3() * c0))
* invdet,
((self[3].get_0() * s4) - (self[3].get_1() * s2) + (self[3].get_3() * s0))
* invdet,
((-self[2].get_0() * s4) + (self[2].get_1() * s2) - (self[2].get_3() * s0))
* invdet,
),
Float4::new(
((-self[1].get_0() * c3) + (self[1].get_1() * c1) - (self[1].get_2() * c0)) * invdet,
((self[0].get_0() * c3) - (self[0].get_1() * c1) + (self[0].get_2() * c0)) * invdet,
((-self[3].get_0() * s3) + (self[3].get_1() * s1) - (self[3].get_2() * s0)) * invdet,
((self[2].get_0() * s3) - (self[2].get_1() * s1) + (self[2].get_2() * s0)) * invdet,
((-self[1].get_0() * c3) + (self[1].get_1() * c1) - (self[1].get_2() * c0))
* invdet,
((self[0].get_0() * c3) - (self[0].get_1() * c1) + (self[0].get_2() * c0))
* invdet,
((-self[3].get_0() * s3) + (self[3].get_1() * s1) - (self[3].get_2() * s0))
* invdet,
((self[2].get_0() * s3) - (self[2].get_1() * s1) + (self[2].get_2() * s0))
* invdet,
),
]
},
@ -201,7 +211,6 @@ impl Matrix4x4 {
}
}
impl Index<usize> for Matrix4x4 {
type Output = Float4;
@ -211,7 +220,6 @@ impl Index<usize> for Matrix4x4 {
}
}
impl IndexMut<usize> for Matrix4x4 {
#[inline(always)]
fn index_mut<'a>(&'a mut self, _index: usize) -> &'a mut Float4 {
@ -219,7 +227,6 @@ impl IndexMut<usize> for Matrix4x4 {
}
}
impl PartialEq for Matrix4x4 {
#[inline]
fn eq(&self, other: &Matrix4x4) -> bool {
@ -235,7 +242,6 @@ impl PartialEq for Matrix4x4 {
}
}
/// Multiply two matrices together
impl Mul<Matrix4x4> for Matrix4x4 {
type Output = Matrix4x4;
@ -251,21 +257,18 @@ impl Mul<Matrix4x4> for Matrix4x4 {
(m[2] * other[0]).h_sum(),
(m[3] * other[0]).h_sum(),
),
Float4::new(
(m[0] * other[1]).h_sum(),
(m[1] * other[1]).h_sum(),
(m[2] * other[1]).h_sum(),
(m[3] * other[1]).h_sum(),
),
Float4::new(
(m[0] * other[2]).h_sum(),
(m[1] * other[2]).h_sum(),
(m[2] * other[2]).h_sum(),
(m[3] * other[2]).h_sum(),
),
Float4::new(
(m[0] * other[3]).h_sum(),
(m[1] * other[3]).h_sum(),
@ -277,10 +280,6 @@ impl Mul<Matrix4x4> for Matrix4x4 {
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -1,14 +1,13 @@
#![allow(dead_code)]
use std::cmp::PartialEq;
use std::ops::{Add, Sub, Mul, Div, Neg};
use std::ops::{Add, Div, Mul, Neg, Sub};
use float4::Float4;
use super::{DotProduct, CrossProduct};
use super::{CrossProduct, DotProduct};
use super::{Matrix4x4, Vector};
/// A surface normal in 3d homogeneous space.
#[derive(Debug, Copy, Clone)]
pub struct Normal {
@ -18,7 +17,9 @@ pub struct Normal {
impl Normal {
#[inline(always)]
pub fn new(x: f32, y: f32, z: f32) -> Normal {
Normal { co: Float4::new(x, y, z, 0.0) }
Normal {
co: Float4::new(x, y, z, 0.0),
}
}
#[inline(always)]
@ -82,7 +83,6 @@ impl Normal {
}
}
impl PartialEq for Normal {
#[inline(always)]
fn eq(&self, other: &Normal) -> bool {
@ -90,33 +90,36 @@ impl PartialEq for Normal {
}
}
impl Add for Normal {
type Output = Normal;
#[inline(always)]
fn add(self, other: Normal) -> Normal {
Normal { co: self.co + other.co }
Normal {
co: self.co + other.co,
}
}
}
impl Sub for Normal {
type Output = Normal;
#[inline(always)]
fn sub(self, other: Normal) -> Normal {
Normal { co: self.co - other.co }
Normal {
co: self.co - other.co,
}
}
}
impl Mul<f32> for Normal {
type Output = Normal;
#[inline(always)]
fn mul(self, other: f32) -> Normal {
Normal { co: self.co * other }
Normal {
co: self.co * other,
}
}
}
@ -137,17 +140,17 @@ impl Mul<Matrix4x4> for Normal {
}
}
impl Div<f32> for Normal {
type Output = Normal;
#[inline(always)]
fn div(self, other: f32) -> Normal {
Normal { co: self.co / other }
Normal {
co: self.co / other,
}
}
}
impl Neg for Normal {
type Output = Normal;
@ -157,7 +160,6 @@ impl Neg for Normal {
}
}
impl DotProduct for Normal {
#[inline(always)]
fn dot(self, other: Normal) -> f32 {
@ -165,7 +167,6 @@ impl DotProduct for Normal {
}
}
impl CrossProduct for Normal {
#[inline]
fn cross(self, other: Normal) -> Normal {
@ -180,11 +181,10 @@ impl CrossProduct for Normal {
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::{Matrix4x4, CrossProduct, DotProduct};
use super::super::{CrossProduct, DotProduct, Matrix4x4};
#[test]
fn add() {

View File

@ -1,14 +1,13 @@
#![allow(dead_code)]
use std::cmp::PartialEq;
use std::ops::{Add, Sub, Mul};
use std::ops::{Add, Mul, Sub};
use float4::Float4;
use super::Matrix4x4;
use super::Vector;
/// A position in 3d homogeneous space.
#[derive(Debug, Copy, Clone)]
pub struct Point {
@ -18,14 +17,18 @@ pub struct Point {
impl Point {
#[inline(always)]
pub fn new(x: f32, y: f32, z: f32) -> Point {
Point { co: Float4::new(x, y, z, 1.0) }
Point {
co: Float4::new(x, y, z, 1.0),
}
}
/// Returns the point in standardized coordinates, where the
/// fourth homogeneous component has been normalized to 1.0.
#[inline(always)]
pub fn norm(&self) -> Point {
Point { co: self.co / self.co.get_3() }
Point {
co: self.co / self.co.get_3(),
}
}
#[inline(always)]
@ -33,7 +36,9 @@ impl Point {
let n1 = self.norm();
let n2 = other.norm();
Point { co: n1.co.v_min(n2.co) }
Point {
co: n1.co.v_min(n2.co),
}
}
#[inline(always)]
@ -41,7 +46,9 @@ impl Point {
let n1 = self.norm();
let n2 = other.norm();
Point { co: n1.co.v_max(n2.co) }
Point {
co: n1.co.v_max(n2.co),
}
}
#[inline(always)]
@ -90,7 +97,6 @@ impl Point {
}
}
impl PartialEq for Point {
#[inline(always)]
fn eq(&self, other: &Point) -> bool {
@ -98,23 +104,25 @@ impl PartialEq for Point {
}
}
impl Add<Vector> for Point {
type Output = Point;
#[inline(always)]
fn add(self, other: Vector) -> Point {
Point { co: self.co + other.co }
Point {
co: self.co + other.co,
}
}
}
impl Sub for Point {
type Output = Vector;
#[inline(always)]
fn sub(self, other: Point) -> Vector {
Vector { co: self.norm().co - other.norm().co }
Vector {
co: self.norm().co - other.norm().co,
}
}
}
@ -123,7 +131,9 @@ impl Sub<Vector> for Point {
#[inline(always)]
fn sub(self, other: Vector) -> Point {
Point { co: self.co - other.co }
Point {
co: self.co - other.co,
}
}
}
@ -143,11 +153,10 @@ impl Mul<Matrix4x4> for Point {
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::{Vector, Matrix4x4};
use super::super::{Matrix4x4, Vector};
#[test]
fn norm() {

View File

@ -1,13 +1,12 @@
#![allow(dead_code)]
use std::cmp::PartialEq;
use std::ops::{Add, Sub, Mul, Div, Neg};
use std::ops::{Add, Div, Mul, Neg, Sub};
use float4::Float4;
use super::{DotProduct, CrossProduct};
use super::{Matrix4x4, Point, Normal};
use super::{CrossProduct, DotProduct};
use super::{Matrix4x4, Normal, Point};
/// A direction vector in 3d homogeneous space.
#[derive(Debug, Copy, Clone)]
@ -18,7 +17,9 @@ pub struct Vector {
impl Vector {
#[inline(always)]
pub fn new(x: f32, y: f32, z: f32) -> Vector {
Vector { co: Float4::new(x, y, z, 0.0) }
Vector {
co: Float4::new(x, y, z, 0.0),
}
}
#[inline(always)]
@ -92,7 +93,6 @@ impl Vector {
}
}
impl PartialEq for Vector {
#[inline(always)]
fn eq(&self, other: &Vector) -> bool {
@ -100,37 +100,39 @@ impl PartialEq for Vector {
}
}
impl Add for Vector {
type Output = Vector;
#[inline(always)]
fn add(self, other: Vector) -> Vector {
Vector { co: self.co + other.co }
Vector {
co: self.co + other.co,
}
}
}
impl Sub for Vector {
type Output = Vector;
#[inline(always)]
fn sub(self, other: Vector) -> Vector {
Vector { co: self.co - other.co }
Vector {
co: self.co - other.co,
}
}
}
impl Mul<f32> for Vector {
type Output = Vector;
#[inline(always)]
fn mul(self, other: f32) -> Vector {
Vector { co: self.co * other }
Vector {
co: self.co * other,
}
}
}
impl Mul<Matrix4x4> for Vector {
type Output = Vector;
@ -147,17 +149,17 @@ impl Mul<Matrix4x4> for Vector {
}
}
impl Div<f32> for Vector {
type Output = Vector;
#[inline(always)]
fn div(self, other: f32) -> Vector {
Vector { co: self.co / other }
Vector {
co: self.co / other,
}
}
}
impl Neg for Vector {
type Output = Vector;
@ -167,7 +169,6 @@ impl Neg for Vector {
}
}
impl DotProduct for Vector {
#[inline(always)]
fn dot(self, other: Vector) -> f32 {
@ -175,7 +176,6 @@ impl DotProduct for Vector {
}
}
impl CrossProduct for Vector {
#[inline]
fn cross(self, other: Vector) -> Vector {
@ -190,11 +190,10 @@ impl CrossProduct for Vector {
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::{Matrix4x4, CrossProduct, DotProduct};
use super::super::{CrossProduct, DotProduct, Matrix4x4};
#[test]
fn add() {

View File

@ -1,6 +1,6 @@
use std::slice;
use std::cell::{Cell, RefCell};
use std::mem::{size_of, align_of};
use std::mem::{align_of, size_of};
use std::cmp::max;
const GROWTH_FRACTION: usize = 8; // 1/N (smaller number leads to bigger allocations)
@ -261,10 +261,8 @@ impl MemArena {
unsafe fn alloc_raw(&self, size: usize, alignment: usize) -> *mut u8 {
assert!(alignment > 0);
self.stat_space_allocated.set(
self.stat_space_allocated.get() +
size,
); // Update stats
self.stat_space_allocated
.set(self.stat_space_allocated.get() + size); // Update stats
let mut blocks = self.blocks.borrow_mut();
@ -302,21 +300,22 @@ impl MemArena {
};
let waste_percentage = {
let w1 = ((blocks[0].capacity() - blocks[0].len()) * 100) /
blocks[0].capacity();
let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get()) *
100) /
self.stat_space_occupied.get();
if w1 < w2 { w1 } else { w2 }
let w1 =
((blocks[0].capacity() - blocks[0].len()) * 100) / blocks[0].capacity();
let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get())
* 100) / self.stat_space_occupied.get();
if w1 < w2 {
w1
} else {
w2
}
};
// If it's a "large allocation", give it its own memory block.
if (size + alignment) > next_size || waste_percentage > self.max_waste_percentage {
// Update stats
self.stat_space_occupied.set(
self.stat_space_occupied.get() + size + alignment -
1,
);
self.stat_space_occupied
.set(self.stat_space_occupied.get() + size + alignment - 1);
blocks.push(Vec::with_capacity(size + alignment - 1));
blocks.last_mut().unwrap().set_len(size + alignment - 1);
@ -330,10 +329,8 @@ impl MemArena {
// Otherwise create a new shared block.
else {
// Update stats
self.stat_space_occupied.set(
self.stat_space_occupied.get() +
next_size,
);
self.stat_space_occupied
.set(self.stat_space_occupied.get() + next_size);
blocks.push(Vec::with_capacity(next_size));
let block_count = blocks.len();

View File

@ -23,7 +23,7 @@
mod matrices;
pub use matrices::NUM_DIMENSIONS;
use matrices::{SIZE, MATRICES};
use matrices::{MATRICES, SIZE};
/// Compute one component of the Sobol'-sequence, where the component
/// corresponds to the dimension parameter, and the index specifies