Switched all uninitialized memory to use MaybeUninit.

parent 452a29a95c
commit 152d265c82
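Every change below follows one pattern: memory that used to come from the since-deprecated `std::mem::uninitialized()`, or that the arena handed out as `&mut T`, is now typed as `MaybeUninit<T>`, written through `as_mut_ptr()`, and reinterpreted as initialized only after every slot has actually been filled. A minimal, self-contained sketch of that life cycle, independent of this codebase:

    use std::mem::MaybeUninit;

    fn main() {
        // 1. Reserve storage without claiming it holds a valid value.
        let mut slot: MaybeUninit<[u32; 4]> = MaybeUninit::uninit();

        // 2. Write through the raw pointer; nothing is read or dropped here.
        unsafe {
            *slot.as_mut_ptr() = [1, 2, 3, 4];
        }

        // 3. Only after the write is it sound to assert initialization.
        let value = unsafe { slot.assume_init() };
        assert_eq!(value[2], 3);
    }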
@@ -4,6 +4,8 @@
 
 #![allow(dead_code)]
 
+use std::mem::{transmute, MaybeUninit};
+
 use mem_arena::MemArena;
 
 use crate::{
@@ -71,7 +73,7 @@ impl<'a> BVH4<'a> {
         } else {
             let base = BVHBase::from_objects(objects, objects_per_leaf, bounder);
 
-            let fill_node = unsafe { arena.alloc_uninitialized_with_alignment::<BVH4Node>(32) };
+            let fill_node = arena.alloc_uninitialized_with_alignment::<BVH4Node>(32);
             let node_count = BVH4::construct_from_base(
                 arena,
                 &base,
@@ -80,7 +82,7 @@ impl<'a> BVH4<'a> {
             );
 
             BVH4 {
-                root: Some(fill_node),
+                root: Some(unsafe { transmute(fill_node) }),
                 depth: (base.depth / 2) + 1,
                 node_count: node_count,
                 _bounds: {
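The `root` field still stores a plain initialized reference, so once `construct_from_base` has filled the node in, the `&mut MaybeUninit<BVH4Node>` is transmuted back to an ordinary reference. This leans on `MaybeUninit<T>` being `#[repr(transparent)]` over `T`: same size, same alignment. A hedged standalone sketch of that conversion:

    use std::mem::{transmute, MaybeUninit};

    fn init_and_expose(slot: &mut MaybeUninit<u64>) -> &u64 {
        unsafe {
            *slot.as_mut_ptr() = 42;
            // Sound only because the slot was just fully initialized and
            // MaybeUninit<T> is layout-compatible with T.
            transmute::<&mut MaybeUninit<u64>, &u64>(slot)
        }
    }

    fn main() {
        let mut slot = MaybeUninit::uninit();
        assert_eq!(*init_and_expose(&mut slot), 42);
    }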
@@ -184,7 +186,7 @@ impl<'a> BVH4<'a> {
         arena: &'a MemArena,
         base: &BVHBase,
         node: &BVHBaseNode,
-        fill_node: &mut BVH4Node<'a>,
+        fill_node: &mut MaybeUninit<BVH4Node<'a>>,
     ) -> usize {
         let mut node_count = 0;
 
@@ -282,8 +284,7 @@ impl<'a> BVH4<'a> {
                     .max()
                     .unwrap();
                 debug_assert!(bounds_len >= 1);
-                let bounds =
-                    unsafe { arena.alloc_array_uninitialized_with_alignment(bounds_len, 32) };
+                let bounds = arena.alloc_array_uninitialized_with_alignment(bounds_len, 32);
                 if bounds_len < 2 {
                     let b1 =
                         children[0].map_or(BBox::new(), |c| base.bounds[c.bounds_range().0]);
@@ -293,7 +294,9 @@ impl<'a> BVH4<'a> {
                         children[2].map_or(BBox::new(), |c| base.bounds[c.bounds_range().0]);
                     let b4 =
                         children[3].map_or(BBox::new(), |c| base.bounds[c.bounds_range().0]);
-                    bounds[0] = BBox4::from_bboxes(b1, b2, b3, b4);
+                    unsafe {
+                        *bounds[0].as_mut_ptr() = BBox4::from_bboxes(b1, b2, b3, b4);
+                    }
                 } else {
                     for (i, b) in bounds.iter_mut().enumerate() {
                         let time = i as f32 / (bounds_len - 1) as f32;
@@ -314,34 +317,39 @@ impl<'a> BVH4<'a> {
                             let (x, y) = c.bounds_range();
                             lerp_slice(&base.bounds[x..y], time)
                         });
-                        *b = BBox4::from_bboxes(b1, b2, b3, b4);
+                        unsafe {
+                            *b.as_mut_ptr() = BBox4::from_bboxes(b1, b2, b3, b4);
+                        }
                    }
                }
                bounds
            };

            // Construct child nodes
-            let child_nodes = unsafe {
-                arena.alloc_array_uninitialized_with_alignment::<BVH4Node>(child_count, 32)
-            };
+            let child_nodes =
+                arena.alloc_array_uninitialized_with_alignment::<BVH4Node>(child_count, 32);
            for (i, c) in children[0..child_count].iter().enumerate() {
                node_count +=
                    BVH4::construct_from_base(arena, base, c.unwrap(), &mut child_nodes[i]);
            }

            // Build this node
-            *fill_node = BVH4Node::Internal {
-                bounds: bounds,
-                children: child_nodes,
-                traversal_code: calc_traversal_code(split_info),
-            };
+            unsafe {
+                *fill_node.as_mut_ptr() = BVH4Node::Internal {
+                    bounds: transmute(bounds),
+                    children: transmute(child_nodes),
+                    traversal_code: calc_traversal_code(split_info),
+                };
+            }
        }

        // Create internal node
        &BVHBaseNode::Leaf { object_range, .. } => {
-            *fill_node = BVH4Node::Leaf {
-                object_range: object_range,
-            };
+            unsafe {
+                *fill_node.as_mut_ptr() = BVH4Node::Leaf {
+                    object_range: object_range,
+                };
+            }
            node_count += 1;
        }
    }
 
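The same discipline covers slices: the allocation hands back `&mut [MaybeUninit<...>]`, each element is written exactly once, and the fully written slice is transmuted to its initialized type when the node is assembled. A standalone sketch; on newer compilers, `MaybeUninit::write` (stable since Rust 1.55) expresses the element store without a raw pointer:

    use std::mem::{transmute, MaybeUninit};

    fn fill_squares(out: &mut [MaybeUninit<u32>]) -> &[u32] {
        for (i, slot) in out.iter_mut().enumerate() {
            // Same effect as `unsafe { *slot.as_mut_ptr() = ... }`.
            slot.write((i * i) as u32);
        }
        // Every element was written above, so this reinterpretation is sound.
        unsafe { transmute::<&mut [MaybeUninit<u32>], &[u32]>(out) }
    }

    fn main() {
        let mut buf = [MaybeUninit::uninit(); 5];
        assert_eq!(fill_squares(&mut buf), &[0, 1, 4, 9, 16]);
    }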
@@ -1,3 +1,5 @@
+use std::mem::{transmute, MaybeUninit};
+
 use mem_arena::MemArena;
 
 use crate::{
@@ -74,11 +76,11 @@ impl<'a> LightTree<'a> {
         let mut builder = LightTreeBuilder::new();
         builder.recursive_build(0, 0, objects, &info_getter);
 
-        let root = unsafe { arena.alloc_uninitialized::<Node>() };
+        let root = arena.alloc_uninitialized::<Node>();
         LightTree::construct_from_builder(arena, &builder, builder.root_node_index(), root);
 
         LightTree {
-            root: Some(root),
+            root: Some(unsafe { transmute(root) }),
             depth: builder.depth,
         }
     }
@@ -88,25 +90,27 @@ impl<'a> LightTree<'a> {
         arena: &'a MemArena,
         base: &LightTreeBuilder,
         node_index: usize,
-        node_mem: &mut Node<'a>,
+        node_mem: &mut MaybeUninit<Node<'a>>,
     ) {
         if base.nodes[node_index].is_leaf {
             // Leaf
             let bounds_range = base.nodes[node_index].bounds_range;
             let bounds = arena.copy_slice(&base.bounds[bounds_range.0..bounds_range.1]);
 
-            *node_mem = Node::Leaf {
-                light_index: base.nodes[node_index].child_index,
-                bounds: bounds,
-                energy: base.nodes[node_index].energy,
-            };
+            unsafe {
+                *node_mem.as_mut_ptr() = Node::Leaf {
+                    light_index: base.nodes[node_index].child_index,
+                    bounds: bounds,
+                    energy: base.nodes[node_index].energy,
+                };
+            }
         } else {
             // Inner
             let bounds_range = base.nodes[node_index].bounds_range;
             let bounds = arena.copy_slice(&base.bounds[bounds_range.0..bounds_range.1]);
 
             let child_count = base.node_child_count(node_index);
-            let children = unsafe { arena.alloc_array_uninitialized::<Node>(child_count) };
+            let children = arena.alloc_array_uninitialized::<Node>(child_count);
             for i in 0..child_count {
                 LightTree::construct_from_builder(
                     arena,
@@ -116,11 +120,13 @@ impl<'a> LightTree<'a> {
                 );
             }
 
-            *node_mem = Node::Inner {
-                children: children,
-                bounds: bounds,
-                energy: base.nodes[node_index].energy,
-            };
+            unsafe {
+                *node_mem.as_mut_ptr() = Node::Inner {
+                    children: transmute(children),
+                    bounds: bounds,
+                    energy: base.nodes[node_index].energy,
+                };
+            }
         }
     }
 }
 
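`construct_from_builder` now takes its destination as `&mut MaybeUninit<Node>`: an out-parameter where the caller allocates and the callee promises to write before returning. That contract is what later justifies the `transmute(root)` above. The shape in miniature, with a hypothetical `Node` type:

    use std::mem::MaybeUninit;

    struct Node {
        value: u32,
    }

    // Contract: `out` is fully written before this function returns.
    fn build_node(value: u32, out: &mut MaybeUninit<Node>) {
        unsafe {
            *out.as_mut_ptr() = Node { value };
        }
    }

    fn main() {
        let mut slot = MaybeUninit::uninit();
        build_node(7, &mut slot);
        // The caller may assume init only because of build_node's contract.
        let node = unsafe { slot.assume_init() };
        assert_eq!(node.value, 7);
    }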
@@ -1,6 +1,9 @@
 #![allow(dead_code)]
 
-use std::cmp::{self, Ordering};
+use std::{
+    cmp::{self, Ordering},
+    mem::MaybeUninit,
+};
 
 use crate::{
     hash::hash_u64,
@@ -260,8 +263,12 @@ pub fn merge_slices_append<T: Lerp + Copy, F>(
 
 /// Merges two slices of things, storing the result in `slice_out`.
 /// Panics if `slice_out` is not the right size.
-pub fn merge_slices_to<T: Lerp + Copy, F>(slice1: &[T], slice2: &[T], slice_out: &mut [T], merge: F)
-where
+pub fn merge_slices_to<T: Lerp + Copy, F>(
+    slice1: &[T],
+    slice2: &[T],
+    slice_out: &mut [MaybeUninit<T>],
+    merge: F,
+) where
     F: Fn(&T, &T) -> T,
 {
     assert_eq!(slice_out.len(), cmp::max(slice1.len(), slice2.len()));
@@ -274,19 +281,25 @@ where
             slice_out.iter_mut(),
             Iterator::zip(slice1.iter(), slice2.iter()),
         ) {
-            *xfo = merge(xf1, xf2);
+            unsafe {
+                *xfo.as_mut_ptr() = merge(xf1, xf2);
+            }
         }
     } else if slice1.len() > slice2.len() {
         let s = (slice1.len() - 1) as f32;
         for (i, (xfo, xf1)) in Iterator::zip(slice_out.iter_mut(), slice1.iter()).enumerate() {
             let xf2 = lerp_slice(slice2, i as f32 / s);
-            *xfo = merge(xf1, &xf2);
+            unsafe {
+                *xfo.as_mut_ptr() = merge(xf1, &xf2);
+            }
         }
     } else if slice1.len() < slice2.len() {
         let s = (slice2.len() - 1) as f32;
         for (i, (xfo, xf2)) in Iterator::zip(slice_out.iter_mut(), slice2.iter()).enumerate() {
             let xf1 = lerp_slice(slice1, i as f32 / s);
-            *xfo = merge(&xf1, xf2);
+            unsafe {
+                *xfo.as_mut_ptr() = merge(&xf1, xf2);
+            }
         }
     }
 }
 
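Typing the output as `&mut [MaybeUninit<T>]` makes `merge_slices_to` honest about its contract: the destination need not hold valid values on entry, and the length assert plus the three branches together guarantee every slot gets written. A reduced sketch of the same shape:

    use std::mem::MaybeUninit;

    // Pairwise sum of two equal-length slices into uninitialized output.
    fn sum_to(a: &[f32], b: &[f32], out: &mut [MaybeUninit<f32>]) {
        assert_eq!(a.len(), b.len());
        assert_eq!(out.len(), a.len());
        for (o, (x, y)) in out.iter_mut().zip(a.iter().zip(b.iter())) {
            unsafe {
                *o.as_mut_ptr() = x + y;
            }
        }
    }

    fn main() {
        let mut out = [MaybeUninit::uninit(); 3];
        sum_to(&[1.0, 2.0, 3.0], &[10.0, 20.0, 30.0], &mut out);
        assert_eq!(unsafe { out[0].assume_init() }, 11.0);
    }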
@@ -38,44 +38,49 @@ impl<'a> TriangleMesh<'a> {
         // Copy verts over to a contiguous area of memory, reorganizing them
         // so that each vertices' time samples are contiguous in memory.
         let vertices = {
-            let vertices =
-                unsafe { arena.alloc_array_uninitialized(vert_count * time_sample_count) };
+            let vertices = arena.alloc_array_uninitialized(vert_count * time_sample_count);
 
             for vi in 0..vert_count {
                 for ti in 0..time_sample_count {
-                    vertices[(vi * time_sample_count) + ti] = verts[ti][vi];
+                    unsafe {
+                        *vertices[(vi * time_sample_count) + ti].as_mut_ptr() = verts[ti][vi];
+                    }
                 }
             }
 
-            vertices
+            unsafe { std::mem::transmute(vertices) }
         };
 
         // Copy vertex normals, if any, organizing them the same as vertices
         // above.
         let normals = match vert_normals {
             Some(ref vnors) => {
-                let normals =
-                    unsafe { arena.alloc_array_uninitialized(vert_count * time_sample_count) };
+                let normals = arena.alloc_array_uninitialized(vert_count * time_sample_count);
 
                 for vi in 0..vert_count {
                     for ti in 0..time_sample_count {
-                        normals[(vi * time_sample_count) + ti] = vnors[ti][vi];
+                        unsafe {
+                            *normals[(vi * time_sample_count) + ti].as_mut_ptr() = vnors[ti][vi];
+                        }
                     }
                 }
 
-                Some(&normals[..])
+                unsafe { Some(std::mem::transmute(&normals[..])) }
             }
 
             None => None,
         };
 
         // Copy triangle vertex indices over, appending the triangle index itself to the tuple
-        let indices = {
-            let indices = unsafe { arena.alloc_array_uninitialized(tri_indices.len()) };
+        let indices: &mut [(u32, u32, u32, u32)] = {
+            let indices = arena.alloc_array_uninitialized(tri_indices.len());
             for (i, tri_i) in tri_indices.iter().enumerate() {
-                indices[i] = (tri_i.0 as u32, tri_i.2 as u32, tri_i.1 as u32, i as u32);
+                unsafe {
+                    *indices[i].as_mut_ptr() =
+                        (tri_i.0 as u32, tri_i.2 as u32, tri_i.1 as u32, i as u32);
+                }
             }
-            indices
+            unsafe { std::mem::transmute(indices) }
         };
 
         // Create bounds array for use during BVH construction
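The nested loops write index `(vi * time_sample_count) + ti` for every `(vi, ti)` pair, hitting each of the `vert_count * time_sample_count` slots exactly once; that full coverage is what makes the closing `transmute` to an initialized slice defensible. The same interleaving in miniature:

    use std::mem::{transmute, MaybeUninit};

    fn main() {
        let verts = [[1, 2, 3], [10, 20, 30]]; // 2 time samples of 3 vertices
        let (time_samples, vert_count) = (2, 3);

        let mut buf = vec![MaybeUninit::<i32>::uninit(); vert_count * time_samples];
        for vi in 0..vert_count {
            for ti in 0..time_samples {
                unsafe {
                    // Each (vi, ti) pair targets a distinct slot.
                    *buf[(vi * time_samples) + ti].as_mut_ptr() = verts[ti][vi];
                }
            }
        }

        // All slots written, so viewing the buffer as initialized is sound.
        let vertices: &[i32] = unsafe { transmute::<&[MaybeUninit<i32>], &[i32]>(&buf) };
        assert_eq!(vertices, &[1, 10, 2, 20, 3, 30]);
    }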
@@ -140,22 +145,27 @@ impl<'a> Surface for TriangleMesh<'a> {
         let is_cached = ray_stack.ray_count_in_next_task() >= tri_count
             && self.time_sample_count == 1
             && space.len() <= 1;
-        let mut tri_cache = [unsafe { std::mem::uninitialized() }; MAX_LEAF_TRIANGLE_COUNT];
+        let mut tri_cache = [std::mem::MaybeUninit::uninit(); MAX_LEAF_TRIANGLE_COUNT];
         if is_cached {
             for tri_idx in idx_range.clone() {
                 let i = tri_idx - idx_range.start;
                 let tri_indices = self.indices[tri_idx];
 
                 // For static triangles with static transforms, cache them.
-                tri_cache[i] = (
-                    self.vertices[tri_indices.0 as usize],
-                    self.vertices[tri_indices.1 as usize],
-                    self.vertices[tri_indices.2 as usize],
-                );
-                if !space.is_empty() {
-                    tri_cache[i].0 = tri_cache[i].0 * static_mat_space;
-                    tri_cache[i].1 = tri_cache[i].1 * static_mat_space;
-                    tri_cache[i].2 = tri_cache[i].2 * static_mat_space;
+                unsafe {
+                    *tri_cache[i].as_mut_ptr() = (
+                        self.vertices[tri_indices.0 as usize],
+                        self.vertices[tri_indices.1 as usize],
+                        self.vertices[tri_indices.2 as usize],
+                    );
+                    if !space.is_empty() {
+                        (*tri_cache[i].as_mut_ptr()).0 =
+                            (*tri_cache[i].as_mut_ptr()).0 * static_mat_space;
+                        (*tri_cache[i].as_mut_ptr()).1 =
+                            (*tri_cache[i].as_mut_ptr()).1 * static_mat_space;
+                        (*tri_cache[i].as_mut_ptr()).2 =
+                            (*tri_cache[i].as_mut_ptr()).2 * static_mat_space;
+                    }
                 }
             }
         }
@@ -180,9 +190,9 @@ impl<'a> Surface for TriangleMesh<'a> {
 
                     // Iterate through the triangles and test the ray against them.
                     let mut non_shadow_hit = false;
-                    let mut hit_tri = unsafe { std::mem::uninitialized() };
-                    let mut hit_tri_indices = unsafe { std::mem::uninitialized() };
-                    let mut hit_tri_data = unsafe { std::mem::uninitialized() };
+                    let mut hit_tri = std::mem::MaybeUninit::uninit();
+                    let mut hit_tri_indices = std::mem::MaybeUninit::uninit();
+                    let mut hit_tri_data = std::mem::MaybeUninit::uninit();
                     let ray_pre = triangle::RayTriPrecompute::new(rays.dir(ray_idx));
                     for tri_idx in idx_range.clone() {
                         let tri_indices = self.indices[tri_idx];
@@ -190,7 +200,7 @@ impl<'a> Surface for TriangleMesh<'a> {
                         // Get triangle if necessary
                         let tri = if is_cached {
                             let i = tri_idx - idx_range.start;
-                            tri_cache[i]
+                            unsafe { tri_cache[i].assume_init() }
                         } else {
                             let mut tri = if self.time_sample_count == 1 {
                                 // No deformation motion blur, so fast-path it.
@@ -241,16 +251,19 @@ impl<'a> Surface for TriangleMesh<'a> {
                             } else {
                                 non_shadow_hit = true;
                                 rays.set_max_t(ray_idx, t);
-                                hit_tri = tri;
-                                hit_tri_indices = tri_indices;
-                                hit_tri_data = (t, b0, b1, b2);
+                                unsafe {
+                                    *hit_tri.as_mut_ptr() = tri;
+                                    *hit_tri_indices.as_mut_ptr() = tri_indices;
+                                    *hit_tri_data.as_mut_ptr() = (t, b0, b1, b2);
+                                }
                             }
                         }
                     }
 
                     // Calculate intersection data if necessary.
                     if non_shadow_hit {
-                        let (t, b0, b1, b2) = hit_tri_data;
+                        let hit_tri = unsafe { hit_tri.assume_init() };
+                        let (t, b0, b1, b2) = unsafe { hit_tri_data.assume_init() };
 
                         // Calculate intersection point and error magnitudes
                         let (pos, pos_err) = triangle::surface_point(hit_tri, (b0, b1, b2));
@@ -261,6 +274,7 @@ impl<'a> Surface for TriangleMesh<'a> {
 
                         // Calculate interpolated surface normal, if any
                         let shading_normal = if let Some(normals) = self.normals {
+                            let hit_tri_indices = unsafe { hit_tri_indices.assume_init() };
                             let n0_slice = &normals[(hit_tri_indices.0 as usize
                                 * self.time_sample_count)
                                 ..((hit_tri_indices.0 as usize + 1) * self.time_sample_count)];
 
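The intersection path replaces the old `std::mem::uninitialized()` locals (undefined behavior for most types, and deprecated since Rust 1.39) with `MaybeUninit::uninit()`, reading them back with `assume_init()` only on paths where a write is known to have happened, guarded here by `is_cached` and `non_shadow_hit`. In miniature:

    use std::mem::MaybeUninit;

    fn main() {
        const N: usize = 4;
        let mut cache = [MaybeUninit::<(f32, f32)>::uninit(); N];
        let filled = true; // stands in for the `is_cached` flag

        if filled {
            for (i, slot) in cache.iter_mut().enumerate() {
                unsafe {
                    *slot.as_mut_ptr() = (i as f32, i as f32 * 2.0);
                }
            }
        }

        if filled {
            // Read back only on the path where the writes are known to have run.
            assert_eq!(unsafe { cache[1].assume_init() }, (1.0, 2.0));
        }
    }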
@@ -1,9 +1,12 @@
-use std::cmp;
+use std::{
+    cmp,
+    mem::{transmute, MaybeUninit},
+};
 
 use crate::{algorithm::merge_slices_to, math::Matrix4x4};
 
 pub struct TransformStack {
-    stack: Vec<Matrix4x4>,
+    stack: Vec<MaybeUninit<Matrix4x4>>,
     stack_indices: Vec<usize>,
 }
 
|
@ -31,6 +34,7 @@ impl TransformStack {
|
||||||
assert!(!xforms.is_empty());
|
assert!(!xforms.is_empty());
|
||||||
|
|
||||||
if self.stack.is_empty() {
|
if self.stack.is_empty() {
|
||||||
|
let xforms: &[MaybeUninit<Matrix4x4>] = unsafe { transmute(xforms) };
|
||||||
self.stack.extend(xforms);
|
self.stack.extend(xforms);
|
||||||
} else {
|
} else {
|
||||||
let sil = self.stack_indices.len();
|
let sil = self.stack_indices.len();
|
||||||
|
@@ -46,7 +50,12 @@ impl TransformStack {
                 unsafe { self.stack.set_len(l + maxlen) };
             }
             let (xfs1, xfs2) = self.stack.split_at_mut(i2);
-            merge_slices_to(&xfs1[i1..i2], xforms, xfs2, |xf1, xf2| *xf1 * *xf2);
+            merge_slices_to(
+                unsafe { transmute(&xfs1[i1..i2]) },
+                xforms,
+                xfs2,
+                |xf1, xf2| *xf1 * *xf2,
+            );
         }
 
         self.stack_indices.push(self.stack.len());
@@ -69,6 +78,6 @@ impl TransformStack {
         let i1 = self.stack_indices[sil - 2];
         let i2 = self.stack_indices[sil - 1];
 
-        &self.stack[i1..i2]
+        unsafe { transmute(&self.stack[i1..i2]) }
     }
 }
 
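`TransformStack` stores `Vec<MaybeUninit<Matrix4x4>>` so its `set_len` call can expose not-yet-merged slots without lying about initialization. Note the two transmutes run in opposite directions: `&[T]` to `&[MaybeUninit<T>]` is always sound, since forgetting that data is initialized loses nothing, while the reverse in `top()` is sound only because every slot below the recorded indices has been written. Both directions in a sketch:

    use std::mem::{transmute, MaybeUninit};

    fn main() {
        let initialized: &[f64] = &[1.0, 2.0, 3.0];

        // Always sound: wrapping initialized data in MaybeUninit.
        let wrapped: &[MaybeUninit<f64>] = unsafe { transmute(initialized) };

        // Sound here only because `wrapped` really is initialized end to end.
        let unwrapped: &[f64] = unsafe { transmute(wrapped) };
        assert_eq!(unwrapped, initialized);
    }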
@@ -5,7 +5,8 @@
 use std::{
     cell::{Cell, RefCell},
     cmp::max,
-    mem::{align_of, size_of},
+    fmt,
+    mem::{align_of, size_of, transmute, MaybeUninit},
     slice,
 };
 
@@ -26,15 +27,27 @@ fn alignment_offset(addr: usize, alignment: usize) -> usize {
 ///
 /// Additionally, it attempts to minimize wasted space through some heuristics. By
 /// default, it tries to keep memory waste within the arena below 10%.
-#[derive(Debug, Default)]
+#[derive(Default)]
 pub struct MemArena {
-    blocks: RefCell<Vec<Vec<u8>>>,
+    blocks: RefCell<Vec<Vec<MaybeUninit<u8>>>>,
     min_block_size: usize,
     max_waste_percentage: usize,
     stat_space_occupied: Cell<usize>,
     stat_space_allocated: Cell<usize>,
 }
 
+impl fmt::Debug for MemArena {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("MemArena")
+            .field("blocks.len():", &self.blocks.borrow().len())
+            .field("min_block_size", &self.min_block_size)
+            .field("max_waste_percentage", &self.max_waste_percentage)
+            .field("stat_space_occupied", &self.stat_space_occupied)
+            .field("stat_space_allocated", &self.stat_space_allocated)
+            .finish()
+    }
+}
+
 impl MemArena {
     /// Create a new arena, with default minimum block size.
     pub fn new() -> MemArena {
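`#[derive(Debug)]` had to go here, presumably because deriving requires every field to be `Debug` and, at the time of this commit, `MaybeUninit<u8>` had no `Debug` impl (one landed in a later Rust release); dumping possibly uninitialized block bytes would be meaningless anyway. The hand-written impl reports block count and stats instead. The constraint in miniature:

    use std::fmt;
    use std::mem::MaybeUninit;

    struct Buffer {
        bytes: Vec<MaybeUninit<u8>>,
    }

    // A manual impl can describe the container without touching the
    // (possibly uninitialized) contents.
    impl fmt::Debug for Buffer {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            f.debug_struct("Buffer")
                .field("len", &self.bytes.len())
                .finish()
        }
    }

    fn main() {
        let buf = Buffer { bytes: vec![MaybeUninit::uninit(); 16] };
        println!("{:?}", buf); // Buffer { len: 16 }
    }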
@@ -111,9 +124,11 @@ impl MemArena {
 
     /// Allocates memory for and initializes a type T, returning a mutable reference to it.
     pub fn alloc<T: Copy>(&self, value: T) -> &mut T {
-        let memory = unsafe { self.alloc_uninitialized() };
-        *memory = value;
-        memory
+        let memory = self.alloc_uninitialized();
+        unsafe {
+            *memory.as_mut_ptr() = value;
+        }
+        unsafe { transmute(memory) }
     }
 
     /// Allocates memory for and initializes a type T, returning a mutable reference to it.
@@ -121,20 +136,22 @@ impl MemArena {
     /// Additionally, the allocation will be made with the given byte alignment or
     /// the type's inherent alignment, whichever is greater.
     pub fn alloc_with_alignment<T: Copy>(&self, value: T, align: usize) -> &mut T {
-        let memory = unsafe { self.alloc_uninitialized_with_alignment(align) };
-        *memory = value;
-        memory
+        let memory = self.alloc_uninitialized_with_alignment(align);
+        unsafe {
+            *memory.as_mut_ptr() = value;
+        }
+        unsafe { transmute(memory) }
     }
 
     /// Allocates memory for a type `T`, returning a mutable reference to it.
     ///
     /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
-    pub unsafe fn alloc_uninitialized<T: Copy>(&self) -> &mut T {
+    pub fn alloc_uninitialized<T: Copy>(&self) -> &mut MaybeUninit<T> {
         assert!(size_of::<T>() > 0);
 
-        let memory = self.alloc_raw(size_of::<T>(), align_of::<T>()) as *mut T;
+        let memory = self.alloc_raw(size_of::<T>(), align_of::<T>()) as *mut MaybeUninit<T>;
 
-        memory.as_mut().unwrap()
+        unsafe { memory.as_mut().unwrap() }
     }
 
     /// Allocates memory for a type `T`, returning a mutable reference to it.
@@ -143,24 +160,27 @@ impl MemArena {
     /// the type's inherent alignment, whichever is greater.
     ///
     /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
-    pub unsafe fn alloc_uninitialized_with_alignment<T: Copy>(&self, align: usize) -> &mut T {
+    pub fn alloc_uninitialized_with_alignment<T: Copy>(&self, align: usize) -> &mut MaybeUninit<T> {
         assert!(size_of::<T>() > 0);
 
-        let memory = self.alloc_raw(size_of::<T>(), max(align, align_of::<T>())) as *mut T;
+        let memory =
+            self.alloc_raw(size_of::<T>(), max(align, align_of::<T>())) as *mut MaybeUninit<T>;
 
-        memory.as_mut().unwrap()
+        unsafe { memory.as_mut().unwrap() }
     }
 
     /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
     /// All elements are initialized to the given `value`.
     pub fn alloc_array<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
-        let memory = unsafe { self.alloc_array_uninitialized(len) };
+        let memory = self.alloc_array_uninitialized(len);
 
         for v in memory.iter_mut() {
-            *v = value;
+            unsafe {
+                *v.as_mut_ptr() = value;
+            }
         }
 
-        memory
+        unsafe { transmute(memory) }
     }
 
     /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
@@ -174,25 +194,29 @@ impl MemArena {
         value: T,
         align: usize,
     ) -> &mut [T] {
-        let memory = unsafe { self.alloc_array_uninitialized_with_alignment(len, align) };
+        let memory = self.alloc_array_uninitialized_with_alignment(len, align);
 
         for v in memory.iter_mut() {
-            *v = value;
+            unsafe {
+                *v.as_mut_ptr() = value;
+            }
         }
 
-        memory
+        unsafe { transmute(memory) }
     }
 
     /// Allocates and initializes memory to duplicate the given slice, returning a mutable slice
     /// to the new copy.
     pub fn copy_slice<T: Copy>(&self, other: &[T]) -> &mut [T] {
-        let memory = unsafe { self.alloc_array_uninitialized(other.len()) };
+        let memory = self.alloc_array_uninitialized(other.len());
 
         for (v, other) in memory.iter_mut().zip(other.iter()) {
-            *v = *other;
+            unsafe {
+                *v.as_mut_ptr() = *other;
+            }
         }
 
-        memory
+        unsafe { transmute(memory) }
     }
 
     /// Allocates and initializes memory to duplicate the given slice, returning a mutable slice
@@ -201,19 +225,21 @@ impl MemArena {
     /// Additionally, the allocation will be made with the given byte alignment or
     /// the type's inherent alignment, whichever is greater.
     pub fn copy_slice_with_alignment<T: Copy>(&self, other: &[T], align: usize) -> &mut [T] {
-        let memory = unsafe { self.alloc_array_uninitialized_with_alignment(other.len(), align) };
+        let memory = self.alloc_array_uninitialized_with_alignment(other.len(), align);
 
         for (v, other) in memory.iter_mut().zip(other.iter()) {
-            *v = *other;
+            unsafe {
+                *v.as_mut_ptr() = *other;
+            }
         }
 
-        memory
+        unsafe { transmute(memory) }
     }
 
     /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
     ///
     /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
-    pub unsafe fn alloc_array_uninitialized<T: Copy>(&self, len: usize) -> &mut [T] {
+    pub fn alloc_array_uninitialized<T: Copy>(&self, len: usize) -> &mut [MaybeUninit<T>] {
         assert!(size_of::<T>() > 0);
 
         let array_mem_size = {
@@ -222,9 +248,9 @@ impl MemArena {
             aligned_type_size * len
         };
 
-        let memory = self.alloc_raw(array_mem_size, align_of::<T>()) as *mut T;
+        let memory = self.alloc_raw(array_mem_size, align_of::<T>()) as *mut MaybeUninit<T>;
 
-        slice::from_raw_parts_mut(memory, len)
+        unsafe { slice::from_raw_parts_mut(memory, len) }
     }
 
     /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
@@ -233,11 +259,11 @@ impl MemArena {
     /// the type's inherent alignment, whichever is greater.
     ///
     /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
-    pub unsafe fn alloc_array_uninitialized_with_alignment<T: Copy>(
+    pub fn alloc_array_uninitialized_with_alignment<T: Copy>(
         &self,
         len: usize,
         align: usize,
-    ) -> &mut [T] {
+    ) -> &mut [MaybeUninit<T>] {
         assert!(size_of::<T>() > 0);
 
         let array_mem_size = {
@@ -246,9 +272,10 @@ impl MemArena {
             aligned_type_size * len
         };
 
-        let memory = self.alloc_raw(array_mem_size, max(align, align_of::<T>())) as *mut T;
+        let memory =
+            self.alloc_raw(array_mem_size, max(align, align_of::<T>())) as *mut MaybeUninit<T>;
 
-        slice::from_raw_parts_mut(memory, len)
+        unsafe { slice::from_raw_parts_mut(memory, len) }
     }
 
     /// Allocates space with a given size and alignment.
@@ -257,7 +284,7 @@
     ///
     /// CAUTION: this returns uninitialized memory. Make sure to initialize the
     /// memory after calling.
-    unsafe fn alloc_raw(&self, size: usize, alignment: usize) -> *mut u8 {
+    fn alloc_raw(&self, size: usize, alignment: usize) -> *mut MaybeUninit<u8> {
         assert!(alignment > 0);
 
         self.stat_space_allocated
@@ -279,10 +306,12 @@ impl MemArena {
 
             // If it will fit in the current block, use the current block.
             if (start_index + size) <= blocks.first().unwrap().capacity() {
-                blocks.first_mut().unwrap().set_len(start_index + size);
+                unsafe {
+                    blocks.first_mut().unwrap().set_len(start_index + size);
+                }
 
                 let block_ptr = blocks.first_mut().unwrap().as_mut_ptr();
-                return block_ptr.add(start_index);
+                return unsafe { block_ptr.add(start_index) };
             }
             // If it won't fit in the current block, create a new block and use that.
             else {
@@ -318,13 +347,15 @@ impl MemArena {
                     .set(self.stat_space_occupied.get() + size + alignment - 1);
 
                 blocks.push(Vec::with_capacity(size + alignment - 1));
-                blocks.last_mut().unwrap().set_len(size + alignment - 1);
+                unsafe {
+                    blocks.last_mut().unwrap().set_len(size + alignment - 1);
+                }
 
                 let start_index =
                     alignment_offset(blocks.last().unwrap().as_ptr() as usize, alignment);
 
                 let block_ptr = blocks.last_mut().unwrap().as_mut_ptr();
-                return block_ptr.add(start_index);
+                return unsafe { block_ptr.add(start_index) };
             }
             // Otherwise create a new shared block.
             else {
@@ -339,10 +370,12 @@ impl MemArena {
                 let start_index =
                     alignment_offset(blocks.first().unwrap().as_ptr() as usize, alignment);
 
-                blocks.first_mut().unwrap().set_len(start_index + size);
+                unsafe {
+                    blocks.first_mut().unwrap().set_len(start_index + size);
+                }
 
                 let block_ptr = blocks.first_mut().unwrap().as_mut_ptr();
-                return block_ptr.add(start_index);
+                return unsafe { block_ptr.add(start_index) };
             }
         }
     }
 
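The net effect on the arena API: `alloc_uninitialized*` and `alloc_raw` are no longer `unsafe fn`, because handing out `MaybeUninit` storage is itself safe; the `unsafe` keyword now marks exactly the spots where initialization is asserted. A caller-side sketch (the `Vec`-backed stand-in below replaces the real block allocator; only the shape of the signature mirrors this commit):

    use std::mem::{transmute, MaybeUninit};

    // Stand-in for MemArena::alloc_array_uninitialized; the real one
    // carves the storage out of an arena block instead of a fresh Vec.
    fn alloc_array_uninitialized<T: Copy>(len: usize) -> Vec<MaybeUninit<T>> {
        vec![MaybeUninit::uninit(); len]
    }

    fn main() {
        // Safe call: no promise yet about the contents.
        let mut mem = alloc_array_uninitialized::<u64>(8);

        // Initialize every slot...
        for (i, slot) in mem.iter_mut().enumerate() {
            unsafe {
                *slot.as_mut_ptr() = i as u64;
            }
        }

        // ...then assert initialization in one visible place.
        let ints: &[u64] = unsafe { transmute::<&[MaybeUninit<u64>], &[u64]>(&mem) };
        assert_eq!(ints[7], 7);
    }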