diff --git a/mem_arena/src/lib.rs b/mem_arena/src/lib.rs
index 97ebe50..bb1a44a 100644
--- a/mem_arena/src/lib.rs
+++ b/mem_arena/src/lib.rs
@@ -1,6 +1,7 @@
 use std::slice;
 use std::cell::{Cell, RefCell};
 use std::mem::{size_of, align_of};
+use std::cmp::max;
 
 const GROWTH_FRACTION: usize = 8; // 1/N (smaller number leads to bigger allocations)
 const DEFAULT_MIN_BLOCK_SIZE: usize = 1 << 10; // 1 KiB
@@ -120,6 +121,22 @@ impl MemArena {
         memory.as_mut().unwrap()
     }
 
+    /// Allocates memory for a type `T`, returning a mutable reference to it.
+    ///
+    /// Additionally, the allocation will be made with the given byte alignment or
+    /// the type's inherent alignment, whichever is greater.
+    ///
+    /// CAUTION: the memory returned is uninitialized. Make sure to initialize before using!
+    pub unsafe fn alloc_uninitialized_with_alignment<'a, T: Copy>(&'a self,
+                                                                  align: usize)
+                                                                  -> &'a mut T {
+        assert!(size_of::<T>() > 0);
+
+        let memory = self.alloc_raw(size_of::<T>(), max(align, align_of::<T>())) as *mut T;
+
+        memory.as_mut().unwrap()
+    }
+
     /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
     /// All elements are initialized to the given `value`.
     pub fn alloc_array<'a, T: Copy>(&'a self, len: usize, value: T) -> &'a mut [T] {
@@ -145,7 +162,8 @@ impl MemArena {
     }
 
     /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
-    /// All elements are initialized to the given `value`.
+    ///
+    /// CAUTION: the memory returned is uninitialized. Make sure to initialize before using!
     pub unsafe fn alloc_array_uninitialized<'a, T: Copy>(&'a self, len: usize) -> &'a mut [T] {
         assert!(size_of::<T>() > 0);
 
@@ -160,6 +178,29 @@ impl MemArena {
         slice::from_raw_parts_mut(memory, len)
     }
 
+    /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
+    ///
+    /// Additionally, the allocation will be made with the given byte alignment or
+    /// the type's inherent alignment, whichever is greater.
+    ///
+    /// CAUTION: the memory returned is uninitialized. Make sure to initialize before using!
+    pub unsafe fn alloc_array_uninitialized_with_alignment<'a, T: Copy>(&'a self,
+                                                                        len: usize,
+                                                                        align: usize)
+                                                                        -> &'a mut [T] {
+        assert!(size_of::<T>() > 0);
+
+        let array_mem_size = {
+            let alignment_padding = alignment_offset(size_of::<T>(), align_of::<T>());
+            let aligned_type_size = size_of::<T>() + alignment_padding;
+            aligned_type_size * len
+        };
+
+        let memory = self.alloc_raw(array_mem_size, max(align, align_of::<T>())) as *mut T;
+
+        slice::from_raw_parts_mut(memory, len)
+    }
+
     /// Allocates space with a given size and alignment.
     ///
     /// This is the work-horse code of the MemArena.
diff --git a/src/accel/bvh4.rs b/src/accel/bvh4.rs
index c5311f7..9bee85a 100644
--- a/src/accel/bvh4.rs
+++ b/src/accel/bvh4.rs
@@ -56,7 +56,7 @@ impl<'a> BVH4<'a> {
         } else {
             let base = BVHBase::from_objects(objects, objects_per_leaf, bounder);
 
-            let mut fill_node = unsafe { arena.alloc_uninitialized::<BVH4Node>() };
+            let mut fill_node = unsafe { arena.alloc_uninitialized_with_alignment::<BVH4Node>(32) };
             BVH4::construct_from_base(arena, &base, &base.nodes[base.root_node_index()], fill_node);
 
             BVH4 {
@@ -247,7 +247,8 @@ impl<'a> BVH4<'a> {
                         })
                         .max()
                         .unwrap();
-                    let mut bounds = unsafe { arena.alloc_array_uninitialized(bounds_len) };
+                    let mut bounds =
+                        unsafe { arena.alloc_array_uninitialized_with_alignment(bounds_len, 32) };
 
                     for (i, b) in bounds.iter_mut().enumerate() {
                         let time = i as f32 / (bounds_len - 1) as f32;
@@ -274,7 +275,9 @@ impl<'a> BVH4<'a> {
 
                 // Construct child nodes
                 let mut child_nodes =
-                    unsafe { arena.alloc_array_uninitialized::<BVH4Node>(child_count) };
+                    unsafe {
+                        arena.alloc_array_uninitialized_with_alignment::<BVH4Node>(child_count, 32)
+                    };
                 for (i, c) in children[0..child_count].iter().enumerate() {
                     BVH4::construct_from_base(arena, base, c.unwrap(), &mut child_nodes[i]);
                 }