Split out memory arena into an external crate.

This commit is contained in:
Nathan Vegdahl 2019-12-27 10:43:03 +09:00
parent 85503aec3b
commit 022c913757
20 changed files with 73 additions and 469 deletions

12
Cargo.lock generated
View File

@ -153,6 +153,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "halton"
version = "0.1.0"
[[package]]
name = "kioku"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
version = "1.3.0"
@ -183,10 +188,6 @@ dependencies = [
"glam 0.7.1 (git+https://github.com/bitshifter/glam-rs.git?rev=0f314f99)",
]
[[package]]
name = "mem_arena"
version = "0.1.0"
[[package]]
name = "memchr"
version = "2.2.1"
@ -297,9 +298,9 @@ dependencies = [
"glam 0.7.1 (git+https://github.com/bitshifter/glam-rs.git?rev=0f314f99)",
"half 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"halton 0.1.0",
"kioku 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"math3d 0.1.0",
"mem_arena 0.1.0",
"nom 5.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"oct32norm 0.1.0",
@ -655,6 +656,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
"checksum glam 0.7.1 (git+https://github.com/bitshifter/glam-rs.git?rev=0f314f99)" = "<none>"
"checksum half 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9353c2a89d550b58fa0061d8ed8d002a7d8cdf2494eb0e432859bd3a9e543836"
"checksum kioku 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "30217b27028ccf61ba75afa4f9bb7be8e4a66780d7882fdaacd9ea8e0cdede26"
"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14"
"checksum lexical-core 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2304bccb228c4b020f3a4835d247df0a02a7c4686098d4167762cfbbe4c5cb14"
"checksum libc 0.2.55 (registry+https://github.com/rust-lang/crates.io-index)" = "42914d39aad277d9e176efbdad68acb1d5443ab65afe0e0e4f0d49352a950880"

View File

@ -4,7 +4,6 @@ members = [
"sub_crates/color",
"sub_crates/halton",
"sub_crates/math3d",
"sub_crates/mem_arena",
"sub_crates/oct32norm",
"sub_crates/sobol",
"sub_crates/spectral_upsampling",
@ -31,6 +30,7 @@ lazy_static = "1.0"
nom = "5"
num_cpus = "1.8"
openexr = "0.6.0"
kioku = "0.3"
png_encode_mini = "0.1.2"
rustc-serialize = "0.3"
scoped_threadpool = "0.1"
@ -50,9 +50,6 @@ path = "sub_crates/halton"
[dependencies.math3d]
path = "sub_crates/math3d"
[dependencies.mem_arena]
path = "sub_crates/mem_arena"
[dependencies.oct32norm]
path = "sub_crates/oct32norm"

View File

@ -8,7 +8,7 @@ use std::mem::{transmute, MaybeUninit};
use glam::Vec4Mask;
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
bbox::BBox,
@ -56,7 +56,7 @@ pub enum BVH4Node<'a> {
impl<'a> BVH4<'a> {
pub fn from_objects<'b, T, F>(
arena: &'a MemArena,
arena: &'a Arena,
objects: &mut [T],
objects_per_leaf: usize,
bounder: F,
@ -74,7 +74,7 @@ impl<'a> BVH4<'a> {
} else {
let base = BVHBase::from_objects(objects, objects_per_leaf, bounder);
let fill_node = arena.alloc_uninitialized_with_alignment::<BVH4Node>(32);
let fill_node = arena.alloc_align_uninit::<BVH4Node>(32);
let node_count = BVH4::construct_from_base(
arena,
&base,
@ -184,7 +184,7 @@ impl<'a> BVH4<'a> {
}
fn construct_from_base(
arena: &'a MemArena,
arena: &'a Arena,
base: &BVHBase,
node: &BVHBaseNode,
fill_node: &mut MaybeUninit<BVH4Node<'a>>,
@ -285,7 +285,7 @@ impl<'a> BVH4<'a> {
.max()
.unwrap();
debug_assert!(bounds_len >= 1);
let bounds = arena.alloc_array_uninitialized_with_alignment(bounds_len, 32);
let bounds = arena.alloc_array_align_uninit(bounds_len, 32);
if bounds_len < 2 {
let b1 =
children[0].map_or(BBox::new(), |c| base.bounds[c.bounds_range().0]);
@ -327,8 +327,7 @@ impl<'a> BVH4<'a> {
};
// Construct child nodes
let child_nodes =
arena.alloc_array_uninitialized_with_alignment::<BVH4Node>(child_count, 32);
let child_nodes = arena.alloc_array_align_uninit::<BVH4Node>(child_count, 32);
for (i, c) in children[0..child_count].iter().enumerate() {
node_count +=
BVH4::construct_from_base(arena, base, c.unwrap(), &mut child_nodes[i]);

View File

@ -1,4 +1,4 @@
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
bbox::BBox,
@ -17,7 +17,7 @@ pub struct LightArray<'a> {
impl<'a> LightArray<'a> {
#[allow(dead_code)]
pub fn from_objects<'b, T, F>(
arena: &'a MemArena,
arena: &'a Arena,
objects: &mut [T],
info_getter: F,
) -> LightArray<'a>

View File

@ -1,6 +1,6 @@
use std::mem::{transmute, MaybeUninit};
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
algorithm::merge_slices_append,
@ -60,7 +60,7 @@ impl<'a> Node<'a> {
impl<'a> LightTree<'a> {
pub fn from_objects<'b, T, F>(
arena: &'a MemArena,
arena: &'a Arena,
objects: &mut [T],
info_getter: F,
) -> LightTree<'a>
@ -76,7 +76,7 @@ impl<'a> LightTree<'a> {
let mut builder = LightTreeBuilder::new();
builder.recursive_build(0, 0, objects, &info_getter);
let root = arena.alloc_uninitialized::<Node>();
let root = arena.alloc_uninit::<Node>();
LightTree::construct_from_builder(arena, &builder, builder.root_node_index(), root);
LightTree {
@ -87,7 +87,7 @@ impl<'a> LightTree<'a> {
}
fn construct_from_builder(
arena: &'a MemArena,
arena: &'a Arena,
base: &LightTreeBuilder,
node_index: usize,
node_mem: &mut MaybeUninit<Node<'a>>,
@ -110,7 +110,7 @@ impl<'a> LightTree<'a> {
let bounds = arena.copy_slice(&base.bounds[bounds_range.0..bounds_range.1]);
let child_count = base.node_child_count(node_index);
let children = arena.alloc_array_uninitialized::<Node>(child_count);
let children = arena.alloc_array_uninit::<Node>(child_count);
for i in 0..child_count {
LightTree::construct_from_builder(
arena,

View File

@ -1,6 +1,6 @@
#![allow(dead_code)]
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
lerp::lerp_slice,
@ -20,7 +20,7 @@ pub struct Camera<'a> {
impl<'a> Camera<'a> {
pub fn new(
arena: &'a MemArena,
arena: &'a Arena,
transforms: &[Matrix4x4],
fovs: &[f32],
mut aperture_radii: &[f32],

View File

@ -1,6 +1,6 @@
use std::f64::consts::PI as PI_64;
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
color::{Color, SpectralSample},
@ -22,7 +22,7 @@ pub struct DistantDiskLight<'a> {
impl<'a> DistantDiskLight<'a> {
pub fn new(
arena: &'a MemArena,
arena: &'a Arena,
radii: &[f32],
directions: &[Vector],
colors: &[Color],

View File

@ -1,4 +1,4 @@
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
bbox::BBox,
@ -29,7 +29,7 @@ pub struct RectangleLight<'a> {
impl<'a> RectangleLight<'a> {
pub fn new<'b>(
arena: &'b MemArena,
arena: &'b Arena,
dimensions: &[(f32, f32)],
colors: &[Color],
) -> RectangleLight<'b> {

View File

@ -1,6 +1,6 @@
use std::f64::consts::PI as PI_64;
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
bbox::BBox,
@ -31,7 +31,7 @@ pub struct SphereLight<'a> {
}
impl<'a> SphereLight<'a> {
pub fn new<'b>(arena: &'b MemArena, radii: &[f32], colors: &[Color]) -> SphereLight<'b> {
pub fn new<'b>(arena: &'b Arena, radii: &[f32], colors: &[Color]) -> SphereLight<'b> {
let bbs: Vec<_> = radii
.iter()
.map(|r| BBox {

View File

@ -45,7 +45,7 @@ use std::{fs::File, io, io::Read, mem, path::Path, str::FromStr};
use clap::{App, Arg};
use nom::bytes::complete::take_until;
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
accel::BVH4Node,
@ -246,7 +246,7 @@ fn main() {
println!("Building scene...");
}
let arena = MemArena::with_min_block_size((1 << 20) * 4);
let arena = Arena::new().with_block_size((1 << 20) * 4);
let mut r = parse_scene(&arena, child).unwrap_or_else(|e| {
e.print(&psy_contents);
panic!("Parse error.");
@ -331,25 +331,25 @@ fn main() {
// Print memory stats if stats are wanted.
if args.is_present("stats") {
let arena_stats = arena.stats();
let mib_occupied = arena_stats.0 as f64 / 1_048_576.0;
let mib_allocated = arena_stats.1 as f64 / 1_048_576.0;
// let arena_stats = arena.stats();
// let mib_occupied = arena_stats.0 as f64 / 1_048_576.0;
// let mib_allocated = arena_stats.1 as f64 / 1_048_576.0;
println!("MemArena stats:");
// println!("MemArena stats:");
if mib_occupied >= 1.0 {
println!("\tOccupied: {:.1} MiB", mib_occupied);
} else {
println!("\tOccupied: {:.4} MiB", mib_occupied);
}
// if mib_occupied >= 1.0 {
// println!("\tOccupied: {:.1} MiB", mib_occupied);
// } else {
// println!("\tOccupied: {:.4} MiB", mib_occupied);
// }
if mib_allocated >= 1.0 {
println!("\tUsed: {:.1} MiB", mib_allocated);
} else {
println!("\tUsed: {:.4} MiB", mib_allocated);
}
// if mib_allocated >= 1.0 {
// println!("\tUsed: {:.1} MiB", mib_allocated);
// } else {
// println!("\tUsed: {:.4} MiB", mib_allocated);
// }
println!("\tTotal blocks: {}", arena_stats.2);
// println!("\tTotal blocks: {}", arena_stats.2);
}
}
}

View File

@ -4,7 +4,7 @@ use std::{f32, result::Result};
use nom::{combinator::all_consuming, sequence::tuple, IResult};
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
camera::Camera,
@ -93,7 +93,7 @@ fn line_count_to_byte_offset(text: &str, offset: usize) -> usize {
/// Takes in a `DataTree` representing a Scene node and returns
pub fn parse_scene<'a>(
arena: &'a MemArena,
arena: &'a Arena,
tree: &'a DataTree,
) -> Result<Renderer<'a>, PsyParseError> {
// Verify we have the right number of each section
@ -350,7 +350,7 @@ fn parse_render_settings(tree: &DataTree) -> Result<((u32, u32), u32, u32), PsyP
};
}
fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a>, PsyParseError> {
fn parse_camera<'a>(arena: &'a Arena, tree: &'a DataTree) -> Result<Camera<'a>, PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree {
let mut mats = Vec::new();
let mut fovs = Vec::new();
@ -452,7 +452,7 @@ fn parse_camera<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<Camera<'a
}
}
fn parse_world<'a>(arena: &'a MemArena, tree: &'a DataTree) -> Result<World<'a>, PsyParseError> {
fn parse_world<'a>(arena: &'a Arena, tree: &'a DataTree) -> Result<World<'a>, PsyParseError> {
if tree.is_internal() {
let background_color;
let mut lights: Vec<&dyn WorldLightSource> = Vec::new();

View File

@ -2,7 +2,7 @@
use std::result::Result;
use mem_arena::MemArena;
use kioku::Arena;
use crate::scene::{Assembly, AssemblyBuilder, Object};
@ -15,7 +15,7 @@ use super::{
};
pub fn parse_assembly<'a>(
arena: &'a MemArena,
arena: &'a Arena,
tree: &'a DataTree,
) -> Result<Assembly<'a>, PsyParseError> {
let mut builder = AssemblyBuilder::new(arena);

View File

@ -4,7 +4,7 @@ use std::result::Result;
use nom::{combinator::all_consuming, sequence::tuple, IResult};
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
light::{DistantDiskLight, RectangleLight, SphereLight},
@ -18,7 +18,7 @@ use super::{
};
pub fn parse_distant_disk_light<'a>(
arena: &'a MemArena,
arena: &'a Arena,
tree: &'a DataTree,
) -> Result<DistantDiskLight<'a>, PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree {
@ -84,7 +84,7 @@ pub fn parse_distant_disk_light<'a>(
}
pub fn parse_sphere_light<'a>(
arena: &'a MemArena,
arena: &'a Arena,
tree: &'a DataTree,
) -> Result<SphereLight<'a>, PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree {
@ -133,7 +133,7 @@ pub fn parse_sphere_light<'a>(
}
pub fn parse_rectangle_light<'a>(
arena: &'a MemArena,
arena: &'a Arena,
tree: &'a DataTree,
) -> Result<RectangleLight<'a>, PsyParseError> {
if let DataTree::Internal { ref children, .. } = *tree {

View File

@ -4,7 +4,7 @@ use std::result::Result;
use nom::{sequence::tuple, IResult};
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
math::{Normal, Point},
@ -25,7 +25,7 @@ use super::{
// }
pub fn parse_mesh_surface<'a>(
arena: &'a MemArena,
arena: &'a Arena,
tree: &'a DataTree,
) -> Result<TriangleMesh<'a>, PsyParseError> {
let mut verts = Vec::new(); // Vec of vecs, one for each time sample

View File

@ -4,7 +4,7 @@ use std::result::Result;
use nom::{combinator::all_consuming, IResult};
use mem_arena::MemArena;
use kioku::Arena;
use crate::shading::{SimpleSurfaceShader, SurfaceShader};
@ -22,7 +22,7 @@ use super::{
// }
pub fn parse_surface_shader<'a>(
arena: &'a MemArena,
arena: &'a Arena,
tree: &'a DataTree,
) -> Result<&'a dyn SurfaceShader, PsyParseError> {
let type_name = if let Some((_, text, _)) = tree.iter_leaf_children_with_type("Type").nth(0) {

View File

@ -1,6 +1,6 @@
use std::collections::HashMap;
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
accel::BVH4,
@ -148,7 +148,7 @@ impl<'a> Boundable for Assembly<'a> {
#[derive(Debug)]
pub struct AssemblyBuilder<'a> {
arena: &'a MemArena,
arena: &'a Arena,
// Instance list
instances: Vec<Instance>,
@ -168,7 +168,7 @@ pub struct AssemblyBuilder<'a> {
}
impl<'a> AssemblyBuilder<'a> {
pub fn new(arena: &'a MemArena) -> AssemblyBuilder<'a> {
pub fn new(arena: &'a Arena) -> AssemblyBuilder<'a> {
AssemblyBuilder {
arena: arena,
instances: Vec::new(),

View File

@ -2,7 +2,7 @@
use std::collections::HashMap;
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
accel::BVH4,
@ -47,7 +47,7 @@ pub struct MicropolyBatch<'a> {
impl<'a> MicropolyBatch<'a> {
pub fn from_verts_and_indices<'b>(
arena: &'b MemArena,
arena: &'b Arena,
verts: &[Vec<Point>],
vert_normals: &[Vec<Normal>],
tri_indices: &[(usize, usize, usize)],
@ -58,7 +58,7 @@ impl<'a> MicropolyBatch<'a> {
// Copy verts over to a contiguous area of memory, reorganizing them
// so that each vertices' time samples are contiguous in memory.
let vertices = {
let vertices = arena.alloc_array_uninitialized(vert_count * time_sample_count);
let vertices = arena.alloc_array_uninit(vert_count * time_sample_count);
for vi in 0..vert_count {
for ti in 0..time_sample_count {
@ -74,7 +74,7 @@ impl<'a> MicropolyBatch<'a> {
// Copy vertex normals, if any, organizing them the same as vertices
// above.
let normals = {
let normals = arena.alloc_array_uninitialized(vert_count * time_sample_count);
let normals = arena.alloc_array_uninit(vert_count * time_sample_count);
for vi in 0..vert_count {
for ti in 0..time_sample_count {
@ -89,7 +89,7 @@ impl<'a> MicropolyBatch<'a> {
// Copy triangle vertex indices over, appending the triangle index itself to the tuple
let indices: &mut [(u32, u32, u32)] = {
let indices = arena.alloc_array_uninitialized(tri_indices.len());
let indices = arena.alloc_array_uninit(tri_indices.len());
for (i, tri_i) in tri_indices.iter().enumerate() {
unsafe {
*indices[i].as_mut_ptr() = (tri_i.0 as u32, tri_i.2 as u32, tri_i.1 as u32);

View File

@ -1,6 +1,6 @@
#![allow(dead_code)]
use mem_arena::MemArena;
use kioku::Arena;
use crate::{
accel::BVH4,
@ -27,7 +27,7 @@ pub struct TriangleMesh<'a> {
impl<'a> TriangleMesh<'a> {
pub fn from_verts_and_indices<'b>(
arena: &'b MemArena,
arena: &'b Arena,
verts: &[Vec<Point>],
vert_normals: &Option<Vec<Vec<Normal>>>,
tri_indices: &[(usize, usize, usize)],
@ -38,7 +38,7 @@ impl<'a> TriangleMesh<'a> {
// Copy verts over to a contiguous area of memory, reorganizing them
// so that each vertices' time samples are contiguous in memory.
let vertices = {
let vertices = arena.alloc_array_uninitialized(vert_count * time_sample_count);
let vertices = arena.alloc_array_uninit(vert_count * time_sample_count);
for vi in 0..vert_count {
for ti in 0..time_sample_count {
@ -55,7 +55,7 @@ impl<'a> TriangleMesh<'a> {
// above.
let normals = match vert_normals {
Some(ref vnors) => {
let normals = arena.alloc_array_uninitialized(vert_count * time_sample_count);
let normals = arena.alloc_array_uninit(vert_count * time_sample_count);
for vi in 0..vert_count {
for ti in 0..time_sample_count {
@ -73,7 +73,7 @@ impl<'a> TriangleMesh<'a> {
// Copy triangle vertex indices over, appending the triangle index itself to the tuple
let indices: &mut [(u32, u32, u32, u32)] = {
let indices = arena.alloc_array_uninitialized(tri_indices.len());
let indices = arena.alloc_array_uninit(tri_indices.len());
for (i, tri_i) in tri_indices.iter().enumerate() {
unsafe {
*indices[i].as_mut_ptr() =

View File

@ -1,10 +0,0 @@
[package]
name = "mem_arena"
version = "0.1.0"
authors = ["Nathan Vegdahl <cessen@cessen.com>"]
edition = "2018"
license = "MIT"
[lib]
name = "mem_arena"
path = "src/lib.rs"

View File

@ -1,384 +0,0 @@
#![allow(clippy::redundant_field_names)]
#![allow(clippy::needless_return)]
#![allow(clippy::mut_from_ref)]
#![allow(clippy::transmute_ptr_to_ptr)]
use std::{
cell::{Cell, RefCell},
cmp::max,
fmt,
mem::{align_of, size_of, transmute, MaybeUninit},
slice,
};
// Block growth rate: each new shared block is sized at roughly
// 1/GROWTH_FRACTION of the total space occupied so far.
const GROWTH_FRACTION: usize = 8; // 1/N (smaller number leads to bigger allocations)
const DEFAULT_MIN_BLOCK_SIZE: usize = 1 << 10; // 1 KiB
const DEFAULT_MAX_WASTE_PERCENTAGE: usize = 10;

/// Returns the number of bytes that must be added to `addr` to reach the
/// next multiple of `alignment` (zero if `addr` is already aligned).
///
/// `alignment` must be non-zero.
fn alignment_offset(addr: usize, alignment: usize) -> usize {
    (alignment - (addr % alignment)) % alignment
}

/// A growable memory arena for Copy types.
///
/// The arena works by allocating memory in blocks of slowly increasing size. It
/// doles out memory from the current block until an amount of memory is requested
/// that doesn't fit in the remainder of the current block, and then allocates a new
/// block.
///
/// Additionally, it attempts to minimize wasted space through some heuristics. By
/// default, it tries to keep memory waste within the arena below 10%.
pub struct MemArena {
    // Allocated blocks.  The block at index 0 is the "current" block that new
    // allocations are served from (see `alloc_raw()`).
    blocks: RefCell<Vec<Vec<MaybeUninit<u8>>>>,
    // Minimum size (in bytes) of newly allocated blocks.
    min_block_size: usize,
    // Upper bound (whole percent, 1-100) used by the waste heuristic in
    // `alloc_raw()`.
    max_waste_percentage: usize,
    // Total bytes of block capacity reserved by the arena.
    stat_space_occupied: Cell<usize>,
    // Total bytes handed out to callers so far.
    stat_space_allocated: Cell<usize>,
}

impl Default for MemArena {
    /// Equivalent to `MemArena::new()`.
    ///
    /// NOTE: this was previously `#[derive(Default)]`, which produced an arena
    /// with an empty block list and `min_block_size == 0`; the first
    /// allocation from such an arena would panic inside `alloc_raw()`
    /// (`blocks.first().unwrap()` on an empty vec, and a divide-by-zero in
    /// the waste heuristic).  A hand-written impl gives `Default` the same
    /// usable state as `new()`.
    fn default() -> MemArena {
        MemArena {
            blocks: RefCell::new(vec![Vec::with_capacity(DEFAULT_MIN_BLOCK_SIZE)]),
            min_block_size: DEFAULT_MIN_BLOCK_SIZE,
            max_waste_percentage: DEFAULT_MAX_WASTE_PERCENTAGE,
            stat_space_occupied: Cell::new(DEFAULT_MIN_BLOCK_SIZE),
            stat_space_allocated: Cell::new(0),
        }
    }
}
impl fmt::Debug for MemArena {
    /// Summarizes the arena's configuration and usage statistics rather than
    /// dumping the raw (partially uninitialized) block contents.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let block_count = self.blocks.borrow().len();
        let mut dbg = f.debug_struct("MemArena");
        dbg.field("blocks.len():", &block_count);
        dbg.field("min_block_size", &self.min_block_size);
        dbg.field("max_waste_percentage", &self.max_waste_percentage);
        dbg.field("stat_space_occupied", &self.stat_space_occupied);
        dbg.field("stat_space_allocated", &self.stat_space_allocated);
        dbg.finish()
    }
}
impl MemArena {
    /// Create a new arena, with default minimum block size.
    pub fn new() -> MemArena {
        MemArena {
            blocks: RefCell::new(vec![Vec::with_capacity(DEFAULT_MIN_BLOCK_SIZE)]),
            min_block_size: DEFAULT_MIN_BLOCK_SIZE,
            max_waste_percentage: DEFAULT_MAX_WASTE_PERCENTAGE,
            // The first block's capacity is already reserved, so it counts as
            // occupied space from the start.
            stat_space_occupied: Cell::new(DEFAULT_MIN_BLOCK_SIZE),
            stat_space_allocated: Cell::new(0),
        }
    }

    /// Create a new arena, with a specified minimum block size.
    ///
    /// Panics if `min_block_size` is zero.
    pub fn with_min_block_size(min_block_size: usize) -> MemArena {
        assert!(min_block_size > 0);
        MemArena {
            blocks: RefCell::new(vec![Vec::with_capacity(min_block_size)]),
            min_block_size: min_block_size,
            max_waste_percentage: DEFAULT_MAX_WASTE_PERCENTAGE,
            stat_space_occupied: Cell::new(min_block_size),
            stat_space_allocated: Cell::new(0),
        }
    }

    /// Create a new arena, with a specified minimum block size and maximum waste percentage.
    ///
    /// Panics if `min_block_size` is zero or `max_waste_percentage` is not in 1-100.
    pub fn with_settings(min_block_size: usize, max_waste_percentage: usize) -> MemArena {
        assert!(min_block_size > 0);
        assert!(max_waste_percentage > 0 && max_waste_percentage <= 100);
        MemArena {
            blocks: RefCell::new(vec![Vec::with_capacity(min_block_size)]),
            min_block_size: min_block_size,
            max_waste_percentage: max_waste_percentage,
            stat_space_occupied: Cell::new(min_block_size),
            stat_space_allocated: Cell::new(0),
        }
    }

    /// Returns statistics about the current usage as a tuple:
    /// (space occupied, space allocated, block count)
    ///
    /// Space occupied is the amount of real memory that the MemArena
    /// is taking up (not counting book keeping).
    ///
    /// Space allocated is the amount of the occupied space that is
    /// actually used. In other words, it is the sum of the all the
    /// allocation requests made to the arena by client code.
    ///
    /// Block count is the number of blocks that have been allocated.
    pub fn stats(&self) -> (usize, usize, usize) {
        let occupied = self.stat_space_occupied.get();
        let allocated = self.stat_space_allocated.get();
        let blocks = self.blocks.borrow().len();
        (occupied, allocated, blocks)
    }

    /// Frees all memory currently allocated by the arena, resetting itself to start
    /// fresh.
    ///
    /// CAUTION: this is unsafe because it does NOT ensure that all references to the data are
    /// gone, so this can potentially lead to dangling references.
    pub unsafe fn free_all_and_reset(&self) {
        let mut blocks = self.blocks.borrow_mut();
        blocks.clear();
        // Release the old blocks' memory entirely before reserving a fresh one.
        blocks.shrink_to_fit();
        blocks.push(Vec::with_capacity(self.min_block_size));
        self.stat_space_occupied.set(self.min_block_size);
        self.stat_space_allocated.set(0);
    }

    /// Allocates memory for and initializes a type T, returning a mutable reference to it.
    pub fn alloc<T: Copy>(&self, value: T) -> &mut T {
        let memory = self.alloc_uninitialized();
        unsafe {
            *memory.as_mut_ptr() = value;
        }
        // The write above initialized the memory, so viewing the
        // `&mut MaybeUninit<T>` as `&mut T` is sound.
        unsafe { transmute(memory) }
    }

    /// Allocates memory for and initializes a type T, returning a mutable reference to it.
    ///
    /// Additionally, the allocation will be made with the given byte alignment or
    /// the type's inherent alignment, whichever is greater.
    pub fn alloc_with_alignment<T: Copy>(&self, value: T, align: usize) -> &mut T {
        let memory = self.alloc_uninitialized_with_alignment(align);
        unsafe {
            *memory.as_mut_ptr() = value;
        }
        // Initialized just above; see `alloc()`.
        unsafe { transmute(memory) }
    }

    /// Allocates memory for a type `T`, returning a mutable reference to it.
    ///
    /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
    pub fn alloc_uninitialized<T: Copy>(&self) -> &mut MaybeUninit<T> {
        // Zero-sized types are not supported (alloc_raw treats size 0 specially).
        assert!(size_of::<T>() > 0);
        let memory = self.alloc_raw(size_of::<T>(), align_of::<T>()) as *mut MaybeUninit<T>;
        unsafe { memory.as_mut().unwrap() }
    }

    /// Allocates memory for a type `T`, returning a mutable reference to it.
    ///
    /// Additionally, the allocation will be made with the given byte alignment or
    /// the type's inherent alignment, whichever is greater.
    ///
    /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
    pub fn alloc_uninitialized_with_alignment<T: Copy>(&self, align: usize) -> &mut MaybeUninit<T> {
        assert!(size_of::<T>() > 0);
        let memory =
            self.alloc_raw(size_of::<T>(), max(align, align_of::<T>())) as *mut MaybeUninit<T>;
        unsafe { memory.as_mut().unwrap() }
    }

    /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
    /// All elements are initialized to the given `value`.
    pub fn alloc_array<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
        let memory = self.alloc_array_uninitialized(len);
        for v in memory.iter_mut() {
            unsafe {
                *v.as_mut_ptr() = value;
            }
        }
        // Every element was written above, so `[MaybeUninit<T>] -> [T]` is sound.
        unsafe { transmute(memory) }
    }

    /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
    /// All elements are initialized to the given `value`.
    ///
    /// Additionally, the allocation will be made with the given byte alignment or
    /// the type's inherent alignment, whichever is greater.
    pub fn alloc_array_with_alignment<T: Copy>(
        &self,
        len: usize,
        value: T,
        align: usize,
    ) -> &mut [T] {
        let memory = self.alloc_array_uninitialized_with_alignment(len, align);
        for v in memory.iter_mut() {
            unsafe {
                *v.as_mut_ptr() = value;
            }
        }
        unsafe { transmute(memory) }
    }

    /// Allocates and initializes memory to duplicate the given slice, returning a mutable slice
    /// to the new copy.
    pub fn copy_slice<T: Copy>(&self, other: &[T]) -> &mut [T] {
        let memory = self.alloc_array_uninitialized(other.len());
        for (v, other) in memory.iter_mut().zip(other.iter()) {
            unsafe {
                *v.as_mut_ptr() = *other;
            }
        }
        unsafe { transmute(memory) }
    }

    /// Allocates and initializes memory to duplicate the given slice, returning a mutable slice
    /// to the new copy.
    ///
    /// Additionally, the allocation will be made with the given byte alignment or
    /// the type's inherent alignment, whichever is greater.
    pub fn copy_slice_with_alignment<T: Copy>(&self, other: &[T], align: usize) -> &mut [T] {
        let memory = self.alloc_array_uninitialized_with_alignment(other.len(), align);
        for (v, other) in memory.iter_mut().zip(other.iter()) {
            unsafe {
                *v.as_mut_ptr() = *other;
            }
        }
        unsafe { transmute(memory) }
    }

    /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
    ///
    /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
    pub fn alloc_array_uninitialized<T: Copy>(&self, len: usize) -> &mut [MaybeUninit<T>] {
        assert!(size_of::<T>() > 0);
        let array_mem_size = {
            // Element stride: size rounded up to the type's alignment.
            let alignment_padding = alignment_offset(size_of::<T>(), align_of::<T>());
            let aligned_type_size = size_of::<T>() + alignment_padding;
            aligned_type_size * len
        };
        let memory = self.alloc_raw(array_mem_size, align_of::<T>()) as *mut MaybeUninit<T>;
        unsafe { slice::from_raw_parts_mut(memory, len) }
    }

    /// Allocates memory for `len` values of type `T`, returning a mutable slice to it.
    ///
    /// Additionally, the allocation will be made with the given byte alignment or
    /// the type's inherent alignment, whichever is greater.
    ///
    /// NOTE(review): only the *start* of the slice honors the stronger `align`;
    /// the element stride uses `T`'s natural alignment (same computation as
    /// `alloc_array_uninitialized`), so elements past the first are only
    /// guaranteed `align_of::<T>()` alignment.
    ///
    /// CAUTION: the memory returned is uninitialized. Make sure to initalize before using!
    pub fn alloc_array_uninitialized_with_alignment<T: Copy>(
        &self,
        len: usize,
        align: usize,
    ) -> &mut [MaybeUninit<T>] {
        assert!(size_of::<T>() > 0);
        let array_mem_size = {
            let alignment_padding = alignment_offset(size_of::<T>(), align_of::<T>());
            let aligned_type_size = size_of::<T>() + alignment_padding;
            aligned_type_size * len
        };
        let memory =
            self.alloc_raw(array_mem_size, max(align, align_of::<T>())) as *mut MaybeUninit<T>;
        unsafe { slice::from_raw_parts_mut(memory, len) }
    }

    /// Allocates space with a given size and alignment.
    ///
    /// This is the work-horse code of the MemArena.
    ///
    /// CAUTION: this returns uninitialized memory. Make sure to initialize the
    /// memory after calling.
    fn alloc_raw(&self, size: usize, alignment: usize) -> *mut MaybeUninit<u8> {
        assert!(alignment > 0);
        self.stat_space_allocated
            .set(self.stat_space_allocated.get() + size); // Update stats
        let mut blocks = self.blocks.borrow_mut();
        // If it's a zero-size allocation, just point to the beginning of the current block.
        // NOTE(review): this ignores `alignment` for zero-size requests, which
        // matters only if a caller ever relies on the alignment of a zero-size
        // pointer.
        if size == 0 {
            return blocks.first_mut().unwrap().as_mut_ptr();
        }
        // If it's non-zero-size.
        else {
            // First byte offset in the current block (index 0) that satisfies
            // the requested alignment.
            let start_index = {
                let block_addr = blocks.first().unwrap().as_ptr() as usize;
                let block_filled = blocks.first().unwrap().len();
                block_filled + alignment_offset(block_addr + block_filled, alignment)
            };
            // If it will fit in the current block, use the current block.
            if (start_index + size) <= blocks.first().unwrap().capacity() {
                // SAFETY: the new length is within the reserved capacity, and
                // the elements are `MaybeUninit<u8>`, so leaving them
                // uninitialized is fine.
                unsafe {
                    blocks.first_mut().unwrap().set_len(start_index + size);
                }
                let block_ptr = blocks.first_mut().unwrap().as_mut_ptr();
                return unsafe { block_ptr.add(start_index) };
            }
            // If it won't fit in the current block, create a new block and use that.
            else {
                // Grow block sizes over time: after GROWTH_FRACTION blocks,
                // each new shared block is ~1/GROWTH_FRACTION of total
                // occupied space, rounded up to a multiple of min_block_size.
                let next_size = if blocks.len() >= GROWTH_FRACTION {
                    let a = self.stat_space_occupied.get() / GROWTH_FRACTION;
                    let b = a % self.min_block_size;
                    if b > 0 {
                        a - b + self.min_block_size
                    } else {
                        a
                    }
                } else {
                    self.min_block_size
                };
                // Waste estimate: the lesser of (a) the unused tail of the
                // current block and (b) overall occupied-vs-allocated slack,
                // both as whole percentages.
                let waste_percentage = {
                    let w1 =
                        ((blocks[0].capacity() - blocks[0].len()) * 100) / blocks[0].capacity();
                    let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get())
                        * 100)
                        / self.stat_space_occupied.get();
                    if w1 < w2 {
                        w1
                    } else {
                        w2
                    }
                };
                // If it's a "large allocation", give it its own memory block.
                // (It goes at the END of the block list, so the current shared
                // block at index 0 keeps serving future small allocations.)
                if (size + alignment) > next_size || waste_percentage > self.max_waste_percentage {
                    // Update stats
                    self.stat_space_occupied
                        .set(self.stat_space_occupied.get() + size + alignment - 1);
                    // Over-reserve by (alignment - 1) bytes so an aligned
                    // start index always exists within the block.
                    blocks.push(Vec::with_capacity(size + alignment - 1));
                    unsafe {
                        blocks.last_mut().unwrap().set_len(size + alignment - 1);
                    }
                    let start_index =
                        alignment_offset(blocks.last().unwrap().as_ptr() as usize, alignment);
                    let block_ptr = blocks.last_mut().unwrap().as_mut_ptr();
                    return unsafe { block_ptr.add(start_index) };
                }
                // Otherwise create a new shared block.
                else {
                    // Update stats
                    self.stat_space_occupied
                        .set(self.stat_space_occupied.get() + next_size);
                    blocks.push(Vec::with_capacity(next_size));
                    // Swap the new block into index 0 so it becomes the
                    // "current" block; the old one stays in the list so its
                    // allocations remain valid.
                    let block_count = blocks.len();
                    blocks.swap(0, block_count - 1);
                    let start_index =
                        alignment_offset(blocks.first().unwrap().as_ptr() as usize, alignment);
                    unsafe {
                        blocks.first_mut().unwrap().set_len(start_index + size);
                    }
                    let block_ptr = blocks.first_mut().unwrap().as_mut_ptr();
                    return unsafe { block_ptr.add(start_index) };
                }
            }
        }
    }
}