Reformatted sub-crates with new rustfmt as well.

Nathan Vegdahl 2017-06-15 22:21:25 -07:00
parent f649bec585
commit b5f2237676
6 changed files with 53602 additions and 53543 deletions

View File

@ -82,8 +82,8 @@ fn main() {
fn write_conversion_functions(space_name: &str, to_xyz: [[f64; 3]; 3], f: &mut File) {
f.write_all(
format!(
r#"
format!(
r#"
#[inline]
pub fn {}_to_xyz(rgb: (f32, f32, f32)) -> (f32, f32, f32) {{
(
@ -93,25 +93,23 @@ pub fn {}_to_xyz(rgb: (f32, f32, f32)) -> (f32, f32, f32) {{
)
}}
"#,
space_name,
to_xyz[0][0],
to_xyz[0][1],
to_xyz[0][2],
to_xyz[1][0],
to_xyz[1][1],
to_xyz[1][2],
to_xyz[2][0],
to_xyz[2][1],
to_xyz[2][2]
)
.as_bytes()
)
.unwrap();
space_name,
to_xyz[0][0],
to_xyz[0][1],
to_xyz[0][2],
to_xyz[1][0],
to_xyz[1][1],
to_xyz[1][2],
to_xyz[2][0],
to_xyz[2][1],
to_xyz[2][2]
).as_bytes(),
).unwrap();
let inv = inverse(to_xyz);
f.write_all(
format!(
r#"
format!(
r#"
#[inline]
pub fn xyz_to_{}(xyz: (f32, f32, f32)) -> (f32, f32, f32) {{
(
@ -121,25 +119,23 @@ pub fn xyz_to_{}(xyz: (f32, f32, f32)) -> (f32, f32, f32) {{
)
}}
"#,
space_name,
inv[0][0],
inv[0][1],
inv[0][2],
inv[1][0],
inv[1][1],
inv[1][2],
inv[2][0],
inv[2][1],
inv[2][2]
)
.as_bytes()
)
.unwrap();
space_name,
inv[0][0],
inv[0][1],
inv[0][2],
inv[1][0],
inv[1][1],
inv[1][2],
inv[2][0],
inv[2][1],
inv[2][2]
).as_bytes(),
).unwrap();
let e_to_xyz = adapt_to_e(to_xyz, 1.0);
f.write_all(
format!(
r#"
format!(
r#"
#[inline]
pub fn {}_e_to_xyz(rgb: (f32, f32, f32)) -> (f32, f32, f32) {{
(
@ -149,25 +145,23 @@ pub fn {}_e_to_xyz(rgb: (f32, f32, f32)) -> (f32, f32, f32) {{
)
}}
"#,
space_name,
e_to_xyz[0][0],
e_to_xyz[0][1],
e_to_xyz[0][2],
e_to_xyz[1][0],
e_to_xyz[1][1],
e_to_xyz[1][2],
e_to_xyz[2][0],
e_to_xyz[2][1],
e_to_xyz[2][2]
)
.as_bytes()
)
.unwrap();
space_name,
e_to_xyz[0][0],
e_to_xyz[0][1],
e_to_xyz[0][2],
e_to_xyz[1][0],
e_to_xyz[1][1],
e_to_xyz[1][2],
e_to_xyz[2][0],
e_to_xyz[2][1],
e_to_xyz[2][2]
).as_bytes(),
).unwrap();
let inv_e = inverse(e_to_xyz);
f.write_all(
format!(
r#"
format!(
r#"
#[inline]
pub fn xyz_to_{}_e(xyz: (f32, f32, f32)) -> (f32, f32, f32) {{
(
@ -177,20 +171,18 @@ pub fn xyz_to_{}_e(xyz: (f32, f32, f32)) -> (f32, f32, f32) {{
)
}}
"#,
space_name,
inv_e[0][0],
inv_e[0][1],
inv_e[0][2],
inv_e[1][0],
inv_e[1][1],
inv_e[1][2],
inv_e[2][0],
inv_e[2][1],
inv_e[2][2]
)
.as_bytes()
)
.unwrap();
space_name,
inv_e[0][0],
inv_e[0][1],
inv_e[0][2],
inv_e[1][0],
inv_e[1][1],
inv_e[1][2],
inv_e[2][0],
inv_e[2][1],
inv_e[2][2]
).as_bytes(),
).unwrap();
}
@ -208,13 +200,20 @@ fn rgb_to_xyz(chroma: Chromaticities, y: f64) -> [[f64; 3]; 3] {
let z = (1.0 - chroma.w.0 - chroma.w.1) * y / chroma.w.1;
// Scale factors for matrix rows
let d = chroma.r.0 * (chroma.b.1 - chroma.g.1) + chroma.b.0 * (chroma.g.1 - chroma.r.1) + chroma.g.0 * (chroma.r.1 - chroma.b.1);
let d = chroma.r.0 * (chroma.b.1 - chroma.g.1) + chroma.b.0 * (chroma.g.1 - chroma.r.1) +
chroma.g.0 * (chroma.r.1 - chroma.b.1);
let sr = (x * (chroma.b.1 - chroma.g.1) - chroma.g.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) + chroma.b.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))) / d;
let sr = (x * (chroma.b.1 - chroma.g.1) -
chroma.g.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) +
chroma.b.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z))) / d;
let sg = (x * (chroma.r.1 - chroma.b.1) + chroma.r.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) - chroma.b.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
let sg = (x * (chroma.r.1 - chroma.b.1) +
chroma.r.0 * (y * (chroma.b.1 - 1.0) + chroma.b.1 * (x + z)) -
chroma.b.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
let sb = (x * (chroma.g.1 - chroma.r.1) - chroma.r.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z)) + chroma.g.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
let sb = (x * (chroma.g.1 - chroma.r.1) -
chroma.r.0 * (y * (chroma.g.1 - 1.0) + chroma.g.1 * (x + z)) +
chroma.g.0 * (y * (chroma.r.1 - 1.0) + chroma.r.1 * (x + z))) / d;
// Assemble the matrix
let mut mat = [[0.0; 3]; 3];
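
The build script above calls an inverse() helper on the 3x3 to_xyz matrix, which isn't visible in these hunks. For orientation only, a minimal adjugate-based 3x3 inverse along these lines would do the job (a sketch under that assumption, not code from this commit):

fn inverse(m: [[f64; 3]; 3]) -> [[f64; 3]; 3] {
    // Determinant by cofactor expansion along the first row.
    let det = m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
        - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0])
        + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]);
    let inv_det = 1.0 / det;
    // Adjugate (transposed cofactor matrix) scaled by 1/det.
    [
        [
            (m[1][1] * m[2][2] - m[1][2] * m[2][1]) * inv_det,
            (m[0][2] * m[2][1] - m[0][1] * m[2][2]) * inv_det,
            (m[0][1] * m[1][2] - m[0][2] * m[1][1]) * inv_det,
        ],
        [
            (m[1][2] * m[2][0] - m[1][0] * m[2][2]) * inv_det,
            (m[0][0] * m[2][2] - m[0][2] * m[2][0]) * inv_det,
            (m[0][2] * m[1][0] - m[0][0] * m[1][2]) * inv_det,
        ],
        [
            (m[1][0] * m[2][1] - m[1][1] * m[2][0]) * inv_det,
            (m[0][1] * m[2][0] - m[0][0] * m[2][1]) * inv_det,
            (m[0][0] * m[1][1] - m[0][1] * m[1][0]) * inv_det,
        ],
    ]
}
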

View File

@ -160,10 +160,12 @@ impl Float4 {
#[inline]
pub fn lt(&self, other: Float4) -> Bool4 {
Bool4 {
data: [self.data[0] < other.data[0],
self.data[1] < other.data[1],
self.data[2] < other.data[2],
self.data[3] < other.data[3]],
data: [
self.data[0] < other.data[0],
self.data[1] < other.data[1],
self.data[2] < other.data[2],
self.data[3] < other.data[3],
],
}
}
@ -176,10 +178,12 @@ impl Float4 {
#[inline]
pub fn lte(&self, other: Float4) -> Bool4 {
Bool4 {
data: [self.data[0] <= other.data[0],
self.data[1] <= other.data[1],
self.data[2] <= other.data[2],
self.data[3] <= other.data[3]],
data: [
self.data[0] <= other.data[0],
self.data[1] <= other.data[1],
self.data[2] <= other.data[2],
self.data[3] <= other.data[3],
],
}
}
@ -192,10 +196,12 @@ impl Float4 {
#[inline]
pub fn gt(&self, other: Float4) -> Bool4 {
Bool4 {
data: [self.data[0] > other.data[0],
self.data[1] > other.data[1],
self.data[2] > other.data[2],
self.data[3] > other.data[3]],
data: [
self.data[0] > other.data[0],
self.data[1] > other.data[1],
self.data[2] > other.data[2],
self.data[3] > other.data[3],
],
}
}
@ -208,10 +214,12 @@ impl Float4 {
#[inline]
pub fn gte(&self, other: Float4) -> Bool4 {
Bool4 {
data: [self.data[0] >= other.data[0],
self.data[1] >= other.data[1],
self.data[2] >= other.data[2],
self.data[3] >= other.data[3]],
data: [
self.data[0] >= other.data[0],
self.data[1] >= other.data[1],
self.data[2] >= other.data[2],
self.data[3] >= other.data[3],
],
}
}
@ -348,7 +356,8 @@ impl Float4 {
impl PartialEq for Float4 {
#[inline]
fn eq(&self, other: &Float4) -> bool {
self.get_0() == other.get_0() && self.get_1() == other.get_1() && self.get_2() == other.get_2() && self.get_3() == other.get_3()
self.get_0() == other.get_0() && self.get_1() == other.get_1() &&
self.get_2() == other.get_2() && self.get_3() == other.get_3()
}
}
@ -365,10 +374,12 @@ impl Add for Float4 {
#[inline(always)]
fn add(self, other: Float4) -> Float4 {
Float4 {
data: [self.get_0() + other.get_0(),
self.get_1() + other.get_1(),
self.get_2() + other.get_2(),
self.get_3() + other.get_3()],
data: [
self.get_0() + other.get_0(),
self.get_1() + other.get_1(),
self.get_2() + other.get_2(),
self.get_3() + other.get_3(),
],
}
}
}
@ -394,10 +405,12 @@ impl Sub for Float4 {
#[inline(always)]
fn sub(self, other: Float4) -> Float4 {
Float4 {
data: [self.get_0() - other.get_0(),
self.get_1() - other.get_1(),
self.get_2() - other.get_2(),
self.get_3() - other.get_3()],
data: [
self.get_0() - other.get_0(),
self.get_1() - other.get_1(),
self.get_2() - other.get_2(),
self.get_3() - other.get_3(),
],
}
}
}
@ -423,10 +436,12 @@ impl Mul for Float4 {
#[inline(always)]
fn mul(self, other: Float4) -> Float4 {
Float4 {
data: [self.get_0() * other.get_0(),
self.get_1() * other.get_1(),
self.get_2() * other.get_2(),
self.get_3() * other.get_3()],
data: [
self.get_0() * other.get_0(),
self.get_1() * other.get_1(),
self.get_2() * other.get_2(),
self.get_3() * other.get_3(),
],
}
}
}
@ -443,10 +458,12 @@ impl Mul<f32> for Float4 {
#[inline(always)]
fn mul(self, other: f32) -> Float4 {
Float4 {
data: [self.get_0() * other,
self.get_1() * other,
self.get_2() * other,
self.get_3() * other],
data: [
self.get_0() * other,
self.get_1() * other,
self.get_2() * other,
self.get_3() * other,
],
}
}
}
@ -479,10 +496,12 @@ impl Div for Float4 {
#[inline(always)]
fn div(self, other: Float4) -> Float4 {
Float4 {
data: [self.get_0() / other.get_0(),
self.get_1() / other.get_1(),
self.get_2() / other.get_2(),
self.get_3() / other.get_3()],
data: [
self.get_0() / other.get_0(),
self.get_1() / other.get_1(),
self.get_2() / other.get_2(),
self.get_3() / other.get_3(),
],
}
}
}
@ -499,10 +518,12 @@ impl Div<f32> for Float4 {
#[inline(always)]
fn div(self, other: f32) -> Float4 {
Float4 {
data: [self.get_0() / other,
self.get_1() / other,
self.get_2() / other,
self.get_3() / other],
data: [
self.get_0() / other,
self.get_1() / other,
self.get_2() / other,
self.get_3() / other,
],
}
}
}
@ -598,7 +619,8 @@ impl Bool4 {
#[inline]
pub fn to_bitmask(&self) -> u8 {
(self.get_0() as u8) | ((self.get_1() as u8) << 1) | ((self.get_2() as u8) << 2) | ((self.get_3() as u8) << 3)
(self.get_0() as u8) | ((self.get_1() as u8) << 1) | ((self.get_2() as u8) << 2) |
((self.get_3() as u8) << 3)
}
}
@ -614,10 +636,12 @@ impl BitAnd for Bool4 {
#[inline]
fn bitand(self, rhs: Bool4) -> Bool4 {
Bool4 {
data: [self.data[0] && rhs.data[0],
self.data[1] && rhs.data[1],
self.data[2] && rhs.data[2],
self.data[3] && rhs.data[3]],
data: [
self.data[0] && rhs.data[0],
self.data[1] && rhs.data[1],
self.data[2] && rhs.data[2],
self.data[3] && rhs.data[3],
],
}
}
}
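
For orientation (not part of the diff), the comparison and bitmask methods being reformatted above compose as in the following sketch; the `use` path is an assumption about the sub-crate's layout:

// Assumed module path; adjust to wherever Float4 actually lives.
use float4::Float4;

fn main() {
    let a = Float4::new(1.0, 2.0, 3.0, 4.0);
    let b = Float4::new(4.0, 3.0, 2.0, 1.0);
    // Lane-wise compare: lanes 0 and 1 of `a` are less than those of `b`.
    let mask = a.lt(b);
    // to_bitmask() packs lane 0 into bit 0, lane 1 into bit 1, and so on.
    assert_eq!(mask.to_bitmask(), 0b0011);
}
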

View File

@ -64,8 +64,8 @@ fn main() {
// Write the beginning bits of the file
f.write_all(
format!(
r#"
format!(
r#"
// Copyright (c) 2012 Leonhard Gruenschloss (leonhard@gruenschloss.org)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
@ -92,54 +92,46 @@ fn main() {
pub const MAX_DIMENSION: u32 = {};
"#,
NUM_DIMENSIONS
)
.as_bytes()
)
.unwrap();
NUM_DIMENSIONS
).as_bytes(),
).unwrap();
// Write the sampling function
f.write_all(
format!(
r#"
format!(
r#"
#[inline]
pub fn sample(dimension: u32, index: u32) -> f32 {{
match dimension {{"#
)
.as_bytes()
)
.unwrap();
).as_bytes(),
).unwrap();
for i in 0..NUM_DIMENSIONS {
f.write_all(
format!(
r#"
format!(
r#"
{} => halton{}(index),"#,
i,
primes[i]
)
.as_bytes()
)
.unwrap();
i,
primes[i]
).as_bytes(),
).unwrap();
}
f.write_all(
format!(
r#"
format!(
r#"
_ => panic!("Exceeded max dimensions."),
}}
}}
"#
)
.as_bytes()
)
.unwrap();
).as_bytes(),
).unwrap();
// Write the special-cased first dimension
f.write_all(
format!(
r#"
format!(
r#"
// Special case: radical inverse in base 2, with direct bit reversal.
fn halton2(mut index: u32) -> f32 {{
index = (index << 16) | (index >> 16);
@ -150,10 +142,8 @@ fn halton2(mut index: u32) -> f32 {{
return (index as f32) * (1.0 / ((1u64 << 32) as f32));
}}
"#
)
.as_bytes()
)
.unwrap();
).as_bytes(),
).unwrap();
for i in 1..NUM_DIMENSIONS {
// Skip base 2.
@ -191,30 +181,26 @@ fn halton2(mut index: u32) -> f32 {{
let mut power = max_power / pow_base;
f.write_all(
format!(
r#"
format!(
r#"
fn halton{}(index: u32) -> f32 {{
static PERM{}: [u16; {}] = [{}];"#,
base,
base,
perm.len(),
perm_string
)
.as_bytes()
)
.unwrap();;
base,
base,
perm.len(),
perm_string
).as_bytes(),
).unwrap();;
f.write_all(
format!(
r#"
format!(
r#"
return (unsafe{{*PERM{}.get_unchecked((index % {}) as usize)}} as u32 * {} +"#,
base,
pow_base,
power
)
.as_bytes()
)
.unwrap();;
base,
pow_base,
power
).as_bytes(),
).unwrap();;
// Advance to next set of digits.
let mut div = 1;
@ -222,34 +208,30 @@ fn halton{}(index: u32) -> f32 {{
div *= pow_base;
power /= pow_base;
f.write_all(
format!(
r#"
format!(
r#"
unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32 * {} +"#,
base,
div,
pow_base,
power
)
.as_bytes()
)
.unwrap();;
base,
div,
pow_base,
power
).as_bytes(),
).unwrap();;
}
f.write_all(
format!(
r#"
format!(
r#"
unsafe{{*PERM{}.get_unchecked(((index / {}) % {}) as usize)}} as u32) as f32 *
(0.999999940395355224609375f32 / ({}u32 as f32)); // Results in [0,1).
}}
"#,
base,
div * pow_base,
pow_base,
max_power
)
.as_bytes()
)
.unwrap();;
base,
div * pow_base,
pow_base,
max_power
).as_bytes(),
).unwrap();;
}
}
@ -275,30 +257,26 @@ fn get_faure_permutation(faure: &Vec<Vec<usize>>, b: usize) -> Vec<usize> {
let c = (b - 1) / 2;
return (0..b)
.map(
|i| {
.map(|i| {
if i == c {
return c;
}
let f: usize = faure[b - 1][i - ((i > c) as usize)];
f + ((f >= c) as usize)
}
)
.collect();
})
.collect();
} else {
// even
let c = b / 2;
return (0..b)
.map(
|i| if i < c {
.map(|i| if i < c {
2 * faure[c][i]
} else {
2 * faure[c][i - c] + 1
}
)
.collect();
})
.collect();
}
}
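
The generated halton{base}() functions above are unrolled, digit-permuted radical inverses. As a reference point (a sketch, not code from this commit), the plain loop-based radical inverse they specialize looks roughly like this:

fn radical_inverse(base: u32, mut index: u32) -> f32 {
    let inv_base = 1.0 / base as f64;
    let mut digit_weight = inv_base;
    let mut result = 0.0f64;
    while index > 0 {
        // Take the next base-`base` digit of `index` and mirror it across
        // the radix point.
        result += (index % base) as f64 * digit_weight;
        index /= base;
        digit_weight *= inv_base;
    }
    result as f32
}
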

View File

@ -20,10 +20,12 @@ impl Matrix4x4 {
#[inline]
pub fn new() -> Matrix4x4 {
Matrix4x4 {
values: [Float4::new(1.0, 0.0, 0.0, 0.0),
Float4::new(0.0, 1.0, 0.0, 0.0),
Float4::new(0.0, 0.0, 1.0, 0.0),
Float4::new(0.0, 0.0, 0.0, 1.0)],
values: [
Float4::new(1.0, 0.0, 0.0, 0.0),
Float4::new(0.0, 1.0, 0.0, 0.0),
Float4::new(0.0, 0.0, 1.0, 0.0),
Float4::new(0.0, 0.0, 0.0, 1.0),
],
}
}
@ -33,22 +35,43 @@ impl Matrix4x4 {
/// i j k l
/// m n o p
#[inline]
pub fn new_from_values(a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32, i: f32, j: f32, k: f32, l: f32, m: f32, n: f32, o: f32, p: f32) -> Matrix4x4 {
pub fn new_from_values(
a: f32,
b: f32,
c: f32,
d: f32,
e: f32,
f: f32,
g: f32,
h: f32,
i: f32,
j: f32,
k: f32,
l: f32,
m: f32,
n: f32,
o: f32,
p: f32,
) -> Matrix4x4 {
Matrix4x4 {
values: [Float4::new(a, b, c, d),
Float4::new(e, f, g, h),
Float4::new(i, j, k, l),
Float4::new(m, n, o, p)],
values: [
Float4::new(a, b, c, d),
Float4::new(e, f, g, h),
Float4::new(i, j, k, l),
Float4::new(m, n, o, p),
],
}
}
#[inline]
pub fn from_location(loc: Point) -> Matrix4x4 {
Matrix4x4 {
values: [Float4::new(1.0, 0.0, 0.0, loc.x()),
Float4::new(0.0, 1.0, 0.0, loc.y()),
Float4::new(0.0, 0.0, 1.0, loc.z()),
Float4::new(0.0, 0.0, 0.0, 1.0)],
values: [
Float4::new(1.0, 0.0, 0.0, loc.x()),
Float4::new(0.0, 1.0, 0.0, loc.y()),
Float4::new(0.0, 0.0, 1.0, loc.z()),
Float4::new(0.0, 0.0, 0.0, 1.0),
],
}
}
@ -90,30 +113,32 @@ impl Matrix4x4 {
pub fn transposed(&self) -> Matrix4x4 {
Matrix4x4 {
values: {
[Float4::new(
self[0].get_0(),
self[1].get_0(),
self[2].get_0(),
self[3].get_0(),
),
Float4::new(
self[0].get_1(),
self[1].get_1(),
self[2].get_1(),
self[3].get_1(),
),
Float4::new(
self[0].get_2(),
self[1].get_2(),
self[2].get_2(),
self[3].get_2(),
),
Float4::new(
self[0].get_3(),
self[1].get_3(),
self[2].get_3(),
self[3].get_3(),
)]
[
Float4::new(
self[0].get_0(),
self[1].get_0(),
self[2].get_0(),
self[3].get_0(),
),
Float4::new(
self[0].get_1(),
self[1].get_1(),
self[2].get_1(),
self[3].get_1(),
),
Float4::new(
self[0].get_2(),
self[1].get_2(),
self[2].get_2(),
self[3].get_2(),
),
Float4::new(
self[0].get_3(),
self[1].get_3(),
self[2].get_3(),
self[3].get_3(),
),
]
},
}
}
@ -142,33 +167,35 @@ impl Matrix4x4 {
Matrix4x4 {
values: {
[Float4::new(
((self[1].get_1() * c5) - (self[1].get_2() * c4) + (self[1].get_3() * c3)) * invdet,
((-self[0].get_1() * c5) + (self[0].get_2() * c4) - (self[0].get_3() * c3)) * invdet,
((self[3].get_1() * s5) - (self[3].get_2() * s4) + (self[3].get_3() * s3)) * invdet,
((-self[2].get_1() * s5) + (self[2].get_2() * s4) - (self[2].get_3() * s3)) * invdet,
),
[
Float4::new(
((self[1].get_1() * c5) - (self[1].get_2() * c4) + (self[1].get_3() * c3)) * invdet,
((-self[0].get_1() * c5) + (self[0].get_2() * c4) - (self[0].get_3() * c3)) * invdet,
((self[3].get_1() * s5) - (self[3].get_2() * s4) + (self[3].get_3() * s3)) * invdet,
((-self[2].get_1() * s5) + (self[2].get_2() * s4) - (self[2].get_3() * s3)) * invdet,
),
Float4::new(
((-self[1].get_0() * c5) + (self[1].get_2() * c2) - (self[1].get_3() * c1)) * invdet,
((self[0].get_0() * c5) - (self[0].get_2() * c2) + (self[0].get_3() * c1)) * invdet,
((-self[3].get_0() * s5) + (self[3].get_2() * s2) - (self[3].get_3() * s1)) * invdet,
((self[2].get_0() * s5) - (self[2].get_2() * s2) + (self[2].get_3() * s1)) * invdet,
),
Float4::new(
((-self[1].get_0() * c5) + (self[1].get_2() * c2) - (self[1].get_3() * c1)) * invdet,
((self[0].get_0() * c5) - (self[0].get_2() * c2) + (self[0].get_3() * c1)) * invdet,
((-self[3].get_0() * s5) + (self[3].get_2() * s2) - (self[3].get_3() * s1)) * invdet,
((self[2].get_0() * s5) - (self[2].get_2() * s2) + (self[2].get_3() * s1)) * invdet,
),
Float4::new(
((self[1].get_0() * c4) - (self[1].get_1() * c2) + (self[1].get_3() * c0)) * invdet,
((-self[0].get_0() * c4) + (self[0].get_1() * c2) - (self[0].get_3() * c0)) * invdet,
((self[3].get_0() * s4) - (self[3].get_1() * s2) + (self[3].get_3() * s0)) * invdet,
((-self[2].get_0() * s4) + (self[2].get_1() * s2) - (self[2].get_3() * s0)) * invdet,
),
Float4::new(
((self[1].get_0() * c4) - (self[1].get_1() * c2) + (self[1].get_3() * c0)) * invdet,
((-self[0].get_0() * c4) + (self[0].get_1() * c2) - (self[0].get_3() * c0)) * invdet,
((self[3].get_0() * s4) - (self[3].get_1() * s2) + (self[3].get_3() * s0)) * invdet,
((-self[2].get_0() * s4) + (self[2].get_1() * s2) - (self[2].get_3() * s0)) * invdet,
),
Float4::new(
((-self[1].get_0() * c3) + (self[1].get_1() * c1) - (self[1].get_2() * c0)) * invdet,
((self[0].get_0() * c3) - (self[0].get_1() * c1) + (self[0].get_2() * c0)) * invdet,
((-self[3].get_0() * s3) + (self[3].get_1() * s1) - (self[3].get_2() * s0)) * invdet,
((self[2].get_0() * s3) - (self[2].get_1() * s1) + (self[2].get_2() * s0)) * invdet,
)]
Float4::new(
((-self[1].get_0() * c3) + (self[1].get_1() * c1) - (self[1].get_2() * c0)) * invdet,
((self[0].get_0() * c3) - (self[0].get_1() * c1) + (self[0].get_2() * c0)) * invdet,
((-self[3].get_0() * s3) + (self[3].get_1() * s1) - (self[3].get_2() * s0)) * invdet,
((self[2].get_0() * s3) - (self[2].get_1() * s1) + (self[2].get_2() * s0)) * invdet,
),
]
},
}
}
@ -217,33 +244,35 @@ impl Mul<Matrix4x4> for Matrix4x4 {
fn mul(self, other: Matrix4x4) -> Matrix4x4 {
let m = self.transposed();
Matrix4x4 {
values: [Float4::new(
(m[0] * other[0]).h_sum(),
(m[1] * other[0]).h_sum(),
(m[2] * other[0]).h_sum(),
(m[3] * other[0]).h_sum(),
),
values: [
Float4::new(
(m[0] * other[0]).h_sum(),
(m[1] * other[0]).h_sum(),
(m[2] * other[0]).h_sum(),
(m[3] * other[0]).h_sum(),
),
Float4::new(
(m[0] * other[1]).h_sum(),
(m[1] * other[1]).h_sum(),
(m[2] * other[1]).h_sum(),
(m[3] * other[1]).h_sum(),
),
Float4::new(
(m[0] * other[1]).h_sum(),
(m[1] * other[1]).h_sum(),
(m[2] * other[1]).h_sum(),
(m[3] * other[1]).h_sum(),
),
Float4::new(
(m[0] * other[2]).h_sum(),
(m[1] * other[2]).h_sum(),
(m[2] * other[2]).h_sum(),
(m[3] * other[2]).h_sum(),
),
Float4::new(
(m[0] * other[2]).h_sum(),
(m[1] * other[2]).h_sum(),
(m[2] * other[2]).h_sum(),
(m[3] * other[2]).h_sum(),
),
Float4::new(
(m[0] * other[3]).h_sum(),
(m[1] * other[3]).h_sum(),
(m[2] * other[3]).h_sum(),
(m[3] * other[3]).h_sum(),
)],
Float4::new(
(m[0] * other[3]).h_sum(),
(m[1] * other[3]).h_sum(),
(m[2] * other[3]).h_sum(),
(m[3] * other[3]).h_sum(),
),
],
}
}
}
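
The Mul impl above first transposes the left-hand matrix so that each output element is a horizontal sum (h_sum) of a lane-wise product of two Float4 rows. A plain-array sketch of that same indexing, for reference only:

fn mul4(a: [[f32; 4]; 4], b: [[f32; 4]; 4]) -> [[f32; 4]; 4] {
    // Transpose `a` so its columns become rows, mirroring `self.transposed()`.
    let mut at = [[0.0f32; 4]; 4];
    for i in 0..4 {
        for j in 0..4 {
            at[i][j] = a[j][i];
        }
    }
    // out[i][j] corresponds to (m[j] * other[i]).h_sum() in the impl above.
    let mut out = [[0.0f32; 4]; 4];
    for i in 0..4 {
        for j in 0..4 {
            let mut sum = 0.0f32;
            for k in 0..4 {
                sum += at[j][k] * b[i][k];
            }
            out[i][j] = sum;
        }
    }
    out
}
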

View File

@ -137,7 +137,10 @@ impl MemArena {
/// the type's inherent alignment, whichever is greater.
///
/// CAUTION: the memory returned is uninitialized. Make sure to initialize before using!
pub unsafe fn alloc_uninitialized_with_alignment<'a, T: Copy>(&'a self, align: usize) -> &'a mut T {
pub unsafe fn alloc_uninitialized_with_alignment<'a, T: Copy>(
&'a self,
align: usize,
) -> &'a mut T {
assert!(size_of::<T>() > 0);
let memory = self.alloc_raw(size_of::<T>(), max(align, align_of::<T>())) as *mut T;
@ -162,7 +165,12 @@ impl MemArena {
///
/// Additionally, the allocation will be made with the given byte alignment or
/// the type's inherent alignment, whichever is greater.
pub fn alloc_array_with_alignment<'a, T: Copy>(&'a self, len: usize, value: T, align: usize) -> &'a mut [T] {
pub fn alloc_array_with_alignment<'a, T: Copy>(
&'a self,
len: usize,
value: T,
align: usize,
) -> &'a mut [T] {
let memory = unsafe { self.alloc_array_uninitialized_with_alignment(len, align) };
for v in memory.iter_mut() {
@ -189,7 +197,11 @@ impl MemArena {
///
/// Additionally, the allocation will be made with the given byte alignment or
/// the type's inherent alignment, whichever is greater.
pub fn copy_slice_with_alignment<'a, T: Copy>(&'a self, other: &[T], align: usize) -> &'a mut [T] {
pub fn copy_slice_with_alignment<'a, T: Copy>(
&'a self,
other: &[T],
align: usize,
) -> &'a mut [T] {
let memory = unsafe { self.alloc_array_uninitialized_with_alignment(other.len(), align) };
for (v, other) in memory.iter_mut().zip(other.iter()) {
@ -222,7 +234,11 @@ impl MemArena {
/// the type's inherent alignment, whichever is greater.
///
/// CAUTION: the memory returned is uninitialized. Make sure to initialize before using!
pub unsafe fn alloc_array_uninitialized_with_alignment<'a, T: Copy>(&'a self, len: usize, align: usize) -> &'a mut [T] {
pub unsafe fn alloc_array_uninitialized_with_alignment<'a, T: Copy>(
&'a self,
len: usize,
align: usize,
) -> &'a mut [T] {
assert!(size_of::<T>() > 0);
let array_mem_size = {
@ -245,8 +261,10 @@ impl MemArena {
unsafe fn alloc_raw(&self, size: usize, alignment: usize) -> *mut u8 {
assert!(alignment > 0);
self.stat_space_allocated
.set(self.stat_space_allocated.get() + size); // Update stats
self.stat_space_allocated.set(
self.stat_space_allocated.get() +
size,
); // Update stats
let mut blocks = self.blocks.borrow_mut();
@ -284,21 +302,27 @@ impl MemArena {
};
let waste_percentage = {
let w1 = ((blocks[0].capacity() - blocks[0].len()) * 100) / blocks[0].capacity();
let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get()) * 100) / self.stat_space_occupied.get();
let w1 = ((blocks[0].capacity() - blocks[0].len()) * 100) /
blocks[0].capacity();
let w2 = ((self.stat_space_occupied.get() - self.stat_space_allocated.get()) *
100) /
self.stat_space_occupied.get();
if w1 < w2 { w1 } else { w2 }
};
// If it's a "large allocation", give it its own memory block.
if (size + alignment) > next_size || waste_percentage > self.max_waste_percentage {
// Update stats
self.stat_space_occupied
.set(self.stat_space_occupied.get() + size + alignment - 1);
self.stat_space_occupied.set(
self.stat_space_occupied.get() + size + alignment -
1,
);
blocks.push(Vec::with_capacity(size + alignment - 1));
blocks.last_mut().unwrap().set_len(size + alignment - 1);
let start_index = alignment_offset(blocks.last().unwrap().as_ptr() as usize, alignment);
let start_index =
alignment_offset(blocks.last().unwrap().as_ptr() as usize, alignment);
let block_ptr = blocks.last_mut().unwrap().as_mut_ptr();
return block_ptr.offset(start_index as isize);
@ -306,14 +330,17 @@ impl MemArena {
// Otherwise create a new shared block.
else {
// Update stats
self.stat_space_occupied
.set(self.stat_space_occupied.get() + next_size);
self.stat_space_occupied.set(
self.stat_space_occupied.get() +
next_size,
);
blocks.push(Vec::with_capacity(next_size));
let block_count = blocks.len();
blocks.swap(0, block_count - 1);
let start_index = alignment_offset(blocks.first().unwrap().as_ptr() as usize, alignment);
let start_index =
alignment_offset(blocks.first().unwrap().as_ptr() as usize, alignment);
blocks.first_mut().unwrap().set_len(start_index + size);

File diff suppressed because it is too large.