🔧 Tests and benchmarks #16
Cargo.lock (generated): 30 lines changed

@@ -299,6 +299,7 @@ dependencies = [
  "criterion",
  "getset",
  "itertools 0.12.0",
+ "ndarray",
  "num",
  "rand",
  "serde",
@@ -307,12 +308,35 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "matrixmultiply"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7574c1cf36da4798ab73da5b215bbf444f50718207754cb522201d78d1cd0ff2"
+dependencies = [
+ "autocfg",
+ "rawpointer",
+]
+
 [[package]]
 name = "memchr"
 version = "2.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
 
+[[package]]
+name = "ndarray"
+version = "0.15.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adb12d4e967ec485a5f71c6311fe28158e9d6f4bc4a447b474184d0f91a8fa32"
+dependencies = [
+ "matrixmultiply",
+ "num-complex",
+ "num-integer",
+ "num-traits",
+ "rawpointer",
+]
+
 [[package]]
 name = "num"
 version = "0.4.1"
@@ -507,6 +531,12 @@ dependencies = [
  "getrandom",
 ]
 
+[[package]]
+name = "rawpointer"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
+
 [[package]]
 name = "rayon"
 version = "1.8.0"
Cargo.toml: 14 lines changed

@@ -6,10 +6,7 @@ license = "MIT/Apache-2.0"
 authors = ["Julius Koskela <julius.koskela@nordic-dev.net>"]
 
 description = """
-GDSL is a graph data-structure library including graph containers,
-connected node strutures and efficient algorithms on those structures.
-Nodes are independent of a graph container and can be used as connected
-smart pointers.
+Manifold is a Tensor library for Rust.
 """
 
 repository = "https://nordic-dev.net/julius/manifold"
@@ -23,10 +20,15 @@ getset = "0.1.2"
 itertools = "0.12.0"
 num = "0.4.1"
 serde = { version = "1.0.193", features = ["derive"] }
-serde_json = "1.0.108"
-static_assertions = "1.1.0"
 thiserror = "1.0.52"
 
 [dev-dependencies]
 rand = "0.8.5"
 criterion = "0.5.1"
+serde_json = "1.0.108"
+static_assertions = "1.1.0"
+ndarray = "0.15.6"
+
+[[bench]]
+name = "manifold_benchmark"
+harness = false
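With `harness = false`, Cargo skips the default libtest harness for this target and Criterion's `criterion_main!` in the new bench file below supplies `main`; the benchmark then runs under `cargo bench`. Moving `serde_json`, `static_assertions`, and `ndarray` under `[dev-dependencies]` keeps them out of the library's published dependency set, since they are only used by tests and benchmarks.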
benches/manifold_benchmark.rs (new file): 66 lines

@@ -0,0 +1,66 @@
+use criterion::Throughput;
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
+use manifold::*;
+use rand::Rng;
+
+fn random_tensor_r2_manifold() -> Tensor<f64, 2> {
+    let mut rng = rand::thread_rng();
+    let mut tensor = tensor!([[0.0; 1000]; 1000]);
+    for i in 0..tensor.len() {
+        tensor[i] = rng.gen();
+    }
+    tensor
+}
+
+fn random_tensor_r2_ndarray() -> ndarray::Array2<f64> {
+    let mut rng = rand::thread_rng();
+    let (rows, cols) = (1000, 1000);
+    let mut tensor = ndarray::Array2::<f64>::zeros((rows, cols));
+    for i in 0..rows {
+        for j in 0..cols {
+            tensor[[i, j]] = rng.gen();
+        }
+    }
+    tensor
+}
+
+fn tensor_product(c: &mut Criterion) {
+    let b = 1000;
+
+    let mut group = c.benchmark_group("element-wise addition");
+
+    for (i, size) in [b].iter().enumerate() {
+        group.throughput(Throughput::Elements(*size as u64));
+
+        group.bench_with_input(
+            BenchmarkId::new("manifold", size),
+            &i,
+            |b, _| {
+                b.iter(|| {
+                    let a = random_tensor_r2_manifold();
+                    let b = random_tensor_r2_manifold();
+                    let c = a + b;
+                    assert!(c.shape().as_array() == &[1000, 1000]);
+                })
+            },
+        );
+
+        group.bench_with_input(
+            BenchmarkId::new("ndarray", size),
+            &i,
+            |b, _| {
+                b.iter(|| {
+                    let a = random_tensor_r2_ndarray();
+                    let b = random_tensor_r2_ndarray();
+                    let c = a + b;
+                    assert!(c.shape() == &[1000, 1000]);
+                })
+            },
+        );
+    }
+    group.finish();
+}
+
+criterion_group!(benches, tensor_product);
+
+criterion_main!(benches);
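A caveat on methodology: both `b.iter` closures above regenerate the 1000x1000 inputs inside the timed section, so RNG and allocation are measured along with the addition. If only the addition should be timed, Criterion's `iter_batched` keeps setup out of the measurement window; a minimal sketch reusing the helper above (an illustration, not part of this PR):

```rust
use criterion::{BatchSize, Criterion};

fn add_only(c: &mut Criterion) {
    c.bench_function("manifold add, setup excluded", |b| {
        b.iter_batched(
            // Setup: runs outside the timed section.
            || (random_tensor_r2_manifold(), random_tensor_r2_manifold()),
            // Routine: only the element-wise addition is measured.
            |(x, y)| x + y,
            BatchSize::LargeInput,
        )
    });
}
```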
src/index.rs: 29 lines changed

@@ -1,8 +1,8 @@
 use super::*;
 use getset::{Getters, MutGetters};
 use std::{
-    ops::{Index, IndexMut, Add, Sub},
     cmp::Ordering,
+    ops::{Add, Index, IndexMut, Sub},
 };
 
 #[derive(Clone, Copy, Debug, Getters, MutGetters)]
@@ -16,7 +16,6 @@ pub struct TensorIndex<const R: usize> {
 // ---- Construction and Initialization ---------------------------------------
 
 impl<const R: usize> TensorIndex<R> {
-
     pub fn new(shape: TensorShape<R>, indices: [usize; R]) -> Self {
         if !shape.check_indices(indices) {
            panic!("indices out of bounds");
@@ -65,10 +64,9 @@ impl<const R: usize> TensorIndex<R> {
         if self.indices()[0] >= self.shape().get(0) {
             return false;
         }
+        let shape = self.shape().as_array().clone();
         let mut carry = 1;
-        for (i, &dim_size) in
-            self.indices.iter_mut().zip(&self.shape.as_array()).rev()
-        {
+        for (i, &dim_size) in self.indices.iter_mut().zip(&shape).rev() {
             if carry == 1 {
                 *i += 1;
                 if *i >= dim_size {
@@ -158,9 +156,8 @@ impl<const R: usize> TensorIndex<R> {
         }
 
         let mut borrow = true;
-        for (i, &dim_size) in
-            self.indices.iter_mut().zip(&self.shape.as_array()).rev()
-        {
+        let shape = self.shape().as_array().clone();
+        for (i, &dim_size) in self.indices_mut().iter_mut().zip(&shape).rev() {
             if borrow {
                 if *i == 0 {
                     *i = dim_size - 1; // Wrap around to the maximum index of
@@ -271,7 +268,7 @@ impl<const R: usize> TensorIndex<R> {
     pub fn flat(&self) -> usize {
         self.indices()
             .iter()
-            .zip(&self.shape().as_array())
+            .zip(&self.shape().as_array().clone())
             .rev()
             .fold((0, 1), |(flat_index, product), (&idx, &dim_size)| {
                 (flat_index + idx * product, product * dim_size)
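The fold in `flat` accumulates a row-major offset (last axis varies fastest): each step adds `idx * product` and multiplies the running `product` by that axis's size. As a worked check with an assumed shape `[3, 4, 5]` and index `[1, 2, 3]`, the strides seen in reverse order are 1, 5, and 20, so the offset is 3*1 + 2*5 + 1*20 = 33:

```rust
// Mirrors TensorIndex::flat for shape [3, 4, 5] and index [1, 2, 3].
let (shape, idx) = ([3usize, 4, 5], [1usize, 2, 3]);
let (flat, _) = idx.iter().zip(&shape).rev().fold(
    (0, 1),
    |(flat, product), (&i, &dim)| (flat + i * product, product * dim),
);
assert_eq!(flat, 33);
```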
@@ -344,18 +341,14 @@ impl<const R: usize> IndexMut<usize> for TensorIndex<R> {
     }
 }
 
-impl<const R: usize> From<(TensorShape<R>, [usize; R])>
-    for TensorIndex<R>
-{
+impl<const R: usize> From<(TensorShape<R>, [usize; R])> for TensorIndex<R> {
     fn from((shape, indices): (TensorShape<R>, [usize; R])) -> Self {
         assert!(shape.check_indices(indices));
         Self::new(shape, indices)
     }
 }
 
-impl<const R: usize> From<(TensorShape<R>, usize)>
-    for TensorIndex<R>
-{
+impl<const R: usize> From<(TensorShape<R>, usize)> for TensorIndex<R> {
     fn from((shape, flat_index): (TensorShape<R>, usize)) -> Self {
         let indices = shape.index_from_flat(flat_index).indices;
         Self::new(shape, indices)
@@ -368,9 +361,7 @@ impl<const R: usize> From<TensorShape<R>> for TensorIndex<R> {
     }
 }
 
-impl<T: Value, const R: usize> From<Tensor<T, R>>
-    for TensorIndex<R>
-{
+impl<T: Value, const R: usize> From<Tensor<T, R>> for TensorIndex<R> {
     fn from(tensor: Tensor<T, R>) -> Self {
         Self::zero(tensor.shape().clone())
     }
src/lib.rs (filename not shown in the original rendering; inferred from the module list)

@@ -9,7 +9,7 @@ pub mod shape;
 pub mod tensor;
 pub mod value;
 
-pub use {value::*, axis::*, error::*, index::*, shape::*, tensor::*};
+pub use {axis::*, error::*, index::*, shape::*, tensor::*, value::*};
 
 #[macro_export]
 macro_rules! tensor {
@@ -27,9 +27,9 @@ macro_rules! shape {
 
 #[macro_export]
 macro_rules! index {
     ($tensor:expr) => {
         TensorIndex::zero($tensor.shape().clone())
     };
     ($tensor:expr, $indices:expr) => {
         TensorIndex::from(($tensor.shape().clone(), $indices))
     };
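The two `index!` arms mirror the `From` conversions in src/index.rs: one argument yields a zeroed cursor, two arguments validate explicit indices against the tensor's shape. A small usage sketch (the `tensor!` literal layout is assumed from the tests in this PR):

```rust
use manifold::*;

let t = tensor!([[1, 2], [3, 4]]);
let start = index!(t);        // TensorIndex at [0, 0]
let cell = index!(t, [1, 0]); // panics if the indices are out of bounds
assert_eq!(cell.flat(), 2);   // row-major: 1 * 2 + 0 * 1
```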
src/shape.rs (filename inferred)

@@ -1,8 +1,8 @@
 use super::*;
+use core::result::Result as SerdeResult;
 use serde::de::{self, Deserialize, Deserializer, SeqAccess, Visitor};
 use serde::ser::{Serialize, SerializeTuple, Serializer};
-use std::fmt::{Result as FmtResult, Formatter};
-use core::result::Result as SerdeResult;
+use std::fmt::{Formatter, Result as FmtResult};
 
 #[derive(Clone, Copy, Debug)]
 pub struct TensorShape<const R: usize>([usize; R]);
@@ -24,8 +24,8 @@ impl<const R: usize> TensorShape<R> {
         new_shape
     }
 
-    pub const fn as_array(&self) -> [usize; R] {
-        self.0
+    pub const fn as_array(&self) -> &[usize; R] {
+        &self.0
     }
 
     pub const fn rank(&self) -> usize {
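Returning `&[usize; R]` makes `as_array` copy-free; callers that need an owned array now clone explicitly, which is why `.clone()` appears at several call sites in this PR. A sketch of the distinction (constructor taken from the tests):

```rust
let shape = TensorShape::new([2, 3, 4]);
let borrowed: &[usize; 3] = shape.as_array(); // no copy
let owned: [usize; 3] = *shape.as_array();    // explicit copy where needed
assert_eq!(borrowed, &owned);
```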
src/tensor.rs: 172 lines changed

@@ -4,7 +4,10 @@ use getset::{Getters, MutGetters};
 use serde::{Deserialize, Serialize};
 use std::{
     fmt::{Display, Formatter, Result as FmtResult},
-    ops::{Index, IndexMut},
+    ops::{
+        Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Rem,
+        RemAssign, Sub, SubAssign,
+    },
 };
 
 /// A tensor is a multi-dimensional array of values. The rank of a tensor is the
@@ -35,7 +38,7 @@ impl<T: Value, const R: usize> Tensor<T, R> {
     /// use manifold::Tensor;
     ///
     /// let t = Tensor::<f64, 2>::new([3, 3].into());
-    /// assert_eq!(t.shape().as_array(), [3, 3]);
+    /// assert_eq!(t.shape().as_array(), &[3, 3]);
     /// ```
     pub fn new(shape: TensorShape<R>) -> Self {
         // Handle rank 0 tensor (scalar) as a special case
@@ -60,7 +63,7 @@ impl<T: Value, const R: usize> Tensor<T, R> {
     ///
     /// let buffer = vec![1, 2, 3, 4, 5, 6];
     /// let t = Tensor::<i32, 2>::new_with_buffer([2, 3].into(), buffer);
-    /// assert_eq!(t.shape().as_array(), [2, 3]);
+    /// assert_eq!(t.shape().as_array(), &[2, 3]);
     /// assert_eq!(t.buffer(), &[1, 2, 3, 4, 5, 6]);
     /// ```
     pub fn new_with_buffer(shape: TensorShape<R>, buffer: Vec<T>) -> Self {
@@ -413,6 +416,158 @@ impl<T: Value, const R: usize> Tensor<T, R> {
     }
 }
 
+// ---- Operations ------------------------------------------------------------
+
+impl<T: Value, const R: usize> Add for Tensor<T, R> {
+    type Output = Self;
+
+    fn add(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_add(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Sub for Tensor<T, R> {
+    type Output = Self;
+
+    fn sub(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_subtract(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Mul for Tensor<T, R> {
+    type Output = Self;
+
+    fn mul(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_multiply(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Div for Tensor<T, R> {
+    type Output = Self;
+
+    fn div(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_divide(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Rem for Tensor<T, R> {
+    type Output = Self;
+
+    fn rem(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_modulo(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> AddAssign for Tensor<T, R> {
+    fn add_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_add(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> SubAssign for Tensor<T, R> {
+    fn sub_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_subtract(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> MulAssign for Tensor<T, R> {
+    fn mul_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_multiply(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> DivAssign for Tensor<T, R> {
+    fn div_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_divide(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> RemAssign for Tensor<T, R> {
+    fn rem_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_modulo(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
 // ---- Indexing --------------------------------------------------------------
 
 impl<T: Value, const R: usize> Index<TensorIndex<R>> for Tensor<T, R> {
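With these impls, element-wise arithmetic reads as plain operators that consume their operands, and the assigning variants rebuild `self` in place; mismatched shapes currently hit the `todo!` branch rather than broadcasting. A minimal usage sketch (literal layout assumed from the benchmark):

```rust
use manifold::*;

let a = tensor!([[1.0, 2.0], [3.0, 4.0]]);
let b = tensor!([[10.0, 20.0], [30.0, 40.0]]);

let mut c = a + b;                      // Add, via ew_add
c -= tensor!([[1.0, 1.0], [1.0, 1.0]]); // SubAssign, via ew_subtract
assert_eq!(c.buffer(), &[10.0, 21.0, 32.0, 43.0]);
```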
@@ -423,9 +578,7 @@ impl<T: Value, const R: usize> Index<TensorIndex<R>> for Tensor<T, R> {
     }
 }
 
-impl<T: Value, const R: usize> IndexMut<TensorIndex<R>>
-    for Tensor<T, R>
-{
+impl<T: Value, const R: usize> IndexMut<TensorIndex<R>> for Tensor<T, R> {
     fn index_mut(&mut self, index: TensorIndex<R>) -> &mut Self::Output {
         &mut self.buffer[index.flat()]
     }
@@ -479,7 +632,12 @@ where
     T: Display + Clone,
 {
     fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
-        Tensor::<T, R>::fmt_helper(&self.buffer, &self.shape.as_array(), f, 1)
+        Tensor::<T, R>::fmt_helper(
+            &self.buffer,
+            &self.shape().as_array().clone(),
+            f,
+            1,
+        )
     }
 }
 
src/value.rs (filename inferred)

@@ -1,9 +1,6 @@
 use num::{Num, One, Zero};
 use serde::{Deserialize, Serialize};
-use std::{
-    fmt::Display,
-    iter::Sum,
-};
+use std::{fmt::Display, iter::Sum};
 
 /// A trait for types that can be used as values in a tensor.
 pub trait Value:
tests/basic_tests.rs (new file): 261 lines

@@ -0,0 +1,261 @@
+use manifold::*;
+
+use serde_json;
+
+#[test]
+fn test_serde_shape_serialization() {
+    // Create a shape instance
+    let shape: TensorShape<3> = [1, 2, 3].into();
+
+    // Serialize the shape to a JSON string
+    let serialized =
+        serde_json::to_string(&shape).expect("Failed to serialize");
+
+    // Deserialize the JSON string back into a shape
+    let deserialized: TensorShape<3> =
+        serde_json::from_str(&serialized).expect("Failed to deserialize");
+
+    // Check that the deserialized shape is equal to the original
+    assert_eq!(shape, deserialized);
+}
+
+#[test]
+fn test_tensor_serde_serialization() {
+    // Create an instance of Tensor
+    let tensor: Tensor<i32, 2> = Tensor::new(TensorShape::new([2, 2]));
+
+    // Serialize the Tensor to a JSON string
+    let serialized =
+        serde_json::to_string(&tensor).expect("Failed to serialize");
+
+    // Deserialize the JSON string back into a Tensor
+    let deserialized: Tensor<i32, 2> =
+        serde_json::from_str(&serialized).expect("Failed to deserialize");
+
+    // Check that the deserialized Tensor is equal to the original
+    assert_eq!(tensor.buffer(), deserialized.buffer());
+    assert_eq!(tensor.shape(), deserialized.shape());
+}
+
+#[test]
+fn test_iterating_3d_tensor() {
+    let shape = TensorShape::new([2, 2, 2]); // 3D tensor with shape 2x2x2
+    let mut tensor = Tensor::new(shape);
+    let mut num = 0;
+
+    // Fill the tensor with sequential numbers
+    for i in 0..2 {
+        for j in 0..2 {
+            for k in 0..2 {
+                tensor.buffer_mut()[i * 4 + j * 2 + k] = num;
+                num += 1;
+            }
+        }
+    }
+
+    println!("{}", tensor);
+
+    // Iterate over the tensor and check that the numbers are correct
+
+    let mut iter = TensorIterator::new(&tensor);
+
+    println!("{}", iter);
+
+    assert_eq!(iter.next(), Some(&0));
+
+    assert_eq!(iter.next(), Some(&1));
+    assert_eq!(iter.next(), Some(&2));
+    assert_eq!(iter.next(), Some(&3));
+    assert_eq!(iter.next(), Some(&4));
+    assert_eq!(iter.next(), Some(&5));
+    assert_eq!(iter.next(), Some(&6));
+    assert_eq!(iter.next(), Some(&7));
+    assert_eq!(iter.next(), None);
+    assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn test_iterating_rank_4_tensor() {
+    // Define the shape of the rank-4 tensor (e.g., 2x2x2x2)
+    let shape = TensorShape::new([2, 2, 2, 2]);
+    let mut tensor = Tensor::new(shape);
+    let mut num = 0;
+
+    // Fill the tensor with sequential numbers
+    for i in 0..tensor.len() {
+        tensor.buffer_mut()[i] = num;
+        num += 1;
+    }
+
+    // Iterate over the tensor and check that the numbers are correct
+    let mut iter = TensorIterator::new(&tensor);
+    for expected_value in 0..tensor.len() {
+        assert_eq!(*iter.next().unwrap(), expected_value);
+    }
+
+    // Ensure the iterator is exhausted
+    assert!(iter.next().is_none());
+}
+
+#[test]
+fn test_index_dec_method() {
+    let shape = TensorShape::new([3, 3, 3]); // Example shape for a 3x3x3 tensor
+    let mut index = TensorIndex::zero(shape);
+
+    // Increment the index to the maximum
+    for _ in 0..26 {
+        // 3 * 3 * 3 - 1 = 26 increments to reach the end
+        index.inc();
+    }
+
+    // Check if the index is at the maximum
+    assert_eq!(index, TensorIndex::new(shape, [2, 2, 2]));
+
+    // Decrement step by step and check the index
+    let expected_indices = [
+        [2, 2, 2],
+        [2, 2, 1],
+        [2, 2, 0],
+        [2, 1, 2],
+        [2, 1, 1],
+        [2, 1, 0],
+        [2, 0, 2],
+        [2, 0, 1],
+        [2, 0, 0],
+        [1, 2, 2],
+        [1, 2, 1],
+        [1, 2, 0],
+        [1, 1, 2],
+        [1, 1, 1],
+        [1, 1, 0],
+        [1, 0, 2],
+        [1, 0, 1],
+        [1, 0, 0],
+        [0, 2, 2],
+        [0, 2, 1],
+        [0, 2, 0],
+        [0, 1, 2],
+        [0, 1, 1],
+        [0, 1, 0],
+        [0, 0, 2],
+        [0, 0, 1],
+        [0, 0, 0],
+    ];
+
+    for (i, &expected) in expected_indices.iter().enumerate() {
+        assert_eq!(
+            index,
+            TensorIndex::new(shape, expected),
+            "Failed at index {}",
+            i
+        );
+        index.dec();
+    }
+
+    // Finally, the index should reach [0, 0, 0]
+    index.dec();
+    assert_eq!(index, TensorIndex::zero(shape));
+}
+
+#[test]
+fn test_axis_iterator() {
+    // Creating a 2x2 Tensor for testing
+    let tensor = Tensor::from([[1.0, 2.0], [3.0, 4.0]]);
+
+    // Testing iteration over the first axis (axis = 0)
+    let axis = TensorAxis::new(&tensor, 0);
+
+    let mut axis_iter = axis.into_iter();
+
+    assert_eq!(axis_iter.next(), Some(&1.0));
+    assert_eq!(axis_iter.next(), Some(&2.0));
+    assert_eq!(axis_iter.next(), Some(&3.0));
+    assert_eq!(axis_iter.next(), Some(&4.0));
+
+    // Resetting the iterator for the second axis (axis = 1)
+    let axis = TensorAxis::new(&tensor, 1);
+
+    let mut axis_iter = axis.into_iter();
+
+    assert_eq!(axis_iter.next(), Some(&1.0));
+    assert_eq!(axis_iter.next(), Some(&3.0));
+    assert_eq!(axis_iter.next(), Some(&2.0));
+    assert_eq!(axis_iter.next(), Some(&4.0));
+
+    let shape = tensor.shape();
+
+    let mut a: TensorIndex<2> = (shape.clone(), [0, 0]).into();
+    let b: TensorIndex<2> = (shape.clone(), [1, 1]).into();
+
+    while a <= b {
+        println!("a: {}", a);
+        a.inc();
+    }
+}
+
+#[test]
+fn test_3d_tensor_axis_iteration() {
+    // Create a 3D Tensor with specific values
+    // Tensor shape is 2x2x2 for simplicity
+    let t = Tensor::from([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]);
+
+    // TensorAxis 0 (Layer-wise):
+    //
+    // t[0][0][0] = 1
+    // t[0][0][1] = 2
+    // t[0][1][0] = 3
+    // t[0][1][1] = 4
+    // t[1][0][0] = 5
+    // t[1][0][1] = 6
+    // t[1][1][0] = 7
+    // t[1][1][1] = 8
+    // [1, 2, 3, 4, 5, 6, 7, 8]
+    //
+    // This order suggests that for each "layer" (first level of arrays),
+    // the iterator goes through all rows and columns. It first completes
+    // the entire first layer, then moves to the second.
+
+    let a0 = TensorAxis::new(&t, 0);
+    let a0_order = a0.into_iter().cloned().collect::<Vec<_>>();
+    assert_eq!(a0_order, [1, 2, 3, 4, 5, 6, 7, 8]);
+
+    // TensorAxis 1 (Row-wise within each layer):
+    //
+    // t[0][0][0] = 1
+    // t[0][0][1] = 2
+    // t[1][0][0] = 5
+    // t[1][0][1] = 6
+    // t[0][1][0] = 3
+    // t[0][1][1] = 4
+    // t[1][1][0] = 7
+    // t[1][1][1] = 8
+    // [1, 2, 5, 6, 3, 4, 7, 8]
+    //
+    // This indicates that within each "layer", the iterator first
+    // completes the first row across all layers, then the second row
+    // across all layers.
+
+    let a1 = TensorAxis::new(&t, 1);
+    let a1_order = a1.into_iter().cloned().collect::<Vec<_>>();
+    assert_eq!(a1_order, [1, 2, 5, 6, 3, 4, 7, 8]);
+
+    // TensorAxis 2 (Column-wise within each layer):
+    //
+    // t[0][0][0] = 1
+    // t[0][1][0] = 3
+    // t[1][0][0] = 5
+    // t[1][1][0] = 7
+    // t[0][0][1] = 2
+    // t[0][1][1] = 4
+    // t[1][0][1] = 6
+    // t[1][1][1] = 8
+    // [1, 3, 5, 7, 2, 4, 6, 8]
+    //
+    // This indicates that within each "layer", the iterator first
+    // completes the first column across all layers, then the second
+    // column across all layers.
+
+    let a2 = TensorAxis::new(&t, 2);
+    let a2_order = a2.into_iter().cloned().collect::<Vec<_>>();
+    assert_eq!(a2_order, [1, 3, 5, 7, 2, 4, 6, 8]);
+}
tests/mod.rs (new file): 1 line

@@ -0,0 +1 @@
+mod basic_tests;
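For completeness: the new tests run under `cargo test` (Cargo picks up each top-level file in tests/ as its own integration-test crate), while the benchmark runs under `cargo bench`. Since basic_tests.rs is discovered on its own, the `mod basic_tests;` declaration in tests/mod.rs likely causes the file to be compiled a second time as a module of that crate; dropping one of the two would avoid the duplication.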