From 9b5330151394fe9e8dd4c6ec2393257bbacde516 Mon Sep 17 00:00:00 2001
From: Julius Koskela
Date: Thu, 4 Jan 2024 11:59:31 +0000
Subject: [PATCH] =?UTF-8?q?=F0=9F=94=A7=20Tests=20and=20benchmarks=20(#16)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Introduce benchmarks with Criterion
- Introduce separate tests context
- Small corrections to types

Reviewed-on: https://nordic-dev.net/julius/manifold/pulls/16
Co-authored-by: Julius Koskela
Co-committed-by: Julius Koskela
---
 Cargo.lock                    |  30 ++++
 Cargo.toml                    |  14 +-
 benches/manifold_benchmark.rs |  66 +++++++++
 src/index.rs                  |  29 ++--
 src/lib.rs                    |   8 +-
 src/shape.rs                  |   8 +-
 src/tensor.rs                 | 172 +++++++++++++++++++++-
 src/value.rs                  |   7 +-
 tests/basic_tests.rs          | 261 ++++++++++++++++++++++++++++++++++
 tests/mod.rs                  |   1 +
 10 files changed, 551 insertions(+), 45 deletions(-)
 create mode 100644 benches/manifold_benchmark.rs
 create mode 100644 tests/basic_tests.rs
 create mode 100644 tests/mod.rs

diff --git a/Cargo.lock b/Cargo.lock
index c86a244..5bbfa80 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -299,6 +299,7 @@ dependencies = [
  "criterion",
  "getset",
  "itertools 0.12.0",
+ "ndarray",
  "num",
  "rand",
  "serde",
@@ -307,12 +308,35 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "matrixmultiply"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7574c1cf36da4798ab73da5b215bbf444f50718207754cb522201d78d1cd0ff2"
+dependencies = [
+ "autocfg",
+ "rawpointer",
+]
+
 [[package]]
 name = "memchr"
 version = "2.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
 
+[[package]]
+name = "ndarray"
+version = "0.15.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adb12d4e967ec485a5f71c6311fe28158e9d6f4bc4a447b474184d0f91a8fa32"
+dependencies = [
+ "matrixmultiply",
+ "num-complex",
+ "num-integer",
+ "num-traits",
+ "rawpointer",
+]
+
 [[package]]
 name = "num"
 version = "0.4.1"
@@ -507,6 +531,12 @@ dependencies = [
  "getrandom",
 ]
 
+[[package]]
+name = "rawpointer"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
+
 [[package]]
 name = "rayon"
 version = "1.8.0"
diff --git a/Cargo.toml b/Cargo.toml
index 040901a..22566c0 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,10 +6,7 @@ license = "MIT/Apache-2.0"
 authors = ["Julius Koskela"]
 
 description = """
-GDSL is a graph data-structure library including graph containers,
-connected node strutures and efficient algorithms on those structures.
-Nodes are independent of a graph container and can be used as connected
-smart pointers.
+Manifold is a Tensor library for Rust.
 """
 
 repository = "https://nordic-dev.net/julius/manifold"
 
@@ -23,10 +20,15 @@ getset = "0.1.2"
 itertools = "0.12.0"
 num = "0.4.1"
 serde = { version = "1.0.193", features = ["derive"] }
-serde_json = "1.0.108"
-static_assertions = "1.1.0"
 thiserror = "1.0.52"
 
 [dev-dependencies]
 rand = "0.8.5"
 criterion = "0.5.1"
+serde_json = "1.0.108"
+static_assertions = "1.1.0"
+ndarray = "0.15.6"
+
+[[bench]]
+name = "manifold_benchmark"
+harness = false
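
Note: with harness = false in the [[bench]] section above, Criterion supplies
its own benchmark runner in place of the default libtest harness, so the suite
added below is executed with, for example:

    cargo bench --bench manifold_benchmark
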
""" repository = "https://nordic-dev.net/julius/manifold" @@ -23,10 +20,15 @@ getset = "0.1.2" itertools = "0.12.0" num = "0.4.1" serde = { version = "1.0.193", features = ["derive"] } -serde_json = "1.0.108" -static_assertions = "1.1.0" thiserror = "1.0.52" [dev-dependencies] rand = "0.8.5" criterion = "0.5.1" +serde_json = "1.0.108" +static_assertions = "1.1.0" +ndarray = "0.15.6" + +[[bench]] +name = "manifold_benchmark" +harness = false diff --git a/benches/manifold_benchmark.rs b/benches/manifold_benchmark.rs new file mode 100644 index 0000000..59e4d62 --- /dev/null +++ b/benches/manifold_benchmark.rs @@ -0,0 +1,66 @@ +use criterion::Throughput; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use manifold::*; +use rand::Rng; + +fn random_tensor_r2_manifold() -> Tensor { + let mut rng = rand::thread_rng(); + let mut tensor = tensor!([[0.0; 1000]; 1000]); + for i in 0..tensor.len() { + tensor[i] = rng.gen(); + } + tensor +} + +fn random_tensor_r2_ndarray() -> ndarray::Array2 { + let mut rng = rand::thread_rng(); + let (rows, cols) = (1000, 1000); + let mut tensor = ndarray::Array2::::zeros((rows, cols)); + for i in 0..rows { + for j in 0..cols { + tensor[[i, j]] = rng.gen(); + } + } + tensor +} + +fn tensor_product(c: &mut Criterion) { + let b = 1000; + + let mut group = c.benchmark_group("element-wise addition"); + + for (i, size) in [b].iter().enumerate() { + group.throughput(Throughput::Elements(*size as u64)); + + group.bench_with_input( + BenchmarkId::new("manifold", size), + &i, + |b, _| { + b.iter(|| { + let a = random_tensor_r2_manifold(); + let b = random_tensor_r2_manifold(); + let c = a + b; + assert!(c.shape().as_array() == &[1000, 1000]); + }) + }, + ); + + group.bench_with_input( + BenchmarkId::new("ndarray", size), + &i, + |b, _| { + b.iter(|| { + let a = random_tensor_r2_ndarray(); + let b = random_tensor_r2_ndarray(); + let c = a + b; + assert!(c.shape() == &[1000, 1000]); + }) + }, + ); + } + group.finish(); +} + +criterion_group!(benches, tensor_product); + +criterion_main!(benches); diff --git a/src/index.rs b/src/index.rs index ac91c6c..4632e6e 100644 --- a/src/index.rs +++ b/src/index.rs @@ -1,8 +1,8 @@ use super::*; use getset::{Getters, MutGetters}; use std::{ - ops::{Index, IndexMut, Add, Sub}, - cmp::Ordering, + cmp::Ordering, + ops::{Add, Index, IndexMut, Sub}, }; #[derive(Clone, Copy, Debug, Getters, MutGetters)] @@ -16,7 +16,6 @@ pub struct TensorIndex { // ---- Construction and Initialization --------------------------------------- impl TensorIndex { - pub fn new(shape: TensorShape, indices: [usize; R]) -> Self { if !shape.check_indices(indices) { panic!("indices out of bounds"); @@ -65,10 +64,9 @@ impl TensorIndex { if self.indices()[0] >= self.shape().get(0) { return false; } + let shape = self.shape().as_array().clone(); let mut carry = 1; - for (i, &dim_size) in - self.indices.iter_mut().zip(&self.shape.as_array()).rev() - { + for (i, &dim_size) in self.indices.iter_mut().zip(&shape).rev() { if carry == 1 { *i += 1; if *i >= dim_size { @@ -158,9 +156,8 @@ impl TensorIndex { } let mut borrow = true; - for (i, &dim_size) in - self.indices.iter_mut().zip(&self.shape.as_array()).rev() - { + let shape = self.shape().as_array().clone(); + for (i, &dim_size) in self.indices_mut().iter_mut().zip(&shape).rev() { if borrow { if *i == 0 { *i = dim_size - 1; // Wrap around to the maximum index of @@ -271,7 +268,7 @@ impl TensorIndex { pub fn flat(&self) -> usize { self.indices() .iter() - .zip(&self.shape().as_array()) + 
diff --git a/src/lib.rs b/src/lib.rs
index 3234e67..396c220 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,7 +9,7 @@ pub mod shape;
 pub mod tensor;
 pub mod value;
 
-pub use {value::*, axis::*, error::*, index::*, shape::*, tensor::*};
+pub use {axis::*, error::*, index::*, shape::*, tensor::*, value::*};
 
 #[macro_export]
 macro_rules! tensor {
@@ -27,9 +27,9 @@ macro_rules! shape {
 
 #[macro_export]
 macro_rules! index {
-    ($tensor:expr) => {
-        TensorIndex::zero($tensor.shape().clone())
-    };
+    ($tensor:expr) => {
+        TensorIndex::zero($tensor.shape().clone())
+    };
     ($tensor:expr, $indices:expr) => {
         TensorIndex::from(($tensor.shape().clone(), $indices))
     };
diff --git a/src/shape.rs b/src/shape.rs
index 06c65c2..a26112f 100644
--- a/src/shape.rs
+++ b/src/shape.rs
@@ -1,8 +1,8 @@
 use super::*;
+use core::result::Result as SerdeResult;
 use serde::de::{self, Deserialize, Deserializer, SeqAccess, Visitor};
 use serde::ser::{Serialize, SerializeTuple, Serializer};
-use std::fmt::{Result as FmtResult, Formatter};
-use core::result::Result as SerdeResult;
+use std::fmt::{Formatter, Result as FmtResult};
 
 #[derive(Clone, Copy, Debug)]
 pub struct TensorShape<const R: usize>([usize; R]);
@@ -24,8 +24,8 @@ impl<const R: usize> TensorShape<R> {
         new_shape
     }
 
-    pub const fn as_array(&self) -> [usize; R] {
-        self.0
+    pub const fn as_array(&self) -> &[usize; R] {
+        &self.0
     }
 
     pub const fn rank(&self) -> usize {
diff --git a/src/tensor.rs b/src/tensor.rs
index b40784d..6224859 100644
--- a/src/tensor.rs
+++ b/src/tensor.rs
@@ -4,7 +4,10 @@ use getset::{Getters, MutGetters};
 use serde::{Deserialize, Serialize};
 use std::{
     fmt::{Display, Formatter, Result as FmtResult},
-    ops::{Index, IndexMut},
+    ops::{
+        Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Rem,
+        RemAssign, Sub, SubAssign,
+    },
 };
 
 /// A tensor is a multi-dimensional array of values. The rank of a tensor is the
@@ -35,7 +38,7 @@ impl<T: Value, const R: usize> Tensor<T, R> {
     /// use manifold::Tensor;
     ///
     /// let t = Tensor::<i32, 2>::new([3, 3].into());
-    /// assert_eq!(t.shape().as_array(), [3, 3]);
+    /// assert_eq!(t.shape().as_array(), &[3, 3]);
     /// ```
     pub fn new(shape: TensorShape<R>) -> Self {
         // Handle rank 0 tensor (scalar) as a special case
@@ -60,7 +63,7 @@ impl<T: Value, const R: usize> Tensor<T, R> {
     ///
     /// let buffer = vec![1, 2, 3, 4, 5, 6];
     /// let t = Tensor::<i32, 2>::new_with_buffer([2, 3].into(), buffer);
-    /// assert_eq!(t.shape().as_array(), [2, 3]);
+    /// assert_eq!(t.shape().as_array(), &[2, 3]);
     /// assert_eq!(t.buffer(), &[1, 2, 3, 4, 5, 6]);
     /// ```
     pub fn new_with_buffer(shape: TensorShape<R>, buffer: Vec<T>) -> Self {
@@ -413,6 +416,158 @@ impl<T: Value, const R: usize> Tensor<T, R> {
     }
 }
 
+// ---- Operations ------------------------------------------------------------
+
+impl<T: Value, const R: usize> Add for Tensor<T, R> {
+    type Output = Self;
+
+    fn add(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_add(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Sub for Tensor<T, R> {
+    type Output = Self;
+
+    fn sub(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_subtract(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Mul for Tensor<T, R> {
+    type Output = Self;
+
+    fn mul(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_multiply(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Div for Tensor<T, R> {
+    type Output = Self;
+
+    fn div(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_divide(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> Rem for Tensor<T, R> {
+    type Output = Self;
+
+    fn rem(self, other: Self) -> Self::Output {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_modulo(&self, &other, &mut result).unwrap();
+
+        result
+    }
+}
+
+impl<T: Value, const R: usize> AddAssign for Tensor<T, R> {
+    fn add_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_add(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> SubAssign for Tensor<T, R> {
+    fn sub_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_subtract(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> MulAssign for Tensor<T, R> {
+    fn mul_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_multiply(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> DivAssign for Tensor<T, R> {
+    fn div_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_divide(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
+impl<T: Value, const R: usize> RemAssign for Tensor<T, R> {
+    fn rem_assign(&mut self, other: Self) {
+        if self.shape() != other.shape() {
+            todo!("Check for broadcasting");
+        }
+
+        let mut result = Self::new(self.shape().clone());
+
+        Self::ew_modulo(&self, &other, &mut result).unwrap();
+
+        *self = result;
+    }
+}
+
 // ---- Indexing --------------------------------------------------------------
 
 impl<T: Value, const R: usize> Index<TensorIndex<R>> for Tensor<T, R> {
@@ -423,9 +578,7 @@ impl<T: Value, const R: usize> Index<TensorIndex<R>> for Tensor<T, R> {
     }
 }
 
-impl<T: Value, const R: usize> IndexMut<TensorIndex<R>>
-    for Tensor<T, R>
-{
+impl<T: Value, const R: usize> IndexMut<TensorIndex<R>> for Tensor<T, R> {
     fn index_mut(&mut self, index: TensorIndex<R>) -> &mut Self::Output {
         &mut self.buffer[index.flat()]
     }
 }
@@ -479,7 +632,12 @@ where
     T: Display + Clone,
 {
     fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
-        Tensor::<T, R>::fmt_helper(&self.buffer, &self.shape.as_array(), f, 1)
+        Tensor::<T, R>::fmt_helper(
+            &self.buffer,
+            &self.shape().as_array().clone(),
+            f,
+            1,
+        )
     }
 }
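
The operator overloads above all follow the same pattern: check shapes,
allocate the result, delegate to the element-wise ew_* helper. A minimal usage
sketch (shapes must match, since mismatched shapes currently hit the todo!()
branch rather than broadcasting):

    use manifold::*;

    fn main() {
        let a = tensor!([[1.0, 2.0], [3.0, 4.0]]);
        let b = tensor!([[5.0, 6.0], [7.0, 8.0]]);
        let c = a + b; // element-wise addition via Tensor::ew_add
        assert_eq!(c.shape().as_array(), &[2, 2]);
    }
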
diff --git a/src/value.rs b/src/value.rs
index 54c9eb2..ae93e38 100644
--- a/src/value.rs
+++ b/src/value.rs
@@ -1,9 +1,6 @@
 use num::{Num, One, Zero};
 use serde::{Deserialize, Serialize};
-use std::{
-    fmt::Display,
-    iter::Sum,
-};
+use std::{fmt::Display, iter::Sum};
 
 /// A trait for types that can be used as values in a tensor.
 pub trait Value:
@@ -22,4 +19,4 @@ impl<T> Value for T where
     + Deserialize<'static>
     + Sum
 {
-}
\ No newline at end of file
+}
diff --git a/tests/basic_tests.rs b/tests/basic_tests.rs
new file mode 100644
index 0000000..c96996f
--- /dev/null
+++ b/tests/basic_tests.rs
@@ -0,0 +1,261 @@
+use manifold::*;
+
+use serde_json;
+
+#[test]
+fn test_serde_shape_serialization() {
+    // Create a shape instance
+    let shape: TensorShape<3> = [1, 2, 3].into();
+
+    // Serialize the shape to a JSON string
+    let serialized =
+        serde_json::to_string(&shape).expect("Failed to serialize");
+
+    // Deserialize the JSON string back into a shape
+    let deserialized: TensorShape<3> =
+        serde_json::from_str(&serialized).expect("Failed to deserialize");
+
+    // Check that the deserialized shape is equal to the original
+    assert_eq!(shape, deserialized);
+}
+
+#[test]
+fn test_tensor_serde_serialization() {
+    // Create an instance of Tensor
+    let tensor: Tensor<f64, 2> = Tensor::new(TensorShape::new([2, 2]));
+
+    // Serialize the Tensor to a JSON string
+    let serialized =
+        serde_json::to_string(&tensor).expect("Failed to serialize");
+
+    // Deserialize the JSON string back into a Tensor
+    let deserialized: Tensor<f64, 2> =
+        serde_json::from_str(&serialized).expect("Failed to deserialize");
+
+    // Check that the deserialized Tensor is equal to the original
+    assert_eq!(tensor.buffer(), deserialized.buffer());
+    assert_eq!(tensor.shape(), deserialized.shape());
+}
+
+#[test]
+fn test_iterating_3d_tensor() {
+    let shape = TensorShape::new([2, 2, 2]); // 3D tensor with shape 2x2x2
+    let mut tensor = Tensor::new(shape);
+    let mut num = 0;
+
+    // Fill the tensor with sequential numbers
+    for i in 0..2 {
+        for j in 0..2 {
+            for k in 0..2 {
+                tensor.buffer_mut()[i * 4 + j * 2 + k] = num;
+                num += 1;
+            }
+        }
+    }
+
+    println!("{}", tensor);
+
+    // Iterate over the tensor and check that the numbers are correct
+
+    let mut iter = TensorIterator::new(&tensor);
+
+    println!("{}", iter);
+
+    assert_eq!(iter.next(), Some(&0));
+
+    assert_eq!(iter.next(), Some(&1));
+    assert_eq!(iter.next(), Some(&2));
+    assert_eq!(iter.next(), Some(&3));
+    assert_eq!(iter.next(), Some(&4));
+    assert_eq!(iter.next(), Some(&5));
+    assert_eq!(iter.next(), Some(&6));
+    assert_eq!(iter.next(), Some(&7));
+    assert_eq!(iter.next(), None);
+    assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn test_iterating_rank_4_tensor() {
+    // Define the shape of the rank-4 tensor (e.g., 2x2x2x2)
+    let shape = TensorShape::new([2, 2, 2, 2]);
+    let mut tensor = Tensor::new(shape);
+    let mut num = 0;
+
+    // Fill the tensor with sequential numbers
+    for i in 0..tensor.len() {
+        tensor.buffer_mut()[i] = num;
+        num += 1;
+    }
+
+    // Iterate over the tensor and check that the numbers are correct
+    let mut iter = TensorIterator::new(&tensor);
+    for expected_value in 0..tensor.len() {
+        assert_eq!(*iter.next().unwrap(), expected_value);
+    }
+
+    // Ensure the iterator is exhausted
+    assert!(iter.next().is_none());
+}
+
+#[test]
+fn test_index_dec_method() {
+    let shape = TensorShape::new([3, 3, 3]); // Example shape for a 3x3x3 tensor
+    let mut index = TensorIndex::zero(shape);
+
+    // Increment the index to the maximum
+    for _ in 0..26 {
+        // 3 * 3 * 3 - 1 = 26 increments to reach the end
+        index.inc();
+    }
+
+    // Check if the index is at the maximum
+    assert_eq!(index, TensorIndex::new(shape, [2, 2, 2]));
+
+    // Decrement step by step and check the index
+    let expected_indices = [
+        [2, 2, 2],
+        [2, 2, 1],
+        [2, 2, 0],
+        [2, 1, 2],
+        [2, 1, 1],
+        [2, 1, 0],
+        [2, 0, 2],
+        [2, 0, 1],
+        [2, 0, 0],
+        [1, 2, 2],
+        [1, 2, 1],
+        [1, 2, 0],
+        [1, 1, 2],
+        [1, 1, 1],
+        [1, 1, 0],
+        [1, 0, 2],
+        [1, 0, 1],
+        [1, 0, 0],
+        [0, 2, 2],
+        [0, 2, 1],
+        [0, 2, 0],
+        [0, 1, 2],
+        [0, 1, 1],
+        [0, 1, 0],
+        [0, 0, 2],
+        [0, 0, 1],
+        [0, 0, 0],
+    ];
+
+    for (i, &expected) in expected_indices.iter().enumerate() {
+        assert_eq!(
+            index,
+            TensorIndex::new(shape, expected),
+            "Failed at index {}",
+            i
+        );
+        index.dec();
+    }
+
+    // Finally, the index should reach [0, 0, 0]
+    index.dec();
+    assert_eq!(index, TensorIndex::zero(shape));
+}
+
+#[test]
+fn test_axis_iterator() {
+    // Creating a 2x2 Tensor for testing
+    let tensor = Tensor::from([[1.0, 2.0], [3.0, 4.0]]);
+
+    // Testing iteration over the first axis (axis = 0)
+    let axis = TensorAxis::new(&tensor, 0);
+
+    let mut axis_iter = axis.into_iter();
+
+    assert_eq!(axis_iter.next(), Some(&1.0));
+    assert_eq!(axis_iter.next(), Some(&2.0));
+    assert_eq!(axis_iter.next(), Some(&3.0));
+    assert_eq!(axis_iter.next(), Some(&4.0));
+
+    // Resetting the iterator for the second axis (axis = 1)
+    let axis = TensorAxis::new(&tensor, 1);
+
+    let mut axis_iter = axis.into_iter();
+
+    assert_eq!(axis_iter.next(), Some(&1.0));
+    assert_eq!(axis_iter.next(), Some(&3.0));
+    assert_eq!(axis_iter.next(), Some(&2.0));
+    assert_eq!(axis_iter.next(), Some(&4.0));
+
+    let shape = tensor.shape();
+
+    let mut a: TensorIndex<2> = (shape.clone(), [0, 0]).into();
+    let b: TensorIndex<2> = (shape.clone(), [1, 1]).into();
+
+    while a <= b {
+        println!("a: {}", a);
+        a.inc();
+    }
+}
+
+#[test]
+fn test_3d_tensor_axis_iteration() {
+    // Create a 3D Tensor with specific values
+    // Tensor shape is 2x2x2 for simplicity
+    let t = Tensor::from([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]);
+
+    // TensorAxis 0 (Layer-wise):
+    //
+    // t[0][0][0] = 1
+    // t[0][0][1] = 2
+    // t[0][1][0] = 3
+    // t[0][1][1] = 4
+    // t[1][0][0] = 5
+    // t[1][0][1] = 6
+    // t[1][1][0] = 7
+    // t[1][1][1] = 8
+    // [1, 2, 3, 4, 5, 6, 7, 8]
+    //
+    // This order suggests that for each "layer" (first level of arrays),
+    // the iterator goes through all rows and columns. It first completes
+    // the entire first layer, then moves to the second.
+
+    let a0 = TensorAxis::new(&t, 0);
+    let a0_order = a0.into_iter().cloned().collect::<Vec<_>>();
+    assert_eq!(a0_order, [1, 2, 3, 4, 5, 6, 7, 8]);
+
+    // TensorAxis 1 (Row-wise within each layer):
+    //
+    // t[0][0][0] = 1
+    // t[0][0][1] = 2
+    // t[1][0][0] = 5
+    // t[1][0][1] = 6
+    // t[0][1][0] = 3
+    // t[0][1][1] = 4
+    // t[1][1][0] = 7
+    // t[1][1][1] = 8
+    // [1, 2, 5, 6, 3, 4, 7, 8]
+    //
+    // This indicates that within each "layer", the iterator first
+    // completes the first row across all layers, then the second row
+    // across all layers.
+
+    let a1 = TensorAxis::new(&t, 1);
+    let a1_order = a1.into_iter().cloned().collect::<Vec<_>>();
+    assert_eq!(a1_order, [1, 2, 5, 6, 3, 4, 7, 8]);
+
+    // TensorAxis 2 (Column-wise within each layer):
+    //
+    // t[0][0][0] = 1
+    // t[0][1][0] = 3
+    // t[1][0][0] = 5
+    // t[1][1][0] = 7
+    // t[0][0][1] = 2
+    // t[0][1][1] = 4
+    // t[1][0][1] = 6
+    // t[1][1][1] = 8
+    // [1, 3, 5, 7, 2, 4, 6, 8]
+    //
+    // This indicates that within each "layer", the iterator first
+    // completes the first column across all layers, then the second
+    // column across all layers.
+
+    let a2 = TensorAxis::new(&t, 2);
+    let a2_order = a2.into_iter().cloned().collect::<Vec<_>>();
+    assert_eq!(a2_order, [1, 3, 5, 7, 2, 4, 6, 8]);
+}
diff --git a/tests/mod.rs b/tests/mod.rs
new file mode 100644
index 0000000..ddf7753
--- /dev/null
+++ b/tests/mod.rs
@@ -0,0 +1 @@
+mod basic_tests;
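
The three axis orders asserted in test_3d_tensor_axis_iteration reduce to a
short sketch (same 2x2x2 tensor as the test; illustrative only):

    use manifold::*;

    fn main() {
        let t = Tensor::from([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]);
        // Axis 0 yields the buffer in storage order; axis 2 walks each
        // column position across all layers before moving to the next.
        let a0: Vec<_> = TensorAxis::new(&t, 0).into_iter().cloned().collect();
        let a2: Vec<_> = TensorAxis::new(&t, 2).into_iter().cloned().collect();
        assert_eq!(a0, [1, 2, 3, 4, 5, 6, 7, 8]);
        assert_eq!(a2, [1, 3, 5, 7, 2, 4, 6, 8]);
    }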