refactor: cleanup backends

nathaniel 2022-07-27 11:56:30 -04:00
parent fbf5fe3dda
commit 7698dd7684
28 changed files with 112 additions and 708 deletions
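In short: the arrayfire backend is removed outright (its device handling, the add/index/matmul/mul/neg/reshape ops, and the Shape/Dim4 conversions), ndarray becomes an optional backend behind a Cargo feature alongside tch, the Tensor trait impls move out of the central ops module into each backend's own file, and the autodiff test helper ADTchTensor is renamed to TestADTensor, with feature-gated implementations so the same tests run against whichever backend is compiled in.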

View File

@ -4,20 +4,19 @@ version = "0.1.0"
edition = "2021"
[features]
default = ["tch"]
full = ["arrayfire", "tch"]
arrayfire = ["dep:arrayfire"]
default = ["full"]
full = ["tch", "ndarray"]
tch = ["dep:tch"]
ndarray = ["dep:ndarray"]
[dependencies]
derive-new = "0.5"
rand = "0.8"
half = "1.6" # needs to be 1.6 to work with tch
# Backends
arrayfire = { version = "3.8", optional = true }
tch = { version = "0.8", optional = true }
half = "1.6" # needs to be 1.6 to work with tch
ndarray = "0.15"
ndarray = { version = "0.15", optional = true }
# Autodiff
nanoid = "0.4"
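With this layout the default build enables full, i.e. both remaining backends. Assuming the crate builds cleanly with a single backend, selecting one is a standard cargo invocation (a sketch, not part of the diff):

cargo build --no-default-features --features tch
cargo build --no-default-features --features ndarray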

View File

@ -1,12 +0,0 @@
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum GPUBackend {
CUDA,
OPENCL,
}
pub type DeviceNumber = usize;
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Device {
CPU,
GPU(DeviceNumber, GPUBackend),
}

View File

@ -1,35 +0,0 @@
pub mod device;
mod ops;
mod shape;
mod tensor;
use self::device::Device;
pub use tensor::*;
#[cfg(test)]
mod tests {
use super::*;
use crate::tensor::backend::arrayfire::device::GPUBackend;
use crate::{Data, Shape};
use std::thread;
#[test]
fn should_support_multiple_devices_on_different_thread() {
let function = |device| {
for _ in 0..10 {
let data_1 = Data::random(Shape::new([1000]));
let data_2 = Data::random(Shape::new([1000]));
let tensor_1 = ArrayfireTensor::<f64, 1>::from_data(data_1, device);
let tensor_2 = ArrayfireTensor::<f64, 1>::from_data(data_2, device);
let _data = tensor_1 + tensor_2;
}
};
let handler_1 = thread::spawn(move || function(Device::CPU));
let handler_2 = thread::spawn(move || function(Device::GPU(0, GPUBackend::OPENCL)));
handler_1.join().unwrap();
handler_2.join().unwrap();
}
}

View File

@ -1,77 +0,0 @@
use crate::{backend::arrayfire::ArrayfireTensor, TensorOpsAdd};
use arrayfire::{ConstGenerator, HasAfEnum};
use std::ops::Add;
impl<P: HasAfEnum, const D: usize> TensorOpsAdd<P, D> for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Clone + Copy,
{
fn add(&self, other: &Self) -> Self {
self.set_backend_binary_ops(other);
let array = (&self.array).add(&other.array);
let shape = self.shape.clone();
let device = self.device;
Self {
array,
shape,
device,
}
}
fn add_scalar(&self, other: &P) -> Self {
self.set_backend_single_ops();
let array = arrayfire::add(&self.array, other, false);
let shape = self.shape.clone();
let device = self.device;
Self {
array,
shape,
device,
}
}
}
impl<P: HasAfEnum, const D: usize> std::ops::Add<Self> for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Clone + Copy,
{
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
TensorOpsAdd::add(&self, &rhs)
}
}
impl<P: HasAfEnum, const D: usize> std::ops::Add<P> for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Clone + Copy,
{
type Output = Self;
fn add(self, rhs: P) -> Self::Output {
TensorOpsAdd::add_scalar(&self, &rhs)
}
}
#[cfg(test)]
mod tests {
use crate::{backend::arrayfire::Device, Data, TensorBase};
use super::*;
#[test]
fn should_support_add_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[6.0, 7.0, 8.0], [9.0, 10.0, 11.0]]);
let data_expected = Data::from([[6.0, 8.0, 10.0], [12.0, 14.0, 16.0]]);
let tensor_1 = ArrayfireTensor::<f64, 2>::from_data(data_1, Device::CPU);
let tensor_2 = ArrayfireTensor::<f64, 2>::from_data(data_2, Device::CPU);
let data_actual = (tensor_1 + tensor_2).into_data();
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,87 +0,0 @@
use crate::{backend::arrayfire::ArrayfireTensor, TensorOpsIndex};
use arrayfire::{HasAfEnum, Seq};
use std::ops::Range;
impl<P: HasAfEnum + std::fmt::Debug + Copy + Default, const D1: usize, const D2: usize>
TensorOpsIndex<P, D1, D2> for ArrayfireTensor<P, D1>
{
fn index(&self, index: [Range<usize>; D2]) -> Self {
self.set_backend_single_ops();
let shape = self.shape.index(index.clone());
let mut seqs = Vec::new();
for i in 0..D2 {
let range = index[i].clone();
let start = range.start;
let end = range.end - 1;
seqs.push(Seq::new(start as f64, end as f64, 1.0));
}
for i in D2..D1 {
let dim = self.shape.dims[i];
let start = 0;
let end = dim - 1;
seqs.push(Seq::new(start as f64, end as f64, 1.0));
}
let array = arrayfire::index(&self.array, &seqs);
let device = self.device;
Self {
array,
shape,
device,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::backend::arrayfire::device::Device;
use crate::{Data, TensorBase};
#[test]
fn should_support_full_indexing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = ArrayfireTensor::<f64, 1>::from_data(data.clone(), Device::CPU);
let data_actual = tensor.index([0..3]).into_data();
assert_eq!(data, data_actual);
}
#[test]
fn should_support_partial_indexing_1d() {
let data = Data::from([0.0, 1.0, 2.0]);
let tensor = ArrayfireTensor::<f64, 1>::from_data(data, Device::CPU);
let data_actual = tensor.index([1..3]).into_data();
let data_expected = Data::from([1.0, 2.0]);
assert_eq!(data_expected, data_actual);
}
#[test]
fn should_support_full_indexing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = ArrayfireTensor::<f64, 2>::from_data(data.clone(), Device::CPU);
let data_actual_1 = tensor.index([0..2]).into_data();
let data_actual_2 = tensor.index([0..2, 0..3]).into_data();
assert_eq!(data, data_actual_1);
assert_eq!(data, data_actual_2);
}
#[test]
fn should_support_partial_indexing_2d() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = ArrayfireTensor::<f64, 2>::from_data(data, Device::CPU);
let data_actual = tensor.index([0..2, 0..2]).into_data();
let data_expected = Data::from([[0.0, 1.0], [3.0, 4.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,61 +0,0 @@
use crate::{backend::arrayfire::ArrayfireTensor, Shape, TensorOpsMatmul};
use arrayfire::{FloatingPoint, HasAfEnum};
impl<P: HasAfEnum + FloatingPoint, const D: usize> TensorOpsMatmul<P, D> for ArrayfireTensor<P, D> {
fn matmul(&self, other: &Self) -> Self {
self.set_backend_binary_ops(other);
let array = arrayfire::matmul(
&self.array,
&other.array,
arrayfire::MatProp::NONE,
arrayfire::MatProp::NONE,
);
let device = self.device;
let shape = Shape::from(array.dims());
Self {
array,
shape,
device,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{backend::arrayfire::Device, Data, TensorBase};
#[test]
fn should_support_matmul_2_dims() {
let data_1 = Data::from([[4.0, 3.0], [8.0, 7.0]]);
let data_2 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = ArrayfireTensor::<f64, 2>::from_data(data_1, Device::CPU);
let tensor_2 = ArrayfireTensor::<f64, 2>::from_data(data_2, Device::CPU);
let data_actual = tensor_1.matmul(&tensor_2).into_data();
let data_expected = Data::from([[9.0, 16.0, 23.0], [21.0, 36.0, 51.0]]);
assert_eq!(data_expected, data_actual);
}
#[test]
#[ignore = "batch operation not supported yet..."]
fn should_support_matmul_3_dims() {
let data_1 = Data::from([[[4.0, 3.0], [8.0, 7.0]], [[4.0, 3.0], [8.0, 7.0]]]);
let data_2 = Data::from([
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
]);
let tensor_1 = ArrayfireTensor::<f32, 3>::from_data(data_1, Device::CPU);
let tensor_2 = ArrayfireTensor::<f32, 3>::from_data(data_2, Device::CPU);
let data_actual = tensor_1.matmul(&tensor_2).into_data();
let data_expected = Data::from([
[[9.0, 16.0, 23.0], [21.0, 36.0, 51.0]],
[[9.0, 16.0, 23.0], [21.0, 36.0, 51.0]],
]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,6 +0,0 @@
mod add;
mod index;
mod matmul;
mod mul;
mod neg;
mod reshape;

View File

@ -1,85 +0,0 @@
use crate::{backend::arrayfire::ArrayfireTensor, TensorOpsMul};
use arrayfire::{ConstGenerator, HasAfEnum};
impl<P: HasAfEnum, const D: usize> TensorOpsMul<P, D> for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Clone + Copy,
{
fn mul(&self, other: &Self) -> Self {
self.set_backend_binary_ops(other);
let array = arrayfire::mul(&self.array, &other.array, false);
let shape = self.shape.clone();
let device = self.device;
Self {
array,
shape,
device,
}
}
fn mul_scalar(&self, other: &P) -> Self {
self.set_backend_single_ops();
let array = arrayfire::mul(&self.array, other, false);
let shape = self.shape.clone();
let device = self.device;
Self {
array,
shape,
device,
}
}
}
impl<P: HasAfEnum, const D: usize> std::ops::Mul<P> for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Clone + Copy,
{
type Output = ArrayfireTensor<P, D>;
fn mul(self, rhs: P) -> Self::Output {
TensorOpsMul::mul_scalar(&self, &rhs)
}
}
impl<P: HasAfEnum, const D: usize> std::ops::Mul<ArrayfireTensor<P, D>> for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Clone + Copy,
{
type Output = ArrayfireTensor<P, D>;
fn mul(self, rhs: Self) -> Self::Output {
TensorOpsMul::mul(&self, &rhs)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{backend::arrayfire::Device, Data, TensorBase};
#[test]
fn should_support_mul_ops() {
let data_1 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let data_2 = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor_1 = ArrayfireTensor::<f64, 2>::from_data(data_1, Device::CPU);
let tensor_2 = ArrayfireTensor::<f64, 2>::from_data(data_2, Device::CPU);
let data_actual = tensor_1.mul(&tensor_2).into_data();
let data_expected = Data::from([[0.0, 1.0, 4.0], [9.0, 16.0, 25.0]]);
assert_eq!(data_expected, data_actual);
}
#[test]
fn should_support_mul_scalar_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let scalar = 2.0;
let tensor = ArrayfireTensor::<f64, 2>::from_data(data, Device::CPU);
let data_actual = tensor.mul_scalar(&scalar).into_data();
let data_expected = Data::from([[0.0, 2.0, 4.0], [6.0, 8.0, 10.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,44 +0,0 @@
use arrayfire::{ConstGenerator, HasAfEnum};
use num_traits::One;
use std::ops::Neg;
use crate::{backend::arrayfire::ArrayfireTensor, TensorOpsMul, TensorOpsNeg};
impl<P: HasAfEnum, const D: usize> TensorOpsNeg<P, D> for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Neg<Output = P> + One + Neg + Clone + Copy,
{
fn neg(&self) -> Self {
self.set_backend_single_ops();
let minus_one = Neg::neg(P::one());
self.mul_scalar(&minus_one)
}
}
impl<P: HasAfEnum, const D: usize> std::ops::Neg for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + Neg<Output = P> + One + Neg + Clone + Copy,
{
type Output = ArrayfireTensor<P, D>;
fn neg(self) -> Self::Output {
TensorOpsNeg::neg(&self)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{backend::arrayfire::device::Device, Data, TensorBase};
#[test]
fn should_support_neg_ops() {
let data = Data::from([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
let tensor = ArrayfireTensor::<f64, 2>::from_data(data, Device::CPU);
let data_actual = tensor.neg().into_data();
let data_expected = Data::from([[-0.0, -1.0, -2.0], [-3.0, -4.0, -5.0]]);
assert_eq!(data_expected, data_actual);
}
}

View File

@ -1,19 +0,0 @@
use crate::{backend::arrayfire::ArrayfireTensor, Shape, TensorOpsReshape};
use arrayfire::HasAfEnum;
impl<P: HasAfEnum + std::fmt::Debug + Copy + Default, const D1: usize, const D2: usize>
TensorOpsReshape<P, D1, D2, ArrayfireTensor<P, D2>> for ArrayfireTensor<P, D1>
{
fn reshape(&self, shape: Shape<D2>) -> ArrayfireTensor<P, D2> {
self.set_backend_single_ops();
let array = arrayfire::moddims(&self.array, shape.clone().into());
let device = self.device;
ArrayfireTensor {
array,
shape,
device,
}
}
}

View File

@ -1,28 +0,0 @@
use crate::Shape;
use arrayfire::Dim4;
impl<const D: usize> Into<Dim4> for Shape<D> {
fn into(self) -> Dim4 {
if D > 4 {
panic!(
"Can't create arrayfire Tensor with more than 4 dimensions, got {}",
D
);
}
let mut dims = [1; 4];
for i in 0..D {
dims[i] = self.dims[i] as u64;
}
Dim4::new(&dims)
}
}
impl<const D: usize> From<Dim4> for Shape<D> {
fn from(dim: Dim4) -> Self {
let mut values = [0; D];
for i in 0..D {
values[i] = dim[i] as usize;
}
Shape::new(values)
}
}
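A worked example of this (now deleted) conversion: Shape::new([2, 3]) maps to the Dim4 [2, 3, 1, 1], since arrayfire always carries exactly four dimensions and the unused trailing ones are padded with 1. Shapes with more than four dimensions panic, which the from_data test in the next file exercises.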

View File

@ -1,130 +0,0 @@
use super::{device::GPUBackend, Device};
use crate::{
backend::conversion::{Convertor, Order},
Data, FloatTensor, Shape, TensorBase,
};
use arrayfire::{Array, ConstGenerator, Dim4, FloatingPoint, HasAfEnum};
use num_traits::Float;
pub struct ArrayfireTensor<P: HasAfEnum, const D: usize> {
pub device: Device,
pub array: Array<P>,
pub shape: Shape<D>,
}
impl<P: HasAfEnum, const D: usize> ArrayfireTensor<P, D> {
pub(crate) fn set_backend_binary_ops(&self, other: &Self) {
if self.device != other.device {
panic!("Not on same device");
}
set_backend(&self.device);
}
pub(crate) fn set_backend_single_ops(&self) {
set_backend(&self.device);
}
}
pub(crate) fn set_backend(device: &Device) {
match device {
Device::CPU => arrayfire::set_backend(arrayfire::Backend::CPU),
&Device::GPU(device_num, backend) => {
match backend {
GPUBackend::CUDA => arrayfire::set_backend(arrayfire::Backend::CUDA),
GPUBackend::OPENCL => arrayfire::set_backend(arrayfire::Backend::OPENCL),
};
arrayfire::set_device(device_num as i32);
}
}
}
impl<P: Float + HasAfEnum + Default + Copy + std::fmt::Debug, const D: usize> FloatTensor<P, D>
for ArrayfireTensor<P, D>
where
P: ConstGenerator<OutType = P> + FloatingPoint + Clone + Copy,
{
}
impl<P: HasAfEnum + Default + Copy + std::fmt::Debug, const D: usize> TensorBase<P, D>
for ArrayfireTensor<P, D>
{
fn empty(shape: Shape<D>) -> Self {
let device = Device::CPU;
set_backend(&device);
let mut dims_arrayfire = [1; 4];
for i in 0..D {
dims_arrayfire[i] = shape.dims[i] as u64;
}
let array = Array::new_empty(Dim4::new(&dims_arrayfire));
Self {
array,
shape,
device,
}
}
fn shape(&self) -> &Shape<D> {
&self.shape
}
fn into_data(self) -> Data<P, D> {
let mut data = vec![P::default(); self.array.elements()];
self.array.host(&mut data);
let convertor = Convertor::new(&self.shape, Order::Right, Order::Left);
let values = convertor.convert(&data);
Data::new(values, self.shape)
}
}
impl<P: HasAfEnum + Default + Copy + std::fmt::Debug, const D: usize> ArrayfireTensor<P, D> {
pub fn from_data(data: Data<P, D>, device: Device) -> Self {
set_backend(&device);
let shape = data.shape.clone();
let dims: Dim4 = data.shape.into();
let convertor = Convertor::new(&shape, Order::Left, Order::Right);
let values = convertor.convert(&data.value);
let array = Array::new(&values, dims);
Self {
array,
shape,
device,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn should_not_create_tensor_with_more_than_4_dims() {
let data_expected = Data::random(Shape::new([2, 3, 1, 4, 5]));
let _tensor = ArrayfireTensor::<f64, 5>::from_data(data_expected.clone(), Device::CPU);
}
#[test]
fn should_support_into_and_from_data_1d() {
let data_expected = Data::random(Shape::new([3]));
let tensor = ArrayfireTensor::<f64, 1>::from_data(data_expected.clone(), Device::CPU);
let data_actual = tensor.into_data();
assert_eq!(data_expected, data_actual);
}
#[test]
fn should_support_into_and_from_data_2d() {
let data_expected = Data::random(Shape::new([2, 3]));
let tensor = ArrayfireTensor::<f64, 2>::from_data(data_expected.clone(), Device::CPU);
let data_actual = tensor.into_data();
assert_eq!(data_expected, data_actual);
}
}
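Two details of the deleted backend worth noting. First, arrayfire's active backend and device are process-global settings, which is why every op in the files above calls set_backend_single_ops or set_backend_binary_ops before touching an array, and why the multi-device test spawns separate threads. Second, the Convertor round-trip in from_data/into_data reorders values, presumably because arrayfire stores arrays column-major while Data is row-major; the Order::Left/Order::Right arguments appear to encode that layout change.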

View File

@ -75,15 +75,15 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_add() {
let data_1 = Data::from([2.0, 5.0]);
let data_2 = Data::from([4.0, 1.0]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.clone() + tensor_2.clone();
let grads = tensor_3.backward();
@ -100,7 +100,7 @@ mod tests {
fn should_diff_add_scalar() {
let data = Data::from([2.0, 10.0]);
-let tensor = ADTchTensor::from_data(data.clone());
+let tensor = TestADTensor::from_data(data.clone());
let tensor_out = tensor.clone() + 5.0;
let grads = tensor_out.backward();
@ -116,9 +116,9 @@ mod tests {
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f64, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
-let tensor_3 = ADTchTensor::from_data(data_3.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
+let tensor_3 = TestADTensor::from_data(data_3.clone());
let tensor_4 = tensor_1.add(&tensor_2);
let tensor_5 = tensor_4

View File

@ -117,15 +117,15 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_matmul_with_index() {
let data_1: Data<f64, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0, 100.0], [2.0, 3.0, 15.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_2.index([0..2, 0..2]);
let tensor_4 = &tensor_1.matmul(&tensor_3);
@ -147,9 +147,9 @@ mod tests {
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_assigned: Data<f64, 2> = Data::from([[9.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
-let tensor_assigned = ADTchTensor::from_data(data_assigned.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
+let tensor_assigned = TestADTensor::from_data(data_assigned.clone());
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.index_assign([0..1, 0..1], &tensor_assigned);
@ -170,9 +170,9 @@ mod tests {
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f64, 2> = Data::from([[9.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
-let tensor_3 = ADTchTensor::from_data(data_3.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
+let tensor_3 = TestADTensor::from_data(data_3.clone());
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_2.index([0..1, 0..1]);

View File

@ -38,15 +38,15 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_matmul() {
let data_1: Data<f64, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = &tensor_1.matmul(&tensor_2);
let grads = tensor_3.backward();
@ -68,9 +68,9 @@ mod tests {
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f64, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
-let tensor_3 = ADTchTensor::from_data(data_3.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
+let tensor_3 = TestADTensor::from_data(data_3.clone());
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_4.matmul(&tensor_3);
@ -89,9 +89,9 @@ mod tests {
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f64, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
-let tensor_3 = ADTchTensor::from_data(data_3.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
+let tensor_3 = TestADTensor::from_data(data_3.clone());
let tensor_4 = tensor_1.matmul(&tensor_2);
let tensor_5 = tensor_4.matmul(&tensor_3);

View File

@ -77,15 +77,15 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_mul() {
let data_1 = Data::from([1.0, 7.0]);
let data_2 = Data::from([4.0, 7.0]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.clone() * tensor_2.clone();
let grads = tensor_3.backward();
@ -102,7 +102,7 @@ mod tests {
fn should_diff_mul_scalar() {
let data = Data::from([2.0, 5.0]);
-let tensor = ADTchTensor::from_data(data.clone());
+let tensor = TestADTensor::from_data(data.clone());
let tensor_out = tensor.clone() * 4.0;
let grads = tensor_out.backward();
@ -118,9 +118,9 @@ mod tests {
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f64, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
-let tensor_3 = ADTchTensor::from_data(data_3.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
+let tensor_3 = TestADTensor::from_data(data_3.clone());
let tensor_4 = tensor_1.mul(&tensor_2);
let tensor_5 = tensor_4.mul(&tensor_3);

View File

@ -44,15 +44,15 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_neg() {
let data_1 = Data::<f64, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f64, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.matmul(&tensor_2.neg());
let tensor_4 = tensor_3.neg();

View File

@ -66,15 +66,15 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_mul() {
let data_1: Data<f64, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f64, 1> = Data::from([4.0, 7.0, 2.0, 3.0]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_2.reshape(Shape::new([2, 2]));
let tensor_4 = &tensor_1.matmul(&tensor_3);

View File

@ -77,15 +77,15 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_sub() {
let data_1 = Data::from([2.0, 5.0]);
let data_2 = Data::from([4.0, 1.0]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.clone() - tensor_2.clone();
let grads = tensor_3.backward();
@ -101,7 +101,7 @@ mod tests {
#[test]
fn should_diff_sub_scalar() {
let data = Data::from([2.0, 10.0]);
-let tensor = ADTchTensor::from_data(data.clone());
+let tensor = TestADTensor::from_data(data.clone());
let tensor_out = tensor.clone() - 5.0;
let grads = tensor_out.backward();
@ -117,9 +117,9 @@ mod tests {
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let data_3: Data<f64, 2> = Data::from([[2.0, 2.0], [2.0, 2.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
-let tensor_3 = ADTchTensor::from_data(data_3.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
+let tensor_3 = TestADTensor::from_data(data_3.clone());
let tensor_4 = tensor_1.sub(&tensor_2);
let tensor_5 = tensor_4.sub(&tensor_3).sub_scalar(&5.0);

View File

@ -26,15 +26,15 @@ impl<T: Tensor<P, D>, P: Element, const D: usize> TensorOpsTranspose<P, D> for A
#[cfg(test)]
mod tests {
use super::*;
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, Data};
#[test]
fn should_diff_transpose() {
let data_1 = Data::<f64, 2>::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2 = Data::<f64, 2>::from([[4.0, 7.0], [2.0, 3.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.matmul(&tensor_2.transpose());
let tensor_4 = tensor_3.transpose();

View File

@ -31,15 +31,15 @@ impl<T, P, const D: usize> AsNode<T> for ADTensor<P, D, T> {
#[cfg(test)]
mod tests {
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, ops::*, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, ops::*, Data};
#[test]
fn should_diff_full_complex_1() {
let data_1: Data<f32, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.matmul(&tensor_1);
@ -65,8 +65,8 @@ mod tests {
let data_1: Data<f64, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.matmul(&tensor_1);
@ -89,8 +89,8 @@ mod tests {
let data_1: Data<f64, 2> = Data::from([[1.0, 7.0], [13.0, -3.0]]);
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.matmul(&tensor_1);

View File

@ -1,6 +1,6 @@
#[cfg(test)]
mod tests {
-use crate::tensor::{backend::autodiff::helper::ADTchTensor, ops::*, Data};
+use crate::tensor::{backend::autodiff::helper::TestADTensor, ops::*, Data};
#[test]
fn should_behave_the_same_with_multithread() {
@ -8,8 +8,8 @@ mod tests {
let data_2: Data<f32, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let with_move = || {
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.matmul(&tensor_2);
@ -48,8 +48,8 @@ mod tests {
return (grad_1.clone(), grad_2.clone());
};
let without_move = || {
-let tensor_1 = ADTchTensor::from_data(data_1.clone());
-let tensor_2 = ADTchTensor::from_data(data_2.clone());
+let tensor_1 = TestADTensor::from_data(data_1.clone());
+let tensor_2 = TestADTensor::from_data(data_2.clone());
let tensor_3 = tensor_1.matmul(&tensor_2);
let tensor_4 = tensor_3.matmul(&tensor_2);

View File

@ -61,12 +61,16 @@ impl<T: Clone + std::fmt::Debug, P, const D: usize> ADTensor<P, D, T> {
#[cfg(test)]
pub mod helper {
use super::*;
#[cfg(feature = "ndarray")]
mod helper_impl {
use super::*;
use crate::tensor::{backend::ndarray::NdArrayTensor, Data};
use ndarray::{Dim, Dimension};
-pub type ADTchTensor<P, const D: usize> = ADTensor<P, D, NdArrayTensor<P, D>>;
+pub type TestADTensor<P, const D: usize> = ADTensor<P, D, NdArrayTensor<P, D>>;
-impl<P: Element + ndarray::ScalarOperand + ndarray::LinalgScalar, const D: usize> ADTchTensor<P, D>
+impl<P: Element + ndarray::ScalarOperand + ndarray::LinalgScalar, const D: usize> TestADTensor<P, D>
where
Dim<[usize; D]>: Dimension,
{
@ -75,13 +79,22 @@ pub mod helper {
ADTensor::from_tensor(tensor)
}
}
-// pub type ADTchTensor<P, const D: usize> = ADTensor<P, D, TchTensor<P, D>>;
-// impl<P: ADElement + tch::kind::Element + Into<f64>, const D: usize> ADTchTensor<P, D> {
-// pub fn from_data(data: Data<P, D>) -> Self {
-// let tensor = TchTensor::from_data(data, tch::Device::Cpu);
-// ADTensor::from_tensor(tensor)
-// }
-// }
+}
+pub use helper_impl::*;
#[cfg(feature = "tch")]
#[cfg(not(feature = "ndarray"))]
mod helper_impl {
use super::*;
use crate::tensor::backend::tch::TchTensor;
pub type TestADTensor<P, const D: usize> = ADTensor<P, D, TchTensor<P, D>>;
impl<P: Element + tch::kind::Element + Into<f64>, const D: usize> TestADTensor<P, D> {
pub fn from_data(data: Data<P, D>) -> Self {
let tensor = TchTensor::from_data(data, tch::Device::Cpu);
ADTensor::from_tensor(tensor)
}
}
}
pub use helper_impl::*;
}
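The renamed helper gives the autodiff tests a backend-agnostic entry point. A minimal sketch of the resulting pattern, using only names that appear in this diff:

// TestADTensor resolves to ADTensor<P, D, NdArrayTensor<P, D>> when the
// "ndarray" feature is enabled, and to ADTensor<P, D, TchTensor<P, D>>
// otherwise, so the same test body exercises either backend.
let data_1: Data<f64, 2> = Data::from([[1.0, 7.0], [2.0, 3.0]]);
let data_2: Data<f64, 2> = Data::from([[4.0, 7.0], [2.0, 3.0]]);
let tensor_1 = TestADTensor::from_data(data_1);
let tensor_2 = TestADTensor::from_data(data_2);
let tensor_3 = tensor_1.matmul(&tensor_2);
// backward() walks the recorded graph and returns the gradients,
// whichever backend was selected at compile time.
let grads = tensor_3.backward();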

View File

@ -1,24 +1,12 @@
#[cfg(feature = "tch")]
use ::tch::Device;
#[cfg(feature = "arrayfire")]
pub mod arrayfire;
pub mod autodiff;
pub mod conversion;
#[cfg(feature = "ndarray")]
pub mod ndarray;
#[cfg(feature = "tch")]
pub mod tch;
#[cfg(feature = "tch")]
pub type TchDevice = Device;
-pub enum Backend {
-#[cfg(feature = "tch")]
-Tch(TchDevice),
-NdArray,
-}
-impl Default for Backend {
-fn default() -> Self {
-Backend::Tch(TchDevice::Cpu)
-}
-}
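Design note: with the Backend enum and its Default impl deleted, there is no runtime backend value left to thread around; which backends exist is decided entirely at compile time by the cfg-gated modules above, mirroring the feature flags introduced in Cargo.toml.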

View File

@ -2,11 +2,8 @@ use crate::tensor::{backend::ndarray::NdArrayTensor, ops::*};
use ndarray::SliceInfoElem;
use std::ops::Range;
-impl<
-P: tch::kind::Element + std::fmt::Debug + Copy + Default,
-const D1: usize,
-const D2: usize,
-> TensorOpsIndex<P, D1, D2> for NdArrayTensor<P, D1>
+impl<P: std::fmt::Debug + Copy + Default, const D1: usize, const D2: usize>
+TensorOpsIndex<P, D1, D2> for NdArrayTensor<P, D1>
{
fn index(&self, indexes: [Range<usize>; D2]) -> Self {
let slices = to_slice_args::<D1, D2>(indexes.clone());

View File

@ -1,5 +1,7 @@
-use crate::tensor::{ops::TensorOpsUtilities, Data, Shape};
-use ndarray::{s, ArcArray, Array, Axis, Dim, Dimension, Ix2, Ix3, IxDyn};
+use crate::tensor::{ops::TensorOpsUtilities, Data, Shape, Tensor};
+use ndarray::{
+s, ArcArray, Array, Axis, Dim, Dimension, Ix2, Ix3, IxDyn, LinalgScalar, ScalarOperand,
+};
#[derive(Debug, Clone)]
pub struct NdArrayTensor<P, const D: usize> {
@ -149,6 +151,13 @@ where
}
}
+impl<P: crate::tensor::Element + ScalarOperand + LinalgScalar, const D: usize> Tensor<P, D>
+for NdArrayTensor<P, D>
+where
+Dim<[usize; D]>: Dimension,
+{
+}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -1,4 +1,4 @@
-use crate::tensor::{ops::TensorOpsUtilities, Data, Shape};
+use crate::tensor::{ops::TensorOpsUtilities, Data, Element, Shape, Tensor};
#[derive(Debug, PartialEq)]
pub struct TchTensor<P: tch::kind::Element, const D: usize> {
@ -107,6 +107,8 @@ impl<P: tch::kind::Element + Default + Copy + std::fmt::Debug, const D: usize>
}
}
+impl<P: Element + Into<f64> + tch::kind::Element, const D: usize> Tensor<P, D> for TchTensor<P, D> {}
#[cfg(test)]
mod tests {
use crate::tensor::Distribution;

View File

@ -72,26 +72,6 @@ ad_items!(int u32);
ad_items!(int u16);
ad_items!(int u8);
#[cfg(feature = "tch")]
mod tch {
use super::*;
use crate::tensor::backend::tch::TchTensor;
use ::tch::kind::Element as TchElement;
impl<P: Element + Into<f64> + TchElement, const D: usize> Tensor<P, D> for TchTensor<P, D> {}
}
mod ndarray {
use super::*;
use crate::tensor::backend::ndarray::NdArrayTensor;
use ::ndarray::{Dim, Dimension, LinalgScalar, ScalarOperand};
impl<P: Element + ScalarOperand + LinalgScalar, const D: usize> Tensor<P, D> for NdArrayTensor<P, D> where
Dim<[usize; D]>: Dimension
{
}
}
mod ad {
use super::*;
use crate::tensor::backend::autodiff::ADTensor;
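The Tensor impls removed in this final hunk are not lost: as the earlier hunks show, the tch impl reappears alongside TchTensor and the ndarray impl alongside NdArrayTensor, so each backend's trait implementations now live in that backend's own module.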