mirror of https://github.com/tracel-ai/burn.git

refactor: elems (#206)

parent 019c5f9c44
commit be96160065
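In brief, this refactor renames burn's element-conversion method from `to_elem(&self)` to `elem(self)` (elements are `Copy`, so the conversion can simply consume the value), renames the element-wise scalar comparison `equal_scalar` to `equal_elem` across the tensor API and every backend, and deletes the now-unused `ElementValue` and `IsInt` helper traits. A minimal self-contained sketch of the call-site change, using a simplified stand-in trait rather than burn's actual definition (see the element.rs hunks near the end of this diff for the real one):

```rust
// Simplified stand-in for burn's `ElementConversion`: `elem` takes `self`
// by value instead of `&self`, so call sites read `0.elem()` rather than
// `0.to_elem()`. Requires the num-traits crate, which burn already uses.
use num_traits::ToPrimitive;

trait ElementConversion: Sized {
    fn from_elem<E: ToPrimitive>(elem: E) -> Self;
    // before: fn to_elem<E: ElementConversion>(&self) -> E { E::from_elem(*self) }
    fn elem<E: ElementConversion>(self) -> E
    where
        Self: ToPrimitive,
    {
        E::from_elem(self)
    }
}

impl ElementConversion for f32 {
    fn from_elem<E: ToPrimitive>(elem: E) -> Self {
        elem.to_f32().unwrap()
    }
}

impl ElementConversion for i64 {
    fn from_elem<E: ToPrimitive>(elem: E) -> Self {
        elem.to_i64().unwrap()
    }
}

fn main() {
    let zero: f32 = 0i64.elem(); // was: 0.to_elem()
    let one: i64 = 1.5f32.elem(); // truncates via to_i64
    println!("{zero} {one}");
}
```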
@@ -512,7 +512,7 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {
     fn backward(self, ops: Ops<Self::State, 1>, grads: &mut Gradients) {
         unary::<B, D, D, _>(ops.parents, ops.node, grads, |grad| {
-            B::mask_fill(grad, ops.state, 0.to_elem())
+            B::mask_fill(grad, ops.state, 0.elem())
         });
     }
 }

@@ -529,8 +529,8 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {
         B::equal(lhs.primitive, rhs.primitive)
     }

-    fn equal_scalar<const D: usize>(lhs: ADTensor<B, D>, rhs: FloatElem<B>) -> BoolTensor<B, D> {
-        B::equal_scalar(lhs.primitive, rhs)
+    fn equal_elem<const D: usize>(lhs: ADTensor<B, D>, rhs: FloatElem<B>) -> BoolTensor<B, D> {
+        B::equal_elem(lhs.primitive, rhs)
     }

     fn greater<const D: usize>(lhs: ADTensor<B, D>, rhs: ADTensor<B, D>) -> BoolTensor<B, D> {

@@ -596,7 +596,7 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {
             let shape = ops.state;
             let val = 1_f64 / shape.num_elements() as f64;
             let ones = B::ones(shape, &B::device(&grad));
-            let val = B::mul_scalar(ones, val.to_elem());
+            let val = B::mul_scalar(ones, val.elem());

             let grad: Tensor<B, 1> = Tensor::from_primitive(grad);
             let val: Tensor<B, D> = Tensor::from_primitive(val);

@@ -821,7 +821,7 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {
         unary::<B, D, D, _>(ops.parents, ops.node, grads, |grad| {
             let input = ops.state;
             let ones = B::ones(B::shape(&input), &B::device(&input));
-            let value = B::div(ones, B::add_scalar(input, 1.to_elem()));
+            let value = B::div(ones, B::add_scalar(input, 1.elem()));

             B::mul(grad, value)
         });

@@ -848,7 +848,7 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {

         unary::<B, D, D, _>(ops.parents, ops.node, grads, |grad| {
             let tmp = B::powf(tensor, value - 1.0);
-            let value = B::mul_scalar(tmp, value.to_elem());
+            let value = B::mul_scalar(tmp, value.elem());

             B::mul(grad, value)
         });

@@ -874,7 +874,7 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {
     fn backward(self, ops: Ops<Self::State, 1>, grads: &mut Gradients) {
         unary::<B, D, D, _>(ops.parents, ops.node, grads, |grad| {
             let input = ops.state;
-            let value = B::div_scalar(B::powf(input, -0.5), 2.to_elem());
+            let value = B::div_scalar(B::powf(input, -0.5), 2.elem());

             B::mul(grad, value)
         });

@@ -946,7 +946,7 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {

     fn backward(self, ops: Ops<Self::State, 1>, grads: &mut Gradients) {
         unary::<B, D, D, _>(ops.parents, ops.node, grads, |grad| {
-            let value = B::add_scalar(B::neg(B::powf(ops.state, 2.0)), 1.to_elem());
+            let value = B::add_scalar(B::neg(B::powf(ops.state, 2.0)), 1.elem());
             B::mul(grad, value)
         });
     }

@@ -971,8 +971,8 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {
     fn backward(self, ops: Ops<Self::State, 1>, grads: &mut Gradients) {
         unary::<B, D, D, _>(ops.parents, ops.node, grads, |grad| {
             let exponent = B::neg(B::powf(ops.state, 2.0));
-            let numerator = B::mul_scalar(B::exp(exponent), 2.0.to_elem());
-            let denominator = std::f64::consts::PI.sqrt().to_elem();
+            let numerator = B::mul_scalar(B::exp(exponent), 2.0.elem());
+            let denominator = std::f64::consts::PI.sqrt().elem();
             let value = B::div_scalar(numerator, denominator);

             B::mul(grad, value)

@@ -1055,7 +1055,7 @@ impl<B: Backend> TensorOps<ADBackendDecorator<B>> for ADBackendDecorator<B> {

     fn backward(self, ops: Ops<Self::State, 1>, grads: &mut Gradients) {
         unary::<B, D, D, _>(ops.parents, ops.node, grads, |grad| {
-            let zero = 0.to_elem();
+            let zero = 0.elem();
             let mask = B::lower_equal_scalar(ops.state, zero);
             B::mask_fill(grad, mask, zero)
         });
@@ -19,7 +19,7 @@ pub fn generate_autoregressive_mask<B: Backend>(

     mask = mask.to_device(device).repeat(0, batch_size);

-    mask.equal_scalar(1_i64)
+    mask.equal_elem(1_i64)
 }

 pub struct GeneratePaddingMask<B: Backend> {

@@ -67,7 +67,7 @@ pub fn generate_padding_mask<B: Backend>(
         tensor = tensor.index_assign(
             [index..index + 1, 0..tokens.len()],
             Tensor::from_data(Data::new(
-                tokens.into_iter().map(|e| (e as i64).to_elem()).collect(),
+                tokens.into_iter().map(|e| (e as i64).elem()).collect(),
                 Shape::new([1, seq_length]),
             )),
         );

@@ -75,7 +75,7 @@ pub fn generate_padding_mask<B: Backend>(

     let mask = tensor
         .clone()
-        .equal_scalar(pad_token as i64)
+        .equal_elem(pad_token as i64)
         .to_device(device);
     let tensor = tensor.to_device(device);
@@ -325,7 +325,7 @@ mod tests {
             [0..batch_size, seq_length - num_padded..seq_length],
             Tensor::ones([batch_size, num_padded]),
         );
-        let mask_pad = mask_pad.equal_scalar(1);
+        let mask_pad = mask_pad.equal_elem(1);

         let tensor_1 = Tensor::<TestBackend, 3>::random(
             [batch_size, seq_length, d_model],
@@ -64,8 +64,8 @@ impl<B: Backend> Conv1d<B> {
         let k = (config.channels_in * config.kernel_size) as f64;
         let k = sqrt(1.0 / k);

-        let k1: B::FloatElem = (-k).to_elem();
-        let k2: B::FloatElem = k.to_elem();
+        let k1: B::FloatElem = (-k).elem();
+        let k2: B::FloatElem = k.elem();

         let weight = Tensor::random(
             [config.channels_out, config.channels_in, config.kernel_size],
@@ -64,8 +64,8 @@ impl<B: Backend> Conv2d<B> {
         let k = (config.channels[0] * config.kernel_size[0] * config.kernel_size[1]) as f64;
         let k = sqrt(1.0 / k);

-        let k1: B::FloatElem = (-k).to_elem();
-        let k2: B::FloatElem = k.to_elem();
+        let k1: B::FloatElem = (-k).elem();
+        let k2: B::FloatElem = k.elem();

         let weight = Tensor::random(
             [
@@ -43,7 +43,7 @@ impl<B: Backend> Linear<B> {
     /// Create the module from the given configuration.
     pub fn new(config: &LinearConfig) -> Self {
         let k = sqrt(1.0 / config.d_input as f64);
-        let distribution = Distribution::Uniform((-1.0 * k).to_elem(), k.to_elem());
+        let distribution = Distribution::Uniform((-1.0 * k).elem(), k.elem());

         let weight = Tensor::random([config.d_input, config.d_output], distribution);
         let bias = match config.bias {
@@ -37,7 +37,7 @@ pub struct Adam<B: ADBackend> {
 impl<B: ADBackend> Adam<B> {
     pub fn new(config: &AdamConfig) -> Self {
         Self {
-            learning_rate: config.learning_rate.to_elem(),
+            learning_rate: config.learning_rate.elem(),
             momentum: AdaptiveMomentum {
                 beta_1: config.beta_1,
                 beta_2: config.beta_2,

@@ -139,7 +139,7 @@ impl AdaptiveMomentum {
         self.moment_2.register(id.clone(), moment_2.clone());
         self.time.register(id.clone(), time.clone());

-        let time = time.single_value().to_elem();
+        let time = time.single_value().elem();
         let moment_1_corrected = moment_1.div_scalar(1f32 - self.beta_1.powf(time));
         let moment_2_corrected = moment_2.div_scalar(1f32 - self.beta_2.powf(time));
@@ -23,7 +23,7 @@ pub struct WeightDecay<B: ADBackend> {
 impl<B: ADBackend> WeightDecay<B> {
     pub fn new(config: &WeightDecayConfig) -> Self {
         Self {
-            penalty: config.penalty.to_elem(),
+            penalty: config.penalty.elem(),
             gradients: GradientsParams::new(),
         }
     }
@@ -31,7 +31,7 @@ pub struct Momentum<B: ADBackend> {
 impl<B: ADBackend> Momentum<B> {
     pub fn new(config: &MomentumConfig) -> Self {
         Self {
-            momentum: config.momentum.to_elem(),
+            momentum: config.momentum.elem(),
             dampening: config.dampening,
             velocity: GradientsParams::new(),
             nesterov: config.nesterov,
@@ -30,7 +30,7 @@ pub struct Sgd<B: ADBackend> {

 impl<B: ADBackend> Sgd<B> {
     pub fn new(config: &SgdConfig) -> Self {
-        let learning_rate = config.learning_rate.to_elem();
+        let learning_rate = config.learning_rate.elem();
         let momentum = config.momentum.as_ref().map(|config| Momentum::new(config));
         let weight_decay = config
             .weight_decay
@@ -3,7 +3,13 @@ use libm::{exp, log, log1p, pow, sqrt};
 use libm::{expf, log1pf, logf, powf, sqrtf};

 pub(crate) trait NdArrayElement:
-    Element + ndarray::LinalgScalar + ndarray::ScalarOperand + ExpElement + num_traits::FromPrimitive
+    Element
+    + ndarray::LinalgScalar
+    + ndarray::ScalarOperand
+    + ExpElement
+    + num_traits::FromPrimitive
+    + core::cmp::PartialEq
+    + core::cmp::PartialOrd<Self>
 {
 }
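The two comparison bounds are the new part here; presumably they are needed because the base `Element` trait drops `core::cmp::PartialOrd<Self>` in the element.rs hunk later in this diff, while the ndarray kernels still compare elements directly (the `relu` hunk below tests `elem < zero`). A minimal self-contained illustration of why the backend trait must carry the bound itself (illustrative only, not burn's code):

```rust
// A generic relu over any ordered, copyable element type: without a
// `PartialOrd` bound on the element, the `<` comparison does not compile.
fn relu_scalar<E: PartialOrd + Copy>(elem: E, zero: E) -> E {
    if elem < zero {
        zero
    } else {
        elem
    }
}

fn main() {
    assert_eq!(relu_scalar(-1.5f32, 0.0), 0.0);
    assert_eq!(relu_scalar(2i32, 0), 2);
}
```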
@@ -132,7 +132,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
     }

     fn neg<const D: usize>(tensor: NdArrayTensor<E, D>) -> NdArrayTensor<E, D> {
-        Self::mul_scalar(tensor, (-1f32).to_elem::<E>())
+        Self::mul_scalar(tensor, (-1f32).elem::<E>())
     }

     fn swap_dims<const D: usize>(

@@ -174,12 +174,12 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
         value: E,
     ) -> NdArrayTensor<E, D> {
         let mask_mul = mask.array.mapv(|x| match x {
-            true => 0.to_elem(),
-            false => 1.to_elem(),
+            true => 0.elem(),
+            false => 1.elem(),
         });
         let mask_add = mask.array.mapv(|x| match x {
             true => value,
-            false => 0.to_elem(),
+            false => 0.elem(),
         });
         let array = (tensor.array * mask_mul) + mask_add;
@@ -191,12 +191,12 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
         rhs: NdArrayTensor<E, D>,
     ) -> NdArrayTensor<bool, D> {
         let tensor = NdArrayBackend::<E>::sub(lhs, rhs);
-        let zero = 0.to_elem();
+        let zero = 0.elem();

-        Self::equal_scalar(tensor, zero)
+        Self::equal_elem(tensor, zero)
     }

-    fn equal_scalar<const D: usize>(lhs: NdArrayTensor<E, D>, rhs: E) -> NdArrayTensor<bool, D> {
+    fn equal_elem<const D: usize>(lhs: NdArrayTensor<E, D>, rhs: E) -> NdArrayTensor<bool, D> {
         let array = lhs.array.mapv(|a| a == rhs).into_shared();

         NdArrayTensor { array }

@@ -207,7 +207,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
         rhs: NdArrayTensor<E, D>,
     ) -> NdArrayTensor<bool, D> {
         let tensor = NdArrayBackend::<E>::sub(lhs, rhs);
-        let zero = 0.to_elem();
+        let zero = 0.elem();
         Self::greater_scalar(tensor, zero)
     }

@@ -222,7 +222,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
         rhs: NdArrayTensor<E, D>,
     ) -> NdArrayTensor<bool, D> {
         let tensor = NdArrayBackend::<E>::sub(lhs, rhs);
-        let zero = 0.to_elem();
+        let zero = 0.elem();
         Self::greater_equal_scalar(tensor, zero)
     }

@@ -240,7 +240,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
         rhs: NdArrayTensor<E, D>,
     ) -> NdArrayTensor<bool, D> {
         let tensor = NdArrayBackend::<E>::sub(lhs, rhs);
-        let zero = 0.to_elem();
+        let zero = 0.elem();
         Self::lower_scalar(tensor, zero)
     }

@@ -255,7 +255,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
         rhs: NdArrayTensor<E, D>,
     ) -> NdArrayTensor<bool, D> {
         let tensor = NdArrayBackend::<E>::sub(lhs, rhs);
-        let zero = 0.to_elem();
+        let zero = 0.elem();
         Self::lower_equal_scalar(tensor, zero)
     }
|
@ -289,13 +289,13 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
|
|||
}
|
||||
|
||||
fn to_full_precision<const D: usize>(tensor: &NdArrayTensor<E, D>) -> NdArrayTensor<f32, D> {
|
||||
let array = tensor.array.mapv(|a| a.to_elem()).into_shared();
|
||||
let array = tensor.array.mapv(|a| a.elem()).into_shared();
|
||||
|
||||
NdArrayTensor { array }
|
||||
}
|
||||
|
||||
fn from_full_precision<const D: usize>(tensor: NdArrayTensor<f32, D>) -> NdArrayTensor<E, D> {
|
||||
let array = tensor.array.mapv(|a| a.to_elem()).into_shared();
|
||||
let array = tensor.array.mapv(|a| a.elem()).into_shared();
|
||||
|
||||
NdArrayTensor { array }
|
||||
}
|
||||
|
@ -341,7 +341,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
|
|||
fn cos<const D: usize>(tensor: NdArrayTensor<E, D>) -> NdArrayTensor<E, D> {
|
||||
let array = tensor
|
||||
.array
|
||||
.mapv_into(|a| cos(a.to_f64().unwrap()).to_elem())
|
||||
.mapv_into(|a| cos(a.to_f64().unwrap()).elem())
|
||||
.into_shared();
|
||||
|
||||
NdArrayTensor { array }
|
||||
|
@ -350,7 +350,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
|
|||
fn sin<const D: usize>(tensor: NdArrayTensor<E, D>) -> NdArrayTensor<E, D> {
|
||||
let array = tensor
|
||||
.array
|
||||
.mapv_into(|a| sin(a.to_f64().unwrap()).to_elem())
|
||||
.mapv_into(|a| sin(a.to_f64().unwrap()).elem())
|
||||
.into_shared();
|
||||
|
||||
NdArrayTensor { array }
|
||||
|
@ -359,7 +359,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
|
|||
fn tanh<const D: usize>(tensor: NdArrayTensor<E, D>) -> NdArrayTensor<E, D> {
|
||||
let array = tensor
|
||||
.array
|
||||
.mapv_into(|a| tanh(a.to_f64().unwrap()).to_elem())
|
||||
.mapv_into(|a| tanh(a.to_f64().unwrap()).elem())
|
||||
.into_shared();
|
||||
|
||||
NdArrayTensor { array }
|
||||
|
@ -368,7 +368,7 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
|
|||
fn erf<const D: usize>(tensor: NdArrayTensor<E, D>) -> NdArrayTensor<E, D> {
|
||||
let array = tensor
|
||||
.array
|
||||
.mapv_into(|a| erf(a.to_f64().unwrap()).to_elem())
|
||||
.mapv_into(|a| erf(a.to_f64().unwrap()).elem())
|
||||
.into_shared();
|
||||
|
||||
NdArrayTensor { array }
|
||||
|
@ -379,11 +379,11 @@ impl<E: NdArrayElement> TensorOps<NdArrayBackend<E>> for NdArrayBackend<E> {
|
|||
}
|
||||
|
||||
fn relu<const D: usize>(tensor: NdArrayTensor<E, D>) -> NdArrayTensor<E, D> {
|
||||
let zero = 0.to_elem();
|
||||
let zero = 0.elem();
|
||||
let array = tensor
|
||||
.array
|
||||
.mapv_into(|elem| match elem < zero {
|
||||
true => 0.0.to_elem(),
|
||||
true => 0.0.elem(),
|
||||
false => elem,
|
||||
})
|
||||
.into_shared();
|
||||
|
@ -409,7 +409,7 @@ where
|
|||
|
||||
while end <= data.value.len() {
|
||||
let data_dim = &mut data.value[start..end];
|
||||
let mut sorted: Vec<f64> = data_dim.iter().map(|a| a.to_elem()).collect();
|
||||
let mut sorted: Vec<f64> = data_dim.iter().map(|a| a.elem()).collect();
|
||||
sorted.sort_by(&cmp);
|
||||
|
||||
let max = sorted[0];
|
||||
|
@ -417,7 +417,7 @@ where
|
|||
let data_dim = &mut data.value[start..end];
|
||||
let mut index: i64 = 0;
|
||||
for elem in data_dim {
|
||||
let as_float: f64 = elem.to_elem();
|
||||
let as_float: f64 = elem.elem();
|
||||
if as_float == max {
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -1,24 +1,7 @@
|
|||
use burn_tensor::Element;
|
||||
use half::f16;
|
||||
|
||||
pub trait IsInt {
|
||||
fn is_int(&self) -> bool;
|
||||
}
|
||||
pub trait TchElement: Element + tch::kind::Element + IsInt {}
|
||||
|
||||
macro_rules! make_element {
|
||||
(
|
||||
$ty:ident,
|
||||
$bool:expr
|
||||
|
||||
) => {
|
||||
impl IsInt for $ty {
|
||||
fn is_int(&self) -> bool {
|
||||
$bool
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
pub trait TchElement: Element + tch::kind::Element {}
|
||||
|
||||
impl TchElement for f64 {}
|
||||
impl TchElement for f32 {}
|
||||
|
@ -29,11 +12,3 @@ impl TchElement for i32 {}
|
|||
impl TchElement for i16 {}
|
||||
|
||||
impl TchElement for u8 {}
|
||||
|
||||
make_element!(f64, false);
|
||||
make_element!(f32, false);
|
||||
make_element!(f16, false);
|
||||
make_element!(i64, true);
|
||||
make_element!(i32, true);
|
||||
make_element!(i16, true);
|
||||
make_element!(u8, false);
|
||||
|
|
|
@@ -110,7 +110,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn add_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<E, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();

         let tensor = lhs.unary_ops(
             |mut tensor| tensor.f_add_scalar_(rhs).unwrap(),

@@ -125,7 +125,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn sub_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<E, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.f_sub_scalar_(rhs).unwrap(),
             |tensor| tensor.f_sub_scalar(rhs).unwrap(),

@@ -139,7 +139,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn mul_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<E, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.f_mul_scalar_(rhs).unwrap(),
             |tensor| tensor.f_mul_scalar(rhs).unwrap(),

@@ -153,7 +153,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn div_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<E, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.f_div_scalar_(rhs).unwrap(),
             |tensor| tensor.f_div_scalar(rhs).unwrap(),

@@ -168,7 +168,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn neg<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D> {
-        Self::mul_scalar(tensor, (-1f32).to_elem::<E>())
+        Self::mul_scalar(tensor, (-1f32).elem::<E>())
     }

     fn swap_dims<const D: usize>(

@@ -210,7 +210,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
         mask: TchTensor<bool, D>,
         value: E,
     ) -> TchTensor<E, D> {
-        let value: f64 = value.to_elem();
+        let value: f64 = value.elem();
         let tensor = tensor.unary_ops(
             |mut tensor| tensor.f_masked_fill_(&mask.tensor, value).unwrap(),
             |tensor| tensor.f_masked_fill(&mask.tensor, value).unwrap(),

@@ -223,8 +223,8 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
         TchOps::equal(lhs, rhs)
     }

-    fn equal_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<bool, D> {
-        let rhs: f64 = rhs.to_elem();
+    fn equal_elem<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<bool, D> {
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.eq_(rhs).to_kind(tch::Kind::Bool),
             |tensor| tensor.eq(rhs),

@@ -245,7 +245,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn greater_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<bool, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.greater_(rhs).to_kind(tch::Kind::Bool),
             |tensor| tensor.greater(rhs),

@@ -269,7 +269,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn greater_equal_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<bool, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.greater_equal_(rhs).to_kind(tch::Kind::Bool),
             |tensor| tensor.greater_equal(rhs),

@@ -290,7 +290,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn lower_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<bool, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.less_(rhs).to_kind(tch::Kind::Bool),
             |tensor| tensor.less(rhs),

@@ -314,7 +314,7 @@ impl<E: TchElement> TensorOps<TchBackend<E>> for TchBackend<E> {
     }

     fn lower_equal_scalar<const D: usize>(lhs: TchTensor<E, D>, rhs: E) -> TchTensor<bool, D> {
-        let rhs: f64 = rhs.to_elem();
+        let rhs: f64 = rhs.elem();
         let tensor = lhs.unary_ops(
             |mut tensor| tensor.less_equal_(rhs).to_kind(tch::Kind::Bool),
             |tensor| tensor.less_equal(rhs),
@@ -150,9 +150,9 @@ where
     }

     /// Applies element wise equal comparison and returns a boolean tensor.
-    pub fn equal_scalar<E: Into<K::Elem>>(self, other: E) -> Tensor<B, D, Bool> {
+    pub fn equal_elem<E: Into<K::Elem>>(self, other: E) -> Tensor<B, D, Bool> {
         let elem: K::Elem = other.into();
-        K::equal_scalar::<D>(self.primitive, elem)
+        K::equal_elem::<D>(self.primitive, elem)
     }

     /// Concatenates all tensors into a new one along the given dimension.
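A self-contained miniature of the pattern `equal_elem` uses above: accept any scalar that converts `Into` the tensor's element type, convert once, then compare element-wise (stand-in types, not burn's):

```rust
#[derive(Debug, PartialEq)]
struct BoolTensor(Vec<bool>);

struct FloatTensor(Vec<f32>);

impl FloatTensor {
    // Mirrors `pub fn equal_elem<E: Into<K::Elem>>(self, other: E)` above.
    fn equal_elem<E: Into<f32>>(&self, other: E) -> BoolTensor {
        let elem: f32 = other.into();
        BoolTensor(self.0.iter().map(|&a| a == elem).collect())
    }
}

fn main() {
    let t = FloatTensor(vec![0.0, 1.0, 1.0]);
    assert_eq!(t.equal_elem(1i8), BoolTensor(vec![false, true, true]));
}
```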
@@ -206,13 +206,12 @@ pub trait BasicOps<B: Backend>: TensorKind<B> {
         dim: usize,
         times: usize,
     ) -> Self::Primitive<D>;
-    fn cat<const D: usize>(vectors: Vec<Self::Primitive<D>>, dim: usize) -> Self::Primitive<D>;
     fn equal<const D: usize>(
         lhs: Self::Primitive<D>,
         rhs: Self::Primitive<D>,
     ) -> Tensor<B, D, Bool>;
-    fn equal_scalar<const D: usize>(lhs: Self::Primitive<D>, rhs: Self::Elem)
-        -> Tensor<B, D, Bool>;
+    fn cat<const D: usize>(vectors: Vec<Self::Primitive<D>>, dim: usize) -> Self::Primitive<D>;
+    fn equal_elem<const D: usize>(lhs: Self::Primitive<D>, rhs: Self::Elem) -> Tensor<B, D, Bool>;
 }

 impl<B: Backend> BasicOps<B> for Float {

@@ -277,6 +276,10 @@ impl<B: Backend> BasicOps<B> for Float {
         B::repeat(tensor, dim, times)
     }

+    fn cat<const D: usize>(vectors: Vec<Self::Primitive<D>>, dim: usize) -> Self::Primitive<D> {
+        B::cat(vectors, dim)
+    }
+
     fn equal<const D: usize>(
         lhs: Self::Primitive<D>,
         rhs: Self::Primitive<D>,

@@ -284,15 +287,8 @@ impl<B: Backend> BasicOps<B> for Float {
         Tensor::new(B::equal(lhs, rhs))
     }

-    fn equal_scalar<const D: usize>(
-        lhs: Self::Primitive<D>,
-        rhs: Self::Elem,
-    ) -> Tensor<B, D, Bool> {
-        Tensor::new(B::equal_scalar(lhs, rhs))
-    }
-
-    fn cat<const D: usize>(vectors: Vec<Self::Primitive<D>>, dim: usize) -> Self::Primitive<D> {
-        B::cat(vectors, dim)
+    fn equal_elem<const D: usize>(lhs: Self::Primitive<D>, rhs: Self::Elem) -> Tensor<B, D, Bool> {
+        Tensor::new(B::equal_elem(lhs, rhs))
     }
 }

@@ -365,10 +361,7 @@ impl<B: Backend> BasicOps<B> for Int {
         Tensor::new(B::int_equal(lhs, rhs))
     }

-    fn equal_scalar<const D: usize>(
-        lhs: Self::Primitive<D>,
-        rhs: Self::Elem,
-    ) -> Tensor<B, D, Bool> {
+    fn equal_elem<const D: usize>(lhs: Self::Primitive<D>, rhs: Self::Elem) -> Tensor<B, D, Bool> {
         Tensor::new(B::int_equal_elem(lhs, rhs))
     }

@@ -446,10 +439,7 @@ impl<B: Backend> BasicOps<B> for Bool {
         Tensor::new(B::bool_equal(lhs, rhs))
     }

-    fn equal_scalar<const D: usize>(
-        lhs: Self::Primitive<D>,
-        rhs: Self::Elem,
-    ) -> Tensor<B, D, Bool> {
+    fn equal_elem<const D: usize>(lhs: Self::Primitive<D>, rhs: Self::Elem) -> Tensor<B, D, Bool> {
         Tensor::new(B::bool_equal_elem(lhs, rhs))
     }
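The three impls above now share one single-line `equal_elem` signature per tensor kind. A self-contained analogue of that kind-dispatch shape (hypothetical `Kind` trait, not burn's `BasicOps`):

```rust
trait Kind {
    type Elem: PartialEq + Copy;

    // One entry point per kind, mirroring `BasicOps::equal_elem` above.
    fn equal_elem(lhs: &[Self::Elem], rhs: Self::Elem) -> Vec<bool> {
        lhs.iter().map(|&a| a == rhs).collect()
    }
}

struct Float;
impl Kind for Float {
    type Elem = f32;
}

struct Int;
impl Kind for Int {
    type Elem = i64;
}

fn main() {
    assert_eq!(Float::equal_elem(&[0.0, 1.0], 1.0), vec![false, true]);
    assert_eq!(Int::equal_elem(&[3, 4], 3), vec![true, false]);
}
```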
@@ -272,22 +272,22 @@ where

     /// Applies element wise greater comparison and returns a boolean tensor.
     pub fn greater_scalar<E: ElementConversion>(self, other: E) -> Tensor<B, D, Bool> {
-        Tensor::new(B::greater_scalar(self.primitive, other.to_elem()))
+        Tensor::new(B::greater_scalar(self.primitive, other.elem()))
     }

     /// Applies element wise greater-equal comparison and returns a boolean tensor.
     pub fn greater_equal_scalar<E: ElementConversion>(self, other: E) -> Tensor<B, D, Bool> {
-        Tensor::new(B::greater_equal_scalar(self.primitive, other.to_elem()))
+        Tensor::new(B::greater_equal_scalar(self.primitive, other.elem()))
     }

     /// Applies element wise lower comparison and returns a boolean tensor.
     pub fn lower_scalar<E: ElementConversion>(self, other: E) -> Tensor<B, D, Bool> {
-        Tensor::new(B::lower_scalar(self.primitive, other.to_elem()))
+        Tensor::new(B::lower_scalar(self.primitive, other.elem()))
     }

     /// Applies element wise lower-equal comparison and returns a boolean tensor.
     pub fn lower_equal_scalar<E: ElementConversion>(self, other: E) -> Tensor<B, D, Bool> {
-        Tensor::new(B::lower_equal_scalar(self.primitive, other.to_elem()))
+        Tensor::new(B::lower_equal_scalar(self.primitive, other.elem()))
     }

     /// Create a random tensor of the given shape where each element is sampled from the given

@@ -299,11 +299,7 @@ where

     /// Fill each element with the given value based on the given mask.
     pub fn mask_fill<E: ElementConversion>(self, mask: Tensor<B, D, Bool>, value: E) -> Self {
-        Self::new(B::mask_fill(
-            self.primitive,
-            mask.primitive,
-            value.to_elem(),
-        ))
+        Self::new(B::mask_fill(self.primitive, mask.primitive, value.elem()))
     }

     /// Returns a tensor with full precision based on the selected backend.
@@ -160,7 +160,7 @@ impl<B: Backend> Numeric<B> for Int {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::int_add_scalar(lhs, rhs.to_elem())
+        B::int_add_scalar(lhs, rhs.elem())
     }
     fn sub<const D: usize>(
         lhs: Self::Primitive<D>,

@@ -172,7 +172,7 @@ impl<B: Backend> Numeric<B> for Int {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::int_sub_scalar(lhs, rhs.to_elem())
+        B::int_sub_scalar(lhs, rhs.elem())
     }
     fn div<const D: usize>(
         lhs: Self::Primitive<D>,

@@ -184,7 +184,7 @@ impl<B: Backend> Numeric<B> for Int {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::int_div_scalar(lhs, rhs.to_elem())
+        B::int_div_scalar(lhs, rhs.elem())
     }
     fn mul<const D: usize>(
         lhs: Self::Primitive<D>,

@@ -196,7 +196,7 @@ impl<B: Backend> Numeric<B> for Int {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::int_mul_scalar(lhs, rhs.to_elem())
+        B::int_mul_scalar(lhs, rhs.elem())
     }
     fn neg<const D: usize>(tensor: Self::Primitive<D>) -> Self::Primitive<D> {
         B::int_neg(tensor)

@@ -232,7 +232,7 @@ impl<B: Backend> Numeric<B> for Float {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::add_scalar(lhs, rhs.to_elem())
+        B::add_scalar(lhs, rhs.elem())
     }
     fn sub<const D: usize>(
         lhs: Self::Primitive<D>,

@@ -244,7 +244,7 @@ impl<B: Backend> Numeric<B> for Float {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::sub_scalar(lhs, rhs.to_elem())
+        B::sub_scalar(lhs, rhs.elem())
     }
     fn div<const D: usize>(
         lhs: Self::Primitive<D>,

@@ -256,7 +256,7 @@ impl<B: Backend> Numeric<B> for Float {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::div_scalar(lhs, rhs.to_elem())
+        B::div_scalar(lhs, rhs.elem())
     }
     fn mul<const D: usize>(
         lhs: Self::Primitive<D>,

@@ -268,7 +268,7 @@ impl<B: Backend> Numeric<B> for Float {
         lhs: Self::Primitive<D>,
         rhs: E,
     ) -> Self::Primitive<D> {
-        B::mul_scalar(lhs, rhs.to_elem())
+        B::mul_scalar(lhs, rhs.elem())
     }
     fn neg<const D: usize>(tensor: Self::Primitive<D>) -> Self::Primitive<D> {
         B::neg(tensor)
@@ -61,14 +61,12 @@ where
             DistributionSamplerKind::Uniform(distribution) => self.rng.sample(distribution),
             DistributionSamplerKind::Bernoulli(distribution) => {
                 if self.rng.sample(distribution) {
-                    1.to_elem()
+                    1.elem()
                 } else {
-                    0.to_elem()
+                    0.elem()
                 }
             }
-            DistributionSamplerKind::Normal(distribution) => {
-                self.rng.sample(distribution).to_elem()
-            }
+            DistributionSamplerKind::Normal(distribution) => self.rng.sample(distribution).elem(),
         }
     }
 }
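An analogue of the Bernoulli arm above: a boolean draw becomes a generic element by conversion (burn routes this through `.elem()`; plain `From<u8>` stands in here). Assumes the `rand` crate, which burn already depends on:

```rust
use rand::Rng;

// Sample 1 or 0 with probability `p`, converting into the element type.
fn sample_bernoulli<E: From<u8>, R: Rng>(rng: &mut R, p: f64) -> E {
    if rng.gen_bool(p) {
        E::from(1)
    } else {
        E::from(0)
    }
}

fn main() {
    let mut rng = rand::thread_rng();
    let x: f32 = sample_bernoulli(&mut rng, 0.5);
    assert!(x == 0.0 || x == 1.0);
}
```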
@@ -114,7 +112,7 @@ where

 impl<const D: usize, P: Element> Data<P, D> {
     pub fn convert<E: Element>(self) -> Data<E, D> {
-        let value: Vec<E> = self.value.into_iter().map(|a| a.to_elem()).collect();
+        let value: Vec<E> = self.value.into_iter().map(|a| a.elem()).collect();

         Data {
             value,

@@ -125,7 +123,7 @@ impl<const D: usize, P: Element> Data<P, D> {

 impl<P: Element> DataSerialize<P> {
     pub fn convert<E: Element>(self) -> DataSerialize<E> {
-        let value: Vec<E> = self.value.into_iter().map(|a| a.to_elem()).collect();
+        let value: Vec<E> = self.value.into_iter().map(|a| a.elem()).collect();

         DataSerialize {
             value,

@@ -136,11 +134,7 @@ impl<P: Element> DataSerialize<P> {

 impl<const D: usize> Data<bool, D> {
     pub fn convert<E: Element>(self) -> Data<E, D> {
-        let value: Vec<E> = self
-            .value
-            .into_iter()
-            .map(|a| (a as i64).to_elem())
-            .collect();
+        let value: Vec<E> = self.value.into_iter().map(|a| (a as i64).elem()).collect();

         Data {
             value,

@@ -170,7 +164,7 @@ where
     let mut data = Vec::with_capacity(num_elements);

     for _ in 0..num_elements {
-        data.push(0.to_elem());
+        data.push(0.elem());
     }

     Data::new(data, shape)

@@ -189,7 +183,7 @@ where
     let mut data = Vec::with_capacity(num_elements);

     for _ in 0..num_elements {
-        data.push(1.to_elem());
+        data.push(1.elem());
     }

     Data::new(data, shape)
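The `convert` methods above all share the same shape: move the buffer and run every element through the conversion. A self-contained stand-in using `Into` in place of burn's `ElementConversion::elem`:

```rust
// Convert a whole buffer of elements, consuming it, as Data::convert does.
fn convert<P: Into<E>, E>(value: Vec<P>) -> Vec<E> {
    value.into_iter().map(|a| a.into()).collect()
}

fn main() {
    let data: Vec<i32> = vec![1, 2, 3];
    let converted: Vec<f64> = convert(data);
    assert_eq!(converted, vec![1.0, 2.0, 3.0]);
}
```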
@@ -8,21 +8,18 @@ pub trait Element:
     + ElementRandom
     + ElementConversion
     + ElementPrecision
-    + ElementValue
     + core::ops::Mul<Self, Output = Self>
     + core::fmt::Debug
     + Default
     + Send
     + Sync
     + Copy
-    + core::cmp::PartialOrd<Self>
     + 'static
 {
 }

 pub trait ElementConversion {
     fn from_elem<E: ToPrimitive>(elem: E) -> Self;
-    fn to_elem<E: Element>(&self) -> E;
+    fn elem<E: Element>(self) -> E;
 }

 pub trait ElementRandom {
@@ -31,14 +28,6 @@ pub trait ElementRandom {
         Self: Sized;
 }

-pub trait ElementValue {
-    fn inf() -> Self;
-    fn inf_neg() -> Self;
-    fn nan() -> Self;
-    fn zero() -> Self;
-    fn one() -> Self;
-}
-
 #[derive(Clone, PartialEq, Eq, Copy, Debug)]
 pub enum Precision {
     Double,
@@ -55,8 +44,6 @@ pub trait ElementPrecision {
 macro_rules! make_element {
     (
         ty $type:ident $precision:expr,
-        zero $zero:expr,
-        one $one:expr,
         convert $convert:expr,
         random $random:expr
@@ -67,26 +54,8 @@ macro_rules! make_element {
         fn from_elem<E: ToPrimitive>(elem: E) -> Self {
             $convert(&elem)
         }
-        fn to_elem<E: Element>(&self) -> E {
-            E::from_elem(*self)
-        }
-    }
-
-    impl ElementValue for $type {
-        fn inf() -> Self {
-            Self::from_elem(f64::INFINITY)
-        }
-        fn inf_neg() -> Self {
-            Self::from_elem(core::ops::Neg::neg(f64::INFINITY))
-        }
-        fn nan() -> Self {
-            Self::from_elem(f64::NAN)
-        }
-        fn zero() -> Self {
-            $zero
-        }
-        fn one() -> Self {
-            $one
+        fn elem<E: Element>(self) -> E {
+            E::from_elem(self)
         }
     }
@@ -101,78 +70,53 @@ macro_rules! make_element {
             $random(distribution, rng)
         }
     }

     };
-    (
-        float $float:ident $precision:expr,
-        convert $convert:expr,
-        random $random:expr
-    ) => {
-        make_element!(
-            ty $float $precision,
-            zero 0.0,
-            one 1.0,
-            convert $convert,
-            random $random
-        );
-    };
-    (
-        int $int:ident $precision:expr,
-        convert $convert:expr,
-        random $random:expr
-    ) => {
-        make_element!(
-            ty $int $precision,
-            zero 0,
-            one 1,
-            convert $convert,
-            random $random
-        );
-    };
 }

 make_element!(
-    float f64 Precision::Double,
+    ty f64 Precision::Double,
     convert |elem: &dyn ToPrimitive| elem.to_f64().unwrap(),
     random |distribution: Distribution<f64>, rng: &mut R| distribution.sampler(rng).sample()
 );

 make_element!(
-    float f32 Precision::Full,
+    ty f32 Precision::Full,
     convert |elem: &dyn ToPrimitive| elem.to_f32().unwrap(),
     random |distribution: Distribution<f32>, rng: &mut R| distribution.sampler(rng).sample()
 );

 make_element!(
-    int i64 Precision::Double,
+    ty i64 Precision::Double,
     convert |elem: &dyn ToPrimitive| elem.to_i64().unwrap(),
     random |distribution: Distribution<i64>, rng: &mut R| distribution.sampler(rng).sample()
 );

 make_element!(
-    int i32 Precision::Full,
+    ty i32 Precision::Full,
     convert |elem: &dyn ToPrimitive| elem.to_i32().unwrap(),
     random |distribution: Distribution<i32>, rng: &mut R| distribution.sampler(rng).sample()
 );

 make_element!(
-    int i16 Precision::Half,
+    ty i16 Precision::Half,
     convert |elem: &dyn ToPrimitive| elem.to_i16().unwrap(),
     random |distribution: Distribution<i16>, rng: &mut R| distribution.sampler(rng).sample()
 );

 make_element!(
-    int i8 Precision::Other,
+    ty i8 Precision::Other,
     convert |elem: &dyn ToPrimitive| elem.to_i8().unwrap(),
     random |distribution: Distribution<i8>, rng: &mut R| distribution.sampler(rng).sample()
 );

 make_element!(
-    int u8 Precision::Other,
+    ty u8 Precision::Other,
     convert |elem: &dyn ToPrimitive| elem.to_u8().unwrap(),
     random |distribution: Distribution<u8>, rng: &mut R| distribution.sampler(rng).sample()
 );

 make_element!(
     ty f16 Precision::Half,
-    zero <f16 as num_traits::Zero>::zero(),
-    one <f16 as num_traits::One>::one(),
     convert |elem: &dyn ToPrimitive| f16::from_f32(elem.to_f32().unwrap()),
     random |distribution: Distribution<f16>, rng: &mut R| {
         let distribution: Distribution<f32> = distribution.convert();
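With the `zero`/`one` metadata and the `ElementValue` impl gone, the separate `float`/`int` wrapper arms had nothing left to add, so every type, f16 included, now goes through the single `ty` arm. A self-contained miniature of that consolidation (hypothetical `Convert` trait for brevity, not burn's macro):

```rust
trait Convert {
    fn from_f64(value: f64) -> Self;
}

// One arm serves floats and ints alike once no per-kind metadata is needed.
macro_rules! make_element {
    (ty $type:ty, convert $convert:expr) => {
        impl Convert for $type {
            fn from_f64(value: f64) -> Self {
                $convert(value)
            }
        }
    };
}

make_element!(ty f32, convert |v: f64| v as f32);
make_element!(ty i64, convert |v: f64| v as i64);

fn main() {
    assert_eq!(f32::from_f64(1.5), 1.5);
    assert_eq!(i64::from_f64(1.9), 1); // `as` truncates toward zero
}
```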
@@ -72,7 +72,7 @@ pub(crate) fn conv1d_backward<B: Backend>(
         weight_grad,
         bias.map(|b| {
             let elem = batch_size * length_out;
-            let elem = (elem as i32).to_elem();
+            let elem = (elem as i32).elem();

             let b = B::zeros(B::shape(&b), &B::device(&b));

@@ -127,7 +127,7 @@ pub(crate) fn conv2d_backward<B: Backend>(
         weight_grad,
         bias.map(|b| {
             let elem = batch_size * width_out * height_out;
-            let elem = (elem as i32).to_elem();
+            let elem = (elem as i32).elem();

             let b = B::zeros(B::shape(&b), &B::device(&b));
@@ -31,7 +31,7 @@ pub trait TensorOps<B: Backend> {
         let shape = Shape::new([range.end - range.start]);
         let value = range
             .into_iter()
-            .map(|i| (i as i64).to_elem())
+            .map(|i| (i as i64).elem())
             .collect::<Vec<B::IntElem>>();
         let data = Data::new(value, shape);
         B::int_from_data(data, device)

@@ -132,7 +132,7 @@ pub trait TensorOps<B: Backend> {
         lhs: B::TensorPrimitive<D>,
         rhs: B::TensorPrimitive<D>,
     ) -> B::BoolTensorPrimitive<D>;
-    fn equal_scalar<const D: usize>(
+    fn equal_elem<const D: usize>(
         lhs: B::TensorPrimitive<D>,
         rhs: B::FloatElem,
     ) -> B::BoolTensorPrimitive<D>;
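An analogue of the `arange` default body above: collect the index range, converting each index into the backend's integer element type (here `From<i64>` stands in for burn's `.elem()`):

```rust
// Build the flat value buffer and shape for an integer range tensor.
fn arange_value<E: From<i64>>(range: std::ops::Range<usize>) -> (Vec<E>, [usize; 1]) {
    let shape = [range.end - range.start];
    let value = range.map(|i| E::from(i as i64)).collect();
    (value, shape)
}

fn main() {
    let (value, shape) = arange_value::<i64>(2..5);
    assert_eq!(value, vec![2, 3, 4]);
    assert_eq!(shape, [3]);
}
```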
@@ -31,7 +31,7 @@ impl<B: Backend> Batcher<MNISTItem, MNISTBatch<B>> for MNISTBatcher<B> {

         let targets = items
             .iter()
-            .map(|item| Tensor::<B, 1, Int>::from_data(Data::from([(item.label as i64).to_elem()])))
+            .map(|item| Tensor::<B, 1, Int>::from_data(Data::from([(item.label as i64).elem()])))
             .collect();

         let images = Tensor::cat(images, 0).to_device(&self.device);
@@ -29,9 +29,7 @@ impl<B: Backend> Batcher<TextClassificationItem, TextClassificationBatch<B>>

         for item in items {
             tokens_list.push(self.tokenizer.encode(&item.text));
-            labels_list.push(Tensor::from_data(Data::from([
-                (item.label as i64).to_elem()
-            ])));
+            labels_list.push(Tensor::from_data(Data::from([(item.label as i64).elem()])));
         }

         let mask = generate_padding_mask(