diff --git a/lab4/src/algo/compute.rs b/lab4/src/algo/compute.rs
new file mode 100644
index 0000000..a66abc9
--- /dev/null
+++ b/lab4/src/algo/compute.rs
@@ -0,0 +1,17 @@
+fn compute_potential<const Is: usize, const Os: usize>(
+    weights: &[[f64; Is]; Os],
+    input_data: &[f64; Is],
+    potential_data: &mut [f64; Is],
+    output_data: &mut [f64; Is],
+    f: impl Fn(f64) -> f64
+) {
+    for (i, n) in weights.iter().enumerate() {
+        let P = input_data
+            .iter()
+            .zip(n)
+            .map(|(x, w)| x.apply_weight(*w))
+            .sum();
+        potential_data[i] = P;
+        output_data[i] = f(P);
+    };
+}
diff --git a/lab4/src/algo/fix.rs b/lab4/src/algo/fix.rs
new file mode 100644
index 0000000..40465f3
--- /dev/null
+++ b/lab4/src/algo/fix.rs
@@ -0,0 +1,30 @@
+fn calc_error<const Cs: usize, const Ns: usize>(
+    next_errors: &[f64; Ns],
+    weights: &[[f64; Cs]; Ns],
+    current_errors: &mut [f64; Cs]
+) {
+    for i in 0..Cs {
+        current_errors[i] = weights
+            .iter()
+            .enumerate()
+            .map(|(j, ww)| ww[i] * next_errors[j])
+            .sum();
+    }
+}
+
+fn apply_error<const Cs: usize, const Ns: usize>(
+    n: f64,
+    errors: &[f64; Ns],
+    weights: &mut [[f64; Cs]; Ns],
+    current_potentials: &[f64; Cs],
+    next_potentials: &[f64; Cs],
+    f: impl Fn(f64) -> f64,
+    f1: impl Fn(f64) -> f64
+) {
+    for i in 0..Cs {
+        for j in 0..Ns {
+            let dw = n * errors[j] * f1(next_potentials[j]) * f(current_potentials[i]);
+            weights[j][i] += dw;
+        }
+    }
+}
diff --git a/lab4/src/algo/layer.rs b/lab4/src/algo/layer.rs
deleted file mode 100644
index 8c961cc..0000000
--- a/lab4/src/algo/layer.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-pub trait Layer {
-    type InputType;
-    type OutputType;
-
-    fn compute(&self, input_data: &[Self::InputType], output_data: &mut [Self::OutputType]);
-
-    fn input_size(&self) -> usize;
-    fn output_size(&self) -> usize;
-}
diff --git a/lab4/src/algo/layer_impl.rs b/lab4/src/algo/layer_impl.rs
deleted file mode 100644
index 546ba32..0000000
--- a/lab4/src/algo/layer_impl.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use crate::algo::layer::Layer;
-use crate::algo::layers_union::LayersUnion;
-use std::iter::Sum;
-use std::marker::PhantomData;
-use std::ops::Add;
-
-struct Neuron<const PrevLayerSize: usize> {
-    input_weights: [f64; PrevLayerSize],
-}
-
-pub trait ApplyWeight<Output> {
-    fn apply_weight(&self, w: f64) -> Output;
-}
-
-impl ApplyWeight<f64> for f64 {
-    fn apply_weight(&self, w: f64) -> f64 {
-        return *self * w;
-    }
-}
-
-pub struct LayerImpl<
-    const PrevLayerSize: usize,
-    const CurrentLayerSize: usize,
-    InputType: ApplyWeight<WeightedType>,
-    WeightedType,
-    ActivationType: Sum<WeightedType>,
-    ActivationFunction: Fn(ActivationType) -> OutputType,
-    OutputType,
-> {
-    neurons: [Neuron<PrevLayerSize>; CurrentLayerSize],
-    activation_function: ActivationFunction,
-    __phantom: PhantomData<(InputType, WeightedType, ActivationType, OutputType)>,
-}
-
-impl<
-    const PrevLayerSize: usize,
-    const CurrentLayerSize: usize,
-    InputType: ApplyWeight<WeightedType>,
-    WeightedType,
-    ActivationType: Sum<WeightedType>,
-    ActivationFunction: Fn(ActivationType) -> OutputType,
-    OutputType,
-> Layer
-    for LayerImpl<
-        PrevLayerSize,
-        CurrentLayerSize,
-        InputType,
-        WeightedType,
-        ActivationType,
-        ActivationFunction,
-        OutputType,
-    >
-{
-    type InputType = InputType;
-    type OutputType = OutputType;
-
-    fn compute(&self, input_data: &[InputType], output_data: &mut [OutputType]) {
-        for (i, n) in self.neurons.iter().enumerate() {
-            let P = input_data
-                .iter()
-                .zip(n.input_weights)
-                .map(|(x, w)| x.apply_weight(w))
-                .sum();
-            output_data[i] = (self.activation_function)(P);
-        }
-    }
-
-    fn input_size(&self) -> usize {
-        return PrevLayerSize;
-    }
-
-    fn output_size(&self) -> usize {
-        return CurrentLayerSize;
-    }
-}
-
-impl<
-    const PrevPrevLayerSize: usize,
-    const PrevLayerSize: usize,
-    PrevLayerInputType: ApplyWeight<PrevLayerWeightedType>,
-    PrevLayerWeightedType,
-    PrevLayerActivationType: Sum<PrevLayerWeightedType>,
-    PrevLayerActivationFunction: Fn(PrevLayerActivationType) -> PrevLayerOutputType,
-    PrevLayerOutputType,
-    CurrentLayer: Layer<InputType = PrevLayerOutputType>,
-> Add<CurrentLayer>
-    for LayerImpl<
-        PrevPrevLayerSize,
-        PrevLayerSize,
-        PrevLayerInputType,
-        PrevLayerWeightedType,
-        PrevLayerActivationType,
-        PrevLayerActivationFunction,
-        PrevLayerOutputType,
-    >
-{
-    type Output = LayersUnion<Self, CurrentLayer>;
-
-    fn add(self, rhs: CurrentLayer) -> Self::Output {
-        return LayersUnion::join(self, rhs);
-    }
-}
diff --git a/lab4/src/algo/layers_union.rs b/lab4/src/algo/layers_union.rs
deleted file mode 100644
index b619f6a..0000000
--- a/lab4/src/algo/layers_union.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-use crate::algo::layer::Layer;
-use std::ops::Add;
-
-pub struct LayersUnion<
-    PrevLayer: Layer,
-    CurrentLayer: Layer<InputType = PrevLayer::OutputType>,
-> {
-    prev_layer: PrevLayer,
-    current_layer: CurrentLayer,
-}
-
-impl<PrevLayer: Layer, CurrentLayer: Layer<InputType = PrevLayer::OutputType>>
-    LayersUnion<PrevLayer, CurrentLayer>
-{
-    pub fn join(l1: PrevLayer, l2: CurrentLayer) -> Self {
-        assert_eq!(l1.output_size(), l2.input_size());
-        return Self {
-            prev_layer: l1,
-            current_layer: l2,
-        };
-    }
-}
-
-impl<PrevLayer: Layer, CurrentLayer: Layer<InputType = PrevLayer::OutputType>> Layer
-    for LayersUnion<PrevLayer, CurrentLayer>
-{
-    type InputType = PrevLayer::InputType;
-    type OutputType = CurrentLayer::OutputType;
-
-    fn compute(&self, input_data: &[Self::InputType], output_data: &mut [Self::OutputType]) {
-        let mut intermediate_data_s =
-            vec![0u8; self.prev_layer.output_size() * size_of::<CurrentLayer::InputType>()]
-                .into_boxed_slice();
-
-        let intermediate_data;
-        unsafe {
-            intermediate_data = std::slice::from_raw_parts_mut(
-                intermediate_data_s.as_mut_ptr().cast::<CurrentLayer::InputType>(),
-                self.prev_layer.output_size(),
-            )
-        }
-        self.prev_layer.compute(input_data, intermediate_data);
-        self.current_layer.compute(intermediate_data, output_data);
-    }
-
-    fn input_size(&self) -> usize {
-        return self.prev_layer.input_size();
-    }
-
-    fn output_size(&self) -> usize {
-        return self.current_layer.output_size();
-    }
-}
-
-impl<
-    PrevLayer: Layer,
-    CurrentLayer: Layer<InputType = PrevLayer::OutputType>,
-    NextLayer: Layer<InputType = CurrentLayer::OutputType>,
-> Add<NextLayer>
-    for LayersUnion<PrevLayer, CurrentLayer>
-{
-    type Output = LayersUnion<Self, NextLayer>;
-
-    fn add(self, rhs: NextLayer) -> Self::Output {
-        return LayersUnion::join(self, rhs);
-    }
-}
diff --git a/lab4/src/algo/mod.rs b/lab4/src/algo/mod.rs
index 5f0c50a..0a290fc 100644
--- a/lab4/src/algo/mod.rs
+++ b/lab4/src/algo/mod.rs
@@ -1,3 +1,2 @@
-mod layer;
-mod layers_union;
-mod layer_impl;
+mod compute;
+mod fix;