[lab4] Simplified to procedures on f64

Andrew Golovashevich 2026-02-08 19:30:37 +03:00
parent 491baf2bfb
commit 91fa4cefe2
6 changed files with 49 additions and 181 deletions

17
lab4/src/algo/compute.rs Normal file

@ -0,0 +1,17 @@
fn compute_potential<const Is: usize, const Os: usize>(
    weights: &[[f64; Is]; Os],
    input_data: &[f64; Is],
    potential_data: &mut [f64; Os],
    output_data: &mut [f64; Os],
    f: impl Fn(f64) -> f64
) {
    for (i, n) in weights.iter().enumerate() {
        let p: f64 = input_data
            .iter()
            .zip(n)
            .map(|(x, w)| x * w)
            .sum();
        potential_data[i] = p;
        output_data[i] = f(p);
    }
}
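
A minimal usage sketch (not part of the commit): a 2-input, 3-neuron layer with a logistic activation. The weight and input values are made up for illustration, and the test sits in the same module because compute_potential is private.

// Hypothetical smoke test for compute_potential; all numbers are invented.
#[cfg(test)]
mod tests {
    use super::compute_potential;

    #[test]
    fn forward_pass_2_to_3() {
        let sigmoid = |x: f64| 1.0 / (1.0 + (-x).exp());
        let weights = [[0.5, -0.3], [0.1, 0.8], [-0.7, 0.2]];
        let input = [1.0, 0.5];
        let mut potentials = [0.0f64; 3];
        let mut outputs = [0.0f64; 3];
        compute_potential(&weights, &input, &mut potentials, &mut outputs, sigmoid);
        // potentials[0] = 0.5 * 1.0 + (-0.3) * 0.5 = 0.35; outputs[0] = f(0.35)
        assert!((potentials[0] - 0.35).abs() < 1e-12);
        assert!((outputs[0] - sigmoid(0.35)).abs() < 1e-12);
    }
}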

30
lab4/src/algo/fix.rs Normal file

@ -0,0 +1,30 @@
fn calc_error<const Cs: usize, const Ns: usize>(
next_errors: &[f64; Ns],
weights: &[[f64; Cs]; Ns],
current_errors: &mut [f64; Cs]
) {
for i in 0..Cs {
current_errors[i] = weights
.iter()
.enumerate()
.map(|(j, ww)| ww[i] * next_errors[j])
.sum();
}
}
fn apply_error<const Cs: usize, const Ns: usize>(
n: f64,
errors: &[f64; Ns],
weights: &mut [[f64; Cs]; Ns],
current_potentials: &[f64; Cs],
next_potentials: &[f64; Ns],
f: impl Fn(f64) -> f64,
f1: impl Fn(f64) -> f64
) {
for i in 0..Cs {
for j in 0..Ns {
let dw = n * errors[j] * f1(next_potentials[j]) * f(current_potentials[i]);
weights[j][i] += dw;
}
}
}
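
For orientation, the update in apply_error reads as the classic delta rule, dw[j][i] = n * e[j] * f'(P[j]) * f(P[i]), with n the learning rate, e the errors at the next layer, and P the potentials. Below is a sketch of one backward step between a 2-neuron and a 3-neuron layer (not part of the commit; all values are made up, and it assumes the same module since both functions are private).

// Hypothetical backward-pass sketch; the layer sizes, errors, and
// potentials are invented for illustration.
#[cfg(test)]
mod tests {
    use super::{apply_error, calc_error};

    #[test]
    fn backward_step_3_to_2() {
        let f = |x: f64| 1.0 / (1.0 + (-x).exp());
        let f1 = move |x: f64| f(x) * (1.0 - f(x)); // derivative of the logistic function
        // Weights between the current (2-neuron) and next (3-neuron) layer.
        let mut weights = [[0.5, -0.3], [0.1, 0.8], [-0.7, 0.2]];
        let next_errors = [0.1, -0.2, 0.05]; // e.g. target - output at the next layer
        let current_potentials = [0.4, -0.1];
        let next_potentials = [0.3, 0.6, -0.2];

        // Push the error back through the weights to the current layer.
        let mut current_errors = [0.0f64; 2];
        calc_error(&next_errors, &weights, &mut current_errors);
        assert!((current_errors[0] - (0.5 * 0.1 + 0.1 * (-0.2) + (-0.7) * 0.05)).abs() < 1e-12);

        // Then nudge the weights with learning rate 0.1.
        apply_error(0.1, &next_errors, &mut weights, &current_potentials, &next_potentials, f, f1);
        assert!(weights[0][0] != 0.5); // the update actually moved the weight
    }
}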

9
lab4/src/algo/layer.rs

@ -1,9 +0,0 @@
pub trait Layer {
type InputType;
type OutputType;
fn compute(&self, input_data: &[Self::InputType], output_data: &mut [Self::OutputType]);
fn input_size(&self) -> usize;
fn output_size(&self) -> usize;
}

102
lab4/src/algo/layer_impl.rs

@ -1,102 +0,0 @@
use crate::algo::layer::Layer;
use crate::algo::layers_union::LayersUnion;
use std::iter::Sum;
use std::marker::PhantomData;
use std::ops::Add;
struct Neuron<const PrevLayerSize: usize> {
input_weights: [f64; PrevLayerSize],
}
pub trait ApplyWeight<Output> {
fn apply_weight(&self, w: f64) -> Output;
}
impl ApplyWeight<f64> for f64 {
fn apply_weight(&self, w: f64) -> f64 {
return *self * w;
}
}
pub struct LayerImpl<
const PrevLayerSize: usize,
const CurrentLayerSize: usize,
InputType: ApplyWeight<WeightedType>,
WeightedType,
ActivationType: Sum<WeightedType>,
ActivationFunction: Fn(ActivationType) -> OutputType,
OutputType,
> {
neurons: [Neuron<PrevLayerSize>; CurrentLayerSize],
activation_function: ActivationFunction,
__phantom: PhantomData<(InputType, WeightedType, ActivationType, OutputType)>,
}
impl<
const PrevLayerSize: usize,
const CurrentLayerSize: usize,
InputType: ApplyWeight<WeightedType>,
WeightedType,
ActivationType: Sum<WeightedType>,
ActivationFunction: Fn(ActivationType) -> OutputType,
OutputType,
> Layer
for LayerImpl<
PrevLayerSize,
CurrentLayerSize,
InputType,
WeightedType,
ActivationType,
ActivationFunction,
OutputType,
>
{
type InputType = InputType;
type OutputType = OutputType;
fn compute(&self, input_data: &[InputType], output_data: &mut [OutputType]) {
for (i, n) in self.neurons.iter().enumerate() {
let P = input_data
.iter()
.zip(n.input_weights)
.map(|(x, w)| x.apply_weight(w))
.sum();
output_data[i] = (self.activation_function)(P);
}
}
fn input_size(&self) -> usize {
return PrevLayerSize;
}
fn output_size(&self) -> usize {
return CurrentLayerSize;
}
}
impl<
const PrevPrevLayerSize: usize,
const PrevLayerSize: usize,
PrevLayerInputType: ApplyWeight<PrevLayerWeightedType>,
PrevLayerWeightedType,
PrevLayerActivationType: Sum<PrevLayerWeightedType>,
PrevLayerActivationFunction: Fn(PrevLayerActivationType) -> PrevLayerOutputType,
PrevLayerOutputType,
CurrentLayer: Layer<InputType = PrevLayerOutputType>,
> Add<CurrentLayer>
for LayerImpl<
PrevPrevLayerSize,
PrevLayerSize,
PrevLayerInputType,
PrevLayerWeightedType,
PrevLayerActivationType,
PrevLayerActivationFunction,
PrevLayerOutputType,
>
{
type Output = LayersUnion<Self, CurrentLayer>;
fn add(self, rhs: CurrentLayer) -> Self::Output {
return LayersUnion::join(self, rhs);
}
}

67
lab4/src/algo/layers_union.rs

@ -1,67 +0,0 @@
use crate::algo::layer::Layer;
use std::ops::Add;
pub struct LayersUnion<
PrevLayer: Layer,
CurrentLayer: Layer<InputType = PrevLayer::OutputType>,
> {
prev_layer: PrevLayer,
current_layer: CurrentLayer,
}
impl<PrevLayer: Layer, CurrentLayer: Layer<InputType = PrevLayer::OutputType>>
LayersUnion<PrevLayer, CurrentLayer>
{
pub fn join(l1: PrevLayer, l2: CurrentLayer) -> Self {
assert_eq!(l1.output_size(), l2.input_size());
return Self {
prev_layer: l1,
current_layer: l2,
};
}
}
impl<PrevLayer: Layer, CurrentLayer: Layer<InputType = PrevLayer::OutputType>> Layer
for LayersUnion<PrevLayer, CurrentLayer>
{
type InputType = PrevLayer::InputType;
type OutputType = CurrentLayer::OutputType;
fn compute(&self, input_data: &[Self::InputType], output_data: &mut [Self::OutputType]) {
let mut intermediate_data_s =
vec![0u8; self.prev_layer.output_size() * size_of::<PrevLayer::OutputType>()]
.into_boxed_slice();
let intermediate_data;
unsafe {
intermediate_data = std::slice::from_raw_parts_mut(
intermediate_data_s.as_mut_ptr().cast::<PrevLayer::OutputType>(),
self.prev_layer.output_size(),
)
}
self.prev_layer.compute(input_data, intermediate_data);
self.current_layer.compute(intermediate_data, output_data);
}
fn input_size(&self) -> usize {
return self.prev_layer.input_size();
}
fn output_size(&self) -> usize {
return self.current_layer.output_size();
}
}
impl<
PrevLayer: Layer,
CurrentLayer: Layer<InputType = PrevLayer::OutputType>,
NextLayer: Layer<InputType = CurrentLayer::OutputType>,
> Add<NextLayer>
for LayersUnion<PrevLayer, CurrentLayer>
{
type Output = LayersUnion<Self, NextLayer>;
fn add(self, rhs: NextLayer) -> Self::Output {
return LayersUnion::join(self, rhs);
}
}

5
lab4/src/algo/mod.rs

@ -1,3 +1,2 @@
-mod layer;
-mod layers_union;
-mod layer_impl;
+mod compute;
+mod fix;