# NN / NeuralNetwork.py
# @lukas, 3 Dec 2021 — initial commit
import numpy as np
import random

class NeuralNetwork:
    """A minimal fully connected feed-forward neural network on plain lists.

    Parameters are stored as nested Python lists:
    ``weights[layer][neuron][connection]`` and ``biases[layer][neuron]``,
    where layer 0 is the first layer *after* the inputs (the input layer
    has no parameters).
    """

    def __init__(self, layout, activation):
        """Create a network with parameters drawn uniformly from [-1, 1).

        layout     -- list of layer sizes, e.g. [2, 3, 1] (inputs first)
        activation -- callable applied to each neuron's weighted sum
        """
        self.activation = activation
        self.layout = layout
        # One weight per (neuron, connection to previous layer), one bias per neuron.
        self.weights = [[[random.random() * 2 - 1
                          for connection in range(layout[layer - 1])]
                         for neuron in range(layout[layer])]
                        for layer in range(1, len(layout))]
        self.biases = [[random.random() * 2 - 1
                        for neuron in range(layout[layer])]
                       for layer in range(1, len(layout))]

    def toString(self):
        """Return a human-readable dump of all weights and biases."""
        return "weights:\n" + str(self.weights) + "\nbiases:\n" + str(self.biases)

    def getActivations(self, inputs):
        """Forward-propagate *inputs*.

        Returns a list with one entry per non-input layer; each entry is that
        layer's activation list, so the last entry is the network output.
        """
        lastLayer = inputs
        result = []
        for layer in range(len(self.weights)):
            lastLayer = [self.activation(
                             self.biases[layer][neuron]
                             + sum(self.weights[layer][neuron][connection] * lastLayer[connection]
                                   for connection in range(len(self.weights[layer][neuron]))))
                         for neuron in range(len(self.weights[layer]))]
            result.append(lastLayer)
        return result

    def evaluate(self, inputs):
        """Return the output layer's activations for *inputs*."""
        return self.getActivations(inputs)[-1]

    def fit(self, data, trainingSpeed):
        """Accumulate error gradients over *data* and apply one update step.

        data          -- iterable of (inputs, expectedOutputs) pairs
        trainingSpeed -- step size; adjustments are rescaled so the largest
                         has magnitude *trainingSpeed*
        Returns the summed squared error over the batch (measured before the
        parameter update).

        NOTE(review): the backward pass applies no activation-function
        derivative (none is supplied to this class), so the gradients are
        exact only for an identity/linear activation.
        """
        weightAdjust = [[[0.0 for connection in range(len(self.weights[layer][neuron]))]
                         for neuron in range(len(self.weights[layer]))]
                        for layer in range(len(self.weights))]
        biasAdjust = [[0.0 for neuron in range(len(self.weights[layer]))]
                      for layer in range(len(self.weights))]
        error = 0.0
        for inputs, optimum in data:
            # activations[0] is the input layer, activations[-1] the output.
            activations = [inputs] + self.getActivations(inputs)
            adjust = [optimum[i] - activations[-1][i] for i in range(len(optimum))]
            error += sum(x ** 2 for x in adjust)
            for layerNr in range(len(self.weights)):  # backpropagation, output -> input
                layer = len(self.weights) - layerNr - 1
                newAdjust = [0.0] * len(self.weights[layer][0])
                for neuron in range(len(self.weights[layer])):
                    for connection in range(len(self.weights[layer][neuron])):
                        # Gradient of the error w.r.t. this weight: error signal
                        # times the input that fed the weight.
                        weightAdjust[layer][neuron][connection] += \
                            adjust[neuron] * activations[layer][connection]
                        # BUGFIX: the error signal travels backwards through the
                        # *weight*, not through the input activation (the original
                        # line was a copy-paste of the weight-gradient product).
                        newAdjust[connection] += \
                            self.weights[layer][neuron][connection] * adjust[neuron]
                    biasAdjust[layer][neuron] += adjust[neuron]
                adjust = newAdjust
        # BUGFIX: normalise by the largest *absolute* adjustment.  The original
        # nested max() compared sub-lists lexicographically (not a global max)
        # and could yield a negative divisor, flipping the update direction.
        maxAdjust = max(
            [abs(w) for layer in weightAdjust for neuron in layer for w in neuron]
            + [abs(b) for layer in biasAdjust for b in layer],
            default=0.0)
        if maxAdjust == 0.0:
            return error  # all gradients zero: nothing to update
        scale = trainingSpeed / maxAdjust
        self.weights = [[[self.weights[layer][neuron][connection]
                          + weightAdjust[layer][neuron][connection] * scale
                          for connection in range(len(self.weights[layer][neuron]))]
                         for neuron in range(len(self.weights[layer]))]
                        for layer in range(len(self.weights))]
        self.biases = [[self.biases[layer][neuron] + biasAdjust[layer][neuron] * scale
                        for neuron in range(len(self.biases[layer]))]
                       for layer in range(len(self.biases))]
        return error