def test_should_add_itself_to_other_10x10_matrix(self):
    matrix = Matrix(10, 10, 1)
    other_matrix = Matrix(10, 10, 1336)
    matrix.add(other_matrix)
    self.assertEqual(1337, matrix.get_item(9, 9))
import math


class OutputLayer(Layer):
    def __init__(self, size):
        Layer.__init__(self, size)
        self.previous_layer = None
        self.W = None
        self.B = None
        self.E = None
        self.activation_function = lambda x: 1 / (1 + math.exp(-x))

    def initialize(self, previous_layer):
        self.previous_layer = previous_layer
        self.W = Matrix(self.size, previous_layer.size).randomize(-1, 1)
        self.B = Matrix(self.size, 1).randomize(-1, 1)

    def feed_forward(self):
        self.values = ((self.W * self.previous_layer.values) + self.B).map_function(self.activation_function)

    def calculate_errors(self, target_arr):
        if len(target_arr) == self.size:
            self.E = Matrix.from_list(target_arr, self.size, 1) - self.values
        else:
            raise ValueError("Incorrect target size.")

    def adjust_parameters(self, learning_rate):
        gradients = self.values.map_function(
            lambda x: x * (1 - x)).get_hadamard_product(
                self.E).get_scalar_multiple(learning_rate)
        self.W.add(gradients * self.previous_layer.values.get_transpose())
        self.B.add(gradients)
def test_should_add_itself_to_other_1x1_matrix(self):
    matrix = Matrix(1, 1, 1)
    other_matrix = Matrix(1, 1, 1336)
    matrix.add(other_matrix)
    self.assertEqual(1337, matrix.get_item(0, 0))
def test_add_scalar_to_matrix(self):
    m = Matrix(3, 3)
    m.data[0] = [1, 2, 3]
    m.data[1] = [4, 5, 6]
    m.data[2] = [7, 8, 9]
    m.add(1)
    self.assertEqual(m.rows, 3)
    self.assertEqual(m.cols, 3)
    self.assertEqual(m.data, [[2, 3, 4], [5, 6, 7], [8, 9, 10]])
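The tests above pin down an add() that accepts either a scalar or a same-shaped matrix and mutates in place. A minimal sketch of a Matrix that would satisfy them, written from the tests alone (the real class is not shown here, so every detail below is an assumption):

class Matrix:
    """Hypothetical minimal Matrix matching the add() semantics used in the tests above."""

    def __init__(self, rows, cols, fill=0):
        self.rows, self.cols = rows, cols
        self.data = [[fill for _ in range(cols)] for _ in range(rows)]

    def get_item(self, row, col):
        return self.data[row][col]

    def add(self, other):
        if isinstance(other, Matrix):
            # Elementwise add: shapes must match.
            if (other.rows, other.cols) != (self.rows, self.cols):
                raise ValueError("shape mismatch")
            for i in range(self.rows):
                for j in range(self.cols):
                    self.data[i][j] += other.data[i][j]
        else:
            # Scalar add: bump every entry.
            for i in range(self.rows):
                for j in range(self.cols):
                    self.data[i][j] += other
        return self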
def feed_forward(self, input_list):
    inputs = Matrix.fromList(input_list)
    hidden = Matrix.multiply(self.weights_ih, inputs)
    Matrix.add(hidden, self.bias_h)
    hidden.map_matrix(sigmoid)
    output = Matrix.multiply(self.weights_ho, hidden)
    Matrix.add(output, self.bias_o)
    output.map_matrix(sigmoid)
    return output
import pytest


def test_matrix_add():
    from matrix import Matrix
    m_1 = Matrix((4, 3))
    m_1[0][1] = 2
    # Mismatched shapes must raise.
    m_2 = Matrix((2, 2), value=[[1, 2], [3, 4]])
    with pytest.raises(Exception):
        _ = Matrix.add(m_1, m_2)
    m_3 = Matrix((4, 3), value=[[1, 2, 3], [0, 2, 1], [4, 5, -1], [1, 2, 0]])
    m_add = Matrix.add(m_1, m_3)
    assert m_add[0][0] == 1
    assert m_add[0][1] == 4
    assert m_add[0][2] == 3
def test_add_matrix_to_matrix(self):
    m = Matrix(2, 2)
    m.data[0] = [1, 2]
    m.data[1] = [3, 4]
    n = Matrix(2, 2)
    n.data[0] = [10, 11]
    n.data[1] = [12, 13]
    m.add(n)
    self.assertEqual(m.rows, 2)
    self.assertEqual(m.cols, 2)
    self.assertEqual(m.data, [[11, 13], [15, 17]])
def parse(string):
    if string in name_space:
        value = name_space.get(string)
        print(value)
    else:
        mat = Matrix()
        l = string.replace(' ', '')
        if l.find('=') != -1:
            # Assignment: zeros-initialised, literal, or concatenation of known names.
            if l[2:] not in name_space and l.find('zeros') != -1:
                zer = mat.matrix_creation_using_zeros(l[2:])
                name_space[str(l[0])] = zer
            elif l[3] not in name_space:
                matr = mat.matrix_creation(l[2:])
                name_space[str(l[0])] = matr
            elif l[3] and l[5] in name_space:
                if (l.find(',') != -1) or (l.find(';') != -1):
                    conc = mat.concat(l[2:])
                    name_space[str(l[0])] = conc
        if l.find('+') != -1:
            add = mat.add(l)
            print(add)
        if l.find("'") != -1:
            tr = mat.transposer(l)
            print(tr)
        if l.find('inv') != -1:
            inv = mat.inverser(l[-2])
            print(inv)
def test_add_matrix():
    passed = True
    failed_tests = []
    options = [[[1, 2, 3], [2, 3, 4], [3, 4, 5]],
               [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]
    for o_index, option in enumerate(options):
        test_matrix_a = Matrix.build_from_rows([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
        test_matrix_b = Matrix.build_from_rows(option)
        matrices_to_add = [test_matrix_a, test_matrix_b]
        correct_values = []
        for i in range(test_matrix_a.dimensions[0]):
            correct_values.append([])
            for j in range(test_matrix_a.dimensions[1]):
                correct_values[i].append(0)
                for k in range(len(matrices_to_add)):
                    correct_values[i][j] += matrices_to_add[k].values[i][j]
        result_matrix = Matrix.add(matrices_to_add)
        for i in range(result_matrix.dimensions[0]):
            for j in range(result_matrix.dimensions[1]):
                if result_matrix.values[i][j] != correct_values[i][j]:
                    passed = False
                    failed_tests.append(o_index)
    if not passed:
        print('TEST FAILED: Add matrices failed tests: {}.'.format(failed_tests))
    else:
        print('TEST PASSED: Add matrices test passed.')
def test_add_matrix_initial():
    passed = True
    failed_tests = []
    options = [
        [[1, 2], [1, 2]],
        [[1, 2, 3], [1, 2, 3]],
    ]
    for o_index, option in enumerate(options):
        test_matrix_a = Matrix.build_from_rows([[1, 2, 3], [1, 2, 3]])
        test_matrix_b = Matrix.build_from_rows(option)
        result_matrix = Matrix.add(
            [test_matrix_a, test_matrix_a, test_matrix_a, test_matrix_b])
        if o_index != len(options) - 1:
            if result_matrix.valid:
                passed = False
                failed_tests.append(o_index)
        else:
            if not result_matrix.valid:
                passed = False
                failed_tests.append(o_index)
    if not passed:
        print('TEST FAILED: Add matrices initial failed tests: {}.'.format(failed_tests))
    else:
        print('TEST PASSED: Add matrices initial test passed.')
def test_add_None(self):
    mat_a = Matrix(2, 2)
    mat_b = Matrix(3, 2)
    mat_a.map(lambda x: x + 1)
    mat_b.map(lambda x: x + 2)
    # Mismatched shapes: add() is expected to return None, not a Matrix.
    mat_ret_none = mat_a.add(mat_b)
    self.assertNotIsInstance(mat_ret_none, Matrix)
def test_add(self):
    a = [[1, 2, 3], [3, 5, 6], [6, 1, 1]]
    b = [[4, 2, 5], [1, 7, 5], [3, 9, 2]]
    A = Matrix([3, 3], elems=a)
    B = Matrix([3, 3], elems=b)
    correct = np.add(a, b)
    self.assertEqual(A.add(B).elems[1][1], correct[1][1])
def test_add_scalar(self):
    mat = Matrix(2, 2)
    mat.map(lambda x: x + 1)
    mat_ret = mat.add(2)
    self.assertEqual(mat_ret.rows, 2)
    self.assertEqual(mat_ret.cols, 2)
    self.assertEqual(mat_ret.data, [[3, 3], [3, 3]])
def main():
    A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    # B = Matrix([1, 2, 3])
    # C = Matrix([1, [2, 3]])
    B = Matrix([[10, 2, 3], [4, 50, 6]])
    print(A)
    print(B)
    print(A.add(B))
def test():
    matrix_1 = Matrix(4, 5, 6, 7)
    matrix_2 = Matrix(2, 2, 2, 1)
    matrix_3 = matrix_2.add(matrix_1)
    matrix_4 = matrix_3.prod(matrix_2)
    print(matrix_3)
    print(matrix_4)
    matrix_1[0][1] = 23
    print(matrix_1)
def test_add_elementwise(self):
    mat_a = Matrix(2, 2)
    mat_b = Matrix(2, 2)
    mat_a.map(lambda x: x + 1)
    mat_b.map(lambda x: x + 2)
    mat_ret = mat_a.add(mat_b)
    self.assertEqual(mat_ret.rows, 2)
    self.assertEqual(mat_ret.cols, 2)
    self.assertEqual(mat_ret.data, [[3, 3], [3, 3]])
import math


class HiddenLayer(Layer):
    def __init__(self, size):
        Layer.__init__(self, size)
        self.previous_layer = None
        self.next_layer = None
        self.W = None
        self.B = None
        self.E = None
        self.activation_function = lambda x: 1 / (1 + math.exp(-x))

    def initialize(self, previous_layer, next_layer):
        self.previous_layer = previous_layer
        self.next_layer = next_layer
        self.W = Matrix(self.size, previous_layer.size).randomize(-1, 1)
        self.B = Matrix(self.size, 1).randomize(-1, 1)

    def feed_forward(self):
        self.values = (self.W * self.previous_layer.values + self.B).map_function(self.activation_function)

    def calculate_errors(self):
        # Backpropagate the next layer's error through its weights.
        self.E = self.next_layer.W.get_transpose() * self.next_layer.E

    def adjust_parameters(self, learning_rate):
        gradients = self.values.map_function(
            lambda x: x * (1 - x)).get_hadamard_product(
                self.E).get_scalar_multiple(learning_rate)
        self.W.add(gradients * self.previous_layer.values.get_transpose())
        self.B.add(gradients)
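OutputLayer (earlier in this section) and HiddenLayer share one API, so a hedged sketch of how they might be wired for a single training step follows. InputLayer, its set_values helper, and the Layer/Matrix behavior they lean on are assumptions inferred from the initialize()/feed_forward() signatures, not code from the source.

# Hypothetical glue code: a minimal input layer plus one training step.
class InputLayer(Layer):
    def set_values(self, input_list):
        # Assumes Matrix.from_list(list, rows, cols), as used in calculate_errors above.
        self.values = Matrix.from_list(input_list, self.size, 1)


input_layer = InputLayer(2)
hidden_layer = HiddenLayer(4)
output_layer = OutputLayer(1)
hidden_layer.initialize(input_layer, output_layer)
output_layer.initialize(hidden_layer)

input_layer.set_values([0.0, 1.0])
hidden_layer.feed_forward()
output_layer.feed_forward()
output_layer.calculate_errors([1.0])   # target for this sample
hidden_layer.calculate_errors()
output_layer.adjust_parameters(0.1)    # learning rate
hidden_layer.adjust_parameters(0.1)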
def add(self, a, b):
    # Type-dispatching addition over Rational (float), Complex, and Matrix operands.
    if isinstance(a, float):
        if isinstance(b, float):
            return a + b
        elif isinstance(b, Complex):
            return Complex.add(Complex(a), b)
        elif isinstance(b, Matrix):
            raise ComputorException('Illegal operation: Rational + Matrix')
    elif isinstance(a, Complex):
        if isinstance(b, float):
            return Complex.add(a, Complex(b))
        elif isinstance(b, Complex):
            return Complex.add(a, b)
        elif isinstance(b, Matrix):
            raise ComputorException('Illegal operation: Complex + Matrix')
    elif isinstance(a, Matrix):
        if isinstance(b, float):
            raise ComputorException('Illegal operation: Matrix + Rational')
        elif isinstance(b, Complex):
            raise ComputorException('Illegal operation: Matrix + Complex')
        elif isinstance(b, Matrix):
            return Matrix.add(a, b)
    raise ComputorException('Computor.add(): something bad happened 🤷')
class NeuralNetwork:
    def __init__(self, inputs, hidden, output):
        self.num_inputs = inputs
        self.num_hidden = hidden
        self.num_output = output
        # Randomize weights
        self.weights_ih = Matrix(self.num_hidden, self.num_inputs)
        self.weights_ho = Matrix(self.num_output, self.num_hidden)
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        # Randomize biases
        self.bias_h = Matrix(self.num_hidden, 1)
        self.bias_o = Matrix(self.num_output, 1)
        self.bias_h.randomize()
        self.bias_o.randomize()
        self.learning_rate = 0.1

    @staticmethod
    def sigmoid(x):
        return 1 / (1 + math.exp(-x))

    @staticmethod
    def dsigmoid(y):
        return y * (1 - y)

    def feedforward(self, input_array):
        # Transform input array into an inputs matrix
        inputs = Matrix.static_fromArray(input_array)
        # Generate the hidden layer
        hidden = Matrix.static_multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(self.sigmoid)
        # Generate the output layer
        output = Matrix.static_multiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(self.sigmoid)
        # Return output array
        return Matrix.static_toArray(output)

    def train(self, input_array, target_array):
        # Transform input array into an inputs matrix
        inputs = Matrix.static_fromArray(input_array)
        self.last_input = inputs
        # Generate the hidden layer
        hidden = Matrix.static_multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(self.sigmoid)
        # Generate the output layer
        output = Matrix.static_multiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(self.sigmoid)
        # Transform target array into a target matrix
        target = Matrix.static_fromArray(target_array)

        # 1a. Calculate the output errors
        output_errors = Matrix.static_substract(target, output)
        # 2a. Calculate the output gradient (gradient of the output cost function)
        output_gradient = Matrix.static_map(output, self.dsigmoid)
        output_gradient.multiply(output_errors)
        # 3a. Calculate the output delta adjustments
        hidden_t = Matrix.static_transpose(hidden)
        weights_ho_delta = Matrix.static_multiply(output_gradient, hidden_t)
        weights_ho_delta.multiply(self.learning_rate)
        # 4a. Adjust the output weights & output bias
        self.weights_ho.add(weights_ho_delta)
        self.bias_o.add(output_gradient)

        # 1b. Calculate the hidden layer errors
        who_t = Matrix.static_transpose(self.weights_ho)
        hidden_errors = Matrix.static_multiply(who_t, output_errors)
        # 2b. Calculate the hidden gradient (gradient of the hidden cost function)
        hidden_gradient = Matrix.static_map(hidden, self.dsigmoid)
        hidden_gradient.multiply(hidden_errors)
        # 3b. Calculate the hidden delta adjustments
        inputs_t = Matrix.static_transpose(inputs)
        weights_ih_delta = Matrix.static_multiply(hidden_gradient, inputs_t)
        weights_ih_delta.multiply(self.learning_rate)
        self.weights_ih_delta = weights_ih_delta
        # 4b. Adjust the hidden weights & hidden bias
        self.weights_ih.add(weights_ih_delta)
        self.bias_h.add(hidden_gradient)
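For the NeuralNetwork classes in this section, a typical driver looks like the following sketch, using the class directly above; the XOR data, layer sizes, and iteration count are illustrative assumptions, not part of the source.

import random

nn = NeuralNetwork(2, 4, 1)
xor_data = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
# Train on randomly drawn samples.
for _ in range(10000):
    inputs, target = random.choice(xor_data)
    nn.train(inputs, target)
# Outputs after training should drift toward the targets.
for inputs, target in xor_data:
    print(inputs, target, nn.feedforward(inputs))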
class NeuralNetwork:
    num_of_input_neurons = None
    num_of_hidden_neurons = None
    num_of_output_neurons = None
    error = 0
    lr = 0.1

    # Initialize the neural network layers, weights and biases.
    def __init__(self, no_of_input, no_of_hidden, no_of_output, weight_file="weights.txt"):
        user_cancel = False
        if weight_file == "":
            user_cancel = True
        self.num_of_input_neurons = no_of_input
        self.num_of_hidden_neurons = no_of_hidden
        self.num_of_output_neurons = no_of_output
        # Check whether saved weights exist on the local drive.
        weights_exist = Path(weight_file).exists()
        if weights_exist and not user_cancel:
            with open(weight_file, "r") as f:
                lines = f.readlines()
            w, b = [], []
            w_count, b_count = 0, 0
            w_i_h, w_h_o, b_h, b_o = [], [], [], []
            for text in lines:
                if text.find("w") != -1:
                    w.append(ast.literal_eval(text.split(":")[1]))
                elif text.find("b") != -1:
                    b.append(ast.literal_eval(text.split(":")[1]))
            for i in range(no_of_hidden):
                w_i_h.append([])
                for j in range(no_of_input):
                    w_i_h[i].append(w[w_count])
                    w_count += 1
            for i in range(no_of_hidden):
                b_h.append(b[b_count])
                b_count += 1
            for i in range(no_of_output):
                w_h_o.append([])
                for j in range(no_of_hidden):
                    w_h_o[i].append(w[w_count])
                    w_count += 1
            for i in range(no_of_output):
                b_o.append(b[b_count])
                b_count += 1
            self.weight_input_to_hidden = Matrix.array_to_matrix(
                w_i_h, no_of_hidden, no_of_input)
            self.weight_hidden_to_output = Matrix.array_to_matrix(
                w_h_o, no_of_output, no_of_hidden)
            self.bias_of_hidden = Matrix.array_to_vector(b_h)
            self.bias_of_output = Matrix.array_to_vector(b_o)
        else:
            self.weight_input_to_hidden = Matrix(no_of_hidden, no_of_input)
            self.weight_hidden_to_output = Matrix(no_of_output, no_of_hidden)
            self.weight_input_to_hidden.randomize()
            self.weight_hidden_to_output.randomize()
            self.bias_of_hidden = Matrix(no_of_hidden, 1)
            self.bias_of_output = Matrix(no_of_output, 1)
            self.bias_of_hidden.randomize()
            self.bias_of_output.randomize()

    # Sigmoid activation function.
    @staticmethod
    def sigmoid(x):
        return 1 / (1 + math.exp(-x))

    # Derivative of the sigmoid activation function.
    @staticmethod
    def d_sigmoid(y):
        return y * (1 - y)

    # Predict the result for a given input.
    def predict(self, input_array):
        inputs = Matrix.array_to_vector(input_array)
        hidden = Matrix.dot_product(self.weight_input_to_hidden, inputs)
        hidden.add(self.bias_of_hidden)
        hidden.map(NeuralNetwork.sigmoid)
        output = Matrix.dot_product(self.weight_hidden_to_output, hidden)
        output.add(self.bias_of_output)
        output.map(NeuralNetwork.sigmoid)
        return output.to_array()

    # Train the network with backpropagation (gradient descent).
    def train(self, input_array, target_array):
        # Convert the input array to a vector and feed forward.
        inputs = Matrix.array_to_vector(input_array)
        hidden = Matrix.dot_product(self.weight_input_to_hidden, inputs)
        hidden.add(self.bias_of_hidden)
        hidden.map(NeuralNetwork.sigmoid)
        output = Matrix.dot_product(self.weight_hidden_to_output, hidden)
        output.add(self.bias_of_output)
        output.map(NeuralNetwork.sigmoid)
        target_matrix = Matrix.array_to_vector(target_array)
        # Calculate the output error.
        error = Matrix.sub(target_matrix, output)
        self.error = error
        weight_hidden_to_output_t = Matrix.transpose(self.weight_hidden_to_output)
        hidden_error = Matrix.dot_product(weight_hidden_to_output_t, error)
        # Calculate the gradient of the output layer.
        output_gradient_matrix = Matrix.map_s(output, NeuralNetwork.d_sigmoid)
        output_gradient_matrix.scale(error)
        output_gradient_matrix.scale(self.lr)
        delta_weights_ho = Matrix.dot_product(output_gradient_matrix, Matrix.transpose(hidden))
        # Update hidden -> output weights and the output bias.
        self.weight_hidden_to_output.add(delta_weights_ho)
        self.bias_of_output.add(output_gradient_matrix)
        # Calculate the gradient of the hidden layer.
        hidden_gradient_matrix = Matrix.map_s(hidden, NeuralNetwork.d_sigmoid)
        hidden_gradient_matrix.scale(hidden_error)
        hidden_gradient_matrix.scale(self.lr)
        delta_weights_ih = Matrix.dot_product(hidden_gradient_matrix, Matrix.transpose(inputs))
        # Update input -> hidden weights and the hidden bias.
        self.bias_of_hidden.add(hidden_gradient_matrix)
        self.weight_input_to_hidden.add(delta_weights_ih)
import random


class NeuralNetwork:
    def __init__(self, input_nodes, hidden_nodes=False, output_nodes=False):
        if not hidden_nodes:
            # Copy-construct from another network.
            a = input_nodes
            self.input_nodes = a.input_nodes
            self.hidden_nodes = a.hidden_nodes
            self.output_nodes = a.output_nodes
            self.weights_ih = a.weights_ih.copy()
            self.weights_ho = a.weights_ho.copy()
            self.weights_ho_t = a.weights_ho_t.copy()
            self.bias_h = a.bias_h.copy()
            self.bias_o = a.bias_o.copy()
        else:
            self.input_nodes = input_nodes
            self.hidden_nodes = hidden_nodes
            self.output_nodes = output_nodes
            self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
            self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
            self.weights_ho_t = Matrix.transpose(self.weights_ho)
            self.bias_h = Matrix(self.hidden_nodes, 1)
            self.bias_o = Matrix(self.output_nodes, 1)
            self.weights_ih.randomize()
            self.weights_ho.randomize()
            self.bias_h.randomize()
            self.bias_o.randomize()
        self.learning_rate = 0.1

    def predict(self, input_array):
        # Computing hidden outputs
        inputs = Matrix.fromArray(input_array)
        hidden = Matrix.multiply1(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map1(sigmoid)  # Activation function
        # Computing the output layer's output
        outputs = Matrix.multiply1(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map1(sigmoid)
        return outputs.toArray()

    def train(self, input_array, target_array):
        # Computing hidden outputs
        inputs = Matrix.fromArray(input_array)
        hidden = Matrix.multiply1(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map1(sigmoid)  # Activation function
        # Computing the output layer's output (the network's guess)
        outputs = Matrix.multiply1(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map1(sigmoid)
        # Converting target array to a matrix object
        targets = Matrix.fromArray(target_array)
        # Calculate error
        output_errors = Matrix.subtract(targets, outputs)
        # Calculate hidden errors (refresh the transpose: weights_ho changes every step)
        self.weights_ho_t = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.multiply1(self.weights_ho_t, output_errors)
        # Calculate gradients
        gradients = Matrix.map2(outputs, dsigmoid)
        gradients.multiply2(output_errors)
        gradients.multiply2(self.learning_rate)
        # Calculate deltas
        hidden_t = Matrix.transpose(hidden)
        weight_ho_deltas = Matrix.multiply1(gradients, hidden_t)
        # Adjust hidden -> output weights and the output layer's biases
        self.weights_ho.add(weight_ho_deltas)
        self.bias_o.add(gradients)
        # Calculate the hidden gradients
        hidden_gradients = Matrix.map2(hidden, dsigmoid)
        hidden_gradients.multiply2(hidden_errors)
        hidden_gradients.multiply2(self.learning_rate)
        # Calculate deltas
        input_t = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.multiply1(hidden_gradients, input_t)
        # Adjust input -> hidden weights and hidden biases
        self.weights_ih.add(weight_ih_deltas)
        self.bias_h.add(hidden_gradients)

    def mutate(self, rate):
        def mutate(x):
            # Gaussian jitter with probability `rate` (Python stdlib calls,
            # replacing the Processing-style randomGaussian()/random()).
            return x + random.gauss(0, 0.1) if random.random() < rate else x
        self.weights_ih.map1(mutate)
        self.weights_ho.map1(mutate)
        self.bias_h.map1(mutate)
        self.bias_o.map1(mutate)

    def copy(self):
        return NeuralNetwork(self)
class NeuralNetwork:
    def __init__(self, input_nodes, hidden_nodes, output_nodes):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.learning_rate = 0.1
        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)
        self.bias_h.randomize()
        self.bias_o.randomize()

    def predict(self, input_array):
        # Generate the hidden-layer outputs
        inputs = Matrix.fromArray(input_array)
        hidden = Matrix.multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # Activation function
        hidden.map(sigmoid)
        # Generate the actual outputs
        output = Matrix.multiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(sigmoid)
        return output.toArray()

    def feedforward(self, input_array):
        # GENERATE THE HIDDEN OUTPUTS
        # Turn the input list into a vector object (a one-column matrix)
        inputs = Matrix.fromArray(input_array)
        # Multiply the inputs by their weights
        hidden = Matrix.multiply(self.weights_ih, inputs)
        # Add the bias
        hidden.add(self.bias_h)
        # Activation function!
        hidden.map(sigmoid)
        # GENERATE THE OUTPUT
        # Multiply the hidden outputs by their weights
        output = Matrix.multiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(sigmoid)
        # Turn the vector object back into a list and send it back!
        return output.toArray()

    def train(self, input_array, target_array):
        # Backpropagation
        # GENERATE THE HIDDEN OUTPUTS
        inputs = Matrix.fromArray(input_array)
        # Multiply the inputs by their weights
        hidden = Matrix.multiply(self.weights_ih, inputs)
        # Add the bias
        hidden.add(self.bias_h)
        # Activation function!
        hidden.map(sigmoid)
        # GENERATE THE OUTPUT
        outputs = Matrix.multiply(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(sigmoid)
        # Convert to a matrix object
        targets = Matrix.fromArray(target_array)
        # Compute the error: ERROR = TARGETS - OUTPUTS
        output_errors = Matrix.subtract(targets, outputs)
        # Compute the gradient
        gradients = Matrix.mapIt(outputs, dsigmoid)
        gradients.multiplyBy(output_errors)
        gradients.multiplyBy(self.learning_rate)
        # Compute the deltas
        hidden_T = Matrix.transpose(hidden)
        weight_ho_deltas = Matrix.multiply(gradients, hidden_T)
        # Adjust the weights by their deltas
        self.weights_ho.add(weight_ho_deltas)
        # Adjust the biases by their deltas
        self.bias_o.add(gradients)
        # Compute the hidden-layer errors
        who_t = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.multiply(who_t, output_errors)
        # Compute the hidden gradient
        hidden_gradient = Matrix.mapIt(hidden, dsigmoid)
        hidden_gradient.multiplyBy(hidden_errors)
        hidden_gradient.multiplyBy(self.learning_rate)
        # Compute the hidden deltas
        inputs_T = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.multiply(hidden_gradient, inputs_T)
        self.weights_ih.add(weight_ih_deltas)
        # Adjust the biases by their deltas
        self.bias_h.add(hidden_gradient)

    def copy(self):
        # Note: returns the same instance rather than a deep copy.
        return self

    def mutate(self, rate):
        def mutateIt(val):
            if random.random() < rate:
                return val + random.gauss(0, 0.1)
            else:
                return val
        self.weights_ih.map(mutateIt)
        self.weights_ho.map(mutateIt)
        self.bias_h.map(mutateIt)
        self.bias_o.map(mutateIt)
# Good luck.
from matrix import Matrix

if __name__ == "__main__":
    matrix_square_ascending = Matrix(1, 2, 3, 4)
    matrix_square = Matrix(2, 2, 2, 2)
    matrix_cubic_ascending = Matrix(1, 2, 3, 4, 5, 6, 7, 8, 9)
    matrix_cubic = Matrix(1, 1, 1, 1, 1, 1, 1, 1, 1)
    print("Add:")
    print(matrix_cubic_ascending)
    print(matrix_cubic)
    print("Result:")
    matrix_added = matrix_cubic_ascending.add(matrix_cubic)
    print(matrix_added)
    print("------------------------\n")
    print("Subtract:")
    print(matrix_square_ascending)
    print(matrix_square)
    print("Result:")
    matrix_subtracted = matrix_square_ascending.subtract(matrix_square)
    print(matrix_subtracted)
    print("------------------------\n")
    print("Dummy multiply:")
    print(matrix_square_ascending)
    print(matrix_square)
    print("Result:")
class NeuralNetwork():
    def __init__(self, number_inputs, number_hiddens, number_outputs):
        self.number_inputs = number_inputs
        self.number_hiddens = number_hiddens
        self.number_outputs = number_outputs
        self.weights_ih = Matrix(rows=self.number_hiddens, cols=self.number_inputs)
        self.weights_ho = Matrix(rows=self.number_outputs, cols=self.number_hiddens)
        self.bias_h = Matrix(rows=self.number_hiddens, cols=1)
        self.bias_h.map(lambda x: 1.0)
        self.bias_o = Matrix(rows=self.number_outputs, cols=1)
        self.bias_o.map(lambda x: 1.0)
        self.learning_rate = 0.1
        # Initialize the weights with random values
        self.weights_ih.randomize()
        self.weights_ho.randomize()

    def feedforward(self, input):
        # Extracts the output array from the 3-tuple returned by self.__guess
        return self.__guess(input)[0]

    def __guess(self, input):
        in_matrix = Matrix.fromList(input)
        hidden = Matrix.product(self.weights_ih, in_matrix)
        hidden.add(self.bias_h)
        hidden.map(self.__activate)
        output = Matrix.product(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(self.__activate)
        return (output.toList(), hidden, in_matrix)

    def train(self, inputs, target_label):
        guess_r = self.__guess(inputs)  # (output as list, hidden, input matrix)
        guess = Matrix.fromList(guess_r[0])
        hidden = guess_r[1]
        input_matrix = guess_r[2]
        target_matrix = Matrix.fromList(target_label)
        # Calculate output errors
        output_errors = Matrix.subtract(target_matrix, guess)
        # Calculating gradients for hidden -> output
        gradients_ho = Matrix(guess.data)
        gradients_ho.map(self.__activate_derivative)
        gradients_ho.multiply(output_errors)
        gradients_ho.multiply(self.learning_rate)
        # Calculating deltas
        weights_ho_deltas = Matrix.product(gradients_ho, Matrix.transpose(hidden))
        # Tweaking weights_ho with the calculated deltas
        self.weights_ho.add(weights_ho_deltas)
        # Tweaking hidden -> output bias with the gradients
        self.bias_o.add(gradients_ho)
        # Calculate hidden layer errors
        hidden_errors = Matrix.product(Matrix.transpose(self.weights_ho), output_errors)
        # Calculating gradients for input -> hidden
        gradients_ih = Matrix(hidden.data)
        gradients_ih.map(self.__activate_derivative)
        gradients_ih.multiply(hidden_errors)
        gradients_ih.multiply(self.learning_rate)
        # Calculating deltas
        weights_ih_deltas = Matrix.product(gradients_ih, Matrix.transpose(input_matrix))
        # Tweaking weights_ih with the calculated deltas
        self.weights_ih.add(weights_ih_deltas)
        # Tweaking input -> hidden bias with the gradients
        self.bias_h.add(gradients_ih)

    def __activate(self, val):
        # Sigmoid activation: https://en.wikipedia.org/wiki/Sigmoid_function
        return 1.0 / (1 + math.exp(-val))

    def __activate_derivative(self, active_val):
        return active_val * (1 - active_val)
class NeuralNetwork:
    def __init__(self, numI, numH, numO):
        self.input_nodes = numI
        self.hidden_nodes = numH
        self.output_nodes = numO
        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)
        self.bias_h.randomize()
        self.bias_o.randomize()
        self.learning_rate = 0.3

    def feedforward(self, inputs):
        # Generate hidden output
        hiddens = MatMath.multiply(self.weights_ih, inputs)
        hiddens.add(self.bias_h)
        # Activation function on hidden
        hiddens.map(sigmoid)
        # Generate output
        outputs = MatMath.multiply(self.weights_ho, hiddens)
        outputs.add(self.bias_o)
        # Activation function on output
        outputs.map(sigmoid)
        return (outputs, hiddens)

    def guess(self, input_arr):
        inputs = Matrix.fromArray(input_arr)
        (outputs, _) = self.feedforward(inputs)
        return outputs.toArray()

    def train(self, input_arr, target_arr):
        inputs = Matrix.fromArray(input_arr)
        (outputs, hiddens) = self.feedforward(inputs)
        targets = Matrix.fromArray(target_arr)
        # Calculate the output error: ERROR = TARGETS - OUTPUTS
        output_errors = MatMath.subtract(targets, outputs)
        # Calculate output gradients
        output_gradients = MatMath.map(outputs, dsigmoid)
        output_gradients.multiply(output_errors)
        output_gradients.multiply(self.learning_rate)
        # Calculate deltas
        hiddens_T = MatMath.transpose(hiddens)
        weights_ho_deltas = MatMath.multiply(output_gradients, hiddens_T)
        # Adding deltas
        self.weights_ho.add(weights_ho_deltas)
        self.bias_o.add(output_gradients)
        # Calculate the hidden layer errors
        weights_ho_T = MatMath.transpose(self.weights_ho)
        hidden_errors = MatMath.multiply(weights_ho_T, output_errors)
        # Calculate hidden gradients
        hidden_gradients = MatMath.map(hiddens, dsigmoid)
        hidden_gradients.multiply(hidden_errors)
        hidden_gradients.multiply(self.learning_rate)
        # Calculate deltas
        inputs_T = MatMath.transpose(inputs)
        weights_ih_deltas = MatMath.multiply(hidden_gradients, inputs_T)
        # Adding deltas
        self.weights_ih.add(weights_ih_deltas)
        self.bias_h.add(hidden_gradients)
def main(self):
    # Parse the command-line arguments
    args = self.parser().parse_args()
    file_name = args.arff
    learner_name = args.L
    eval_method = args.E[0]
    eval_parameter = args.E[1] if len(args.E) > 1 else None
    print_confusion_matrix = args.verbose
    normalize = args.normalize
    # Use a seed for deterministic results, if provided (makes debugging easier)
    random.seed(args.seed)

    # Load the model
    learner = self.get_learner(learner_name)

    # Load the ARFF file
    data = Matrix()
    data.load_arff(file_name)
    if normalize:
        print("Using normalized data")
        data.normalize()

    # Print some stats
    print("\nDataset name: {}\n"
          "Number of instances: {}\n"
          "Number of attributes: {}\n"
          "Learning algorithm: {}\n"
          "Evaluation method: {}\n".format(file_name, data.rows, data.cols,
                                           learner_name, eval_method))

    if eval_method == "training":
        print("Calculating accuracy on training set...")
        features = Matrix(data, 0, 0, data.rows, data.cols - 1)
        labels = Matrix(data, 0, data.cols - 1, data.rows, 1)
        confusion = Matrix()
        start_time = time.time()
        learner.train(features, labels)
        elapsed_time = time.time() - start_time
        print("Time to train (in seconds): {}".format(elapsed_time))
        accuracy = learner.measure_accuracy(features, labels, confusion)
        print("Training set accuracy: " + str(accuracy))
        if print_confusion_matrix:
            print("\nConfusion matrix: (Row=target value, Col=predicted value)")
            confusion.print()
            print("")
    elif eval_method == "static":
        print("Calculating accuracy on separate test set...")
        test_data = Matrix(arff=eval_parameter)
        if normalize:
            test_data.normalize()
        print("Test set name: {}".format(eval_parameter))
        print("Number of test instances: {}".format(test_data.rows))
        features = Matrix(data, 0, 0, data.rows, data.cols - 1)
        labels = Matrix(data, 0, data.cols - 1, data.rows, 1)
        start_time = time.time()
        learner.train(features, labels)
        elapsed_time = time.time() - start_time
        print("Time to train (in seconds): {}".format(elapsed_time))
        train_accuracy = learner.measure_accuracy(features, labels)
        print("Training set accuracy: {}".format(train_accuracy))
        test_features = Matrix(test_data, 0, 0, test_data.rows, test_data.cols - 1)
        test_labels = Matrix(test_data, 0, test_data.cols - 1, test_data.rows, 1)
        confusion = Matrix()
        test_accuracy = learner.measure_accuracy(test_features, test_labels, confusion)
        print("Test set accuracy: {}".format(test_accuracy))
        if print_confusion_matrix:
            print("\nConfusion matrix: (Row=target value, Col=predicted value)")
            confusion.print()
            print("")
    elif eval_method == "random":
        print("Calculating accuracy on a random hold-out set...")
        train_percent = float(eval_parameter)
        if train_percent < 0 or train_percent > 1:
            raise Exception("Percentage for random evaluation must be between 0 and 1")
        print("Percentage used for training: {}".format(train_percent))
        print("Percentage used for testing: {}".format(1 - train_percent))
        data.shuffle()
        train_size = int(train_percent * data.rows)
        train_features = Matrix(data, 0, 0, train_size, data.cols - 1)
        train_labels = Matrix(data, 0, data.cols - 1, train_size, 1)
        test_features = Matrix(data, train_size, 0, data.rows - train_size, data.cols - 1)
        test_labels = Matrix(data, train_size, data.cols - 1, data.rows - train_size, 1)
        start_time = time.time()
        learner.train(train_features, train_labels)
        elapsed_time = time.time() - start_time
        print("Time to train (in seconds): {}".format(elapsed_time))
        train_accuracy = learner.measure_accuracy(train_features, train_labels)
        print("Training set accuracy: {}".format(train_accuracy))
        confusion = Matrix()
        test_accuracy = learner.measure_accuracy(test_features, test_labels, confusion)
        print("Test set accuracy: {}".format(test_accuracy))
        if print_confusion_matrix:
            print("\nConfusion matrix: (Row=target value, Col=predicted value)")
            confusion.print()
            print("")
    elif eval_method == "cross":
        print("Calculating accuracy using cross-validation...")
        folds = int(eval_parameter)
        if folds <= 0:
            raise Exception("Number of folds must be greater than 0")
        print("Number of folds: {}".format(folds))
        reps = 1
        sum_accuracy = 0.0
        elapsed_time = 0.0
        for j in range(reps):
            data.shuffle()
            for i in range(folds):
                begin = int(i * data.rows / folds)
                end = int((i + 1) * data.rows / folds)
                train_features = Matrix(data, 0, 0, begin, data.cols - 1)
                train_labels = Matrix(data, 0, data.cols - 1, begin, 1)
                test_features = Matrix(data, begin, 0, end - begin, data.cols - 1)
                test_labels = Matrix(data, begin, data.cols - 1, end - begin, 1)
                train_features.add(data, end, 0, data.cols - 1)
                train_labels.add(data, end, data.cols - 1, 1)
                start_time = time.time()
                learner.train(train_features, train_labels)
                elapsed_time += time.time() - start_time
                accuracy = learner.measure_accuracy(test_features, test_labels)
                sum_accuracy += accuracy
                print("Rep={}, Fold={}, Accuracy={}".format(j, i, accuracy))
        elapsed_time /= (reps * folds)
        print("Average time to train (in seconds): {}".format(elapsed_time))
        print("Mean accuracy={}".format(sum_accuracy / (reps * folds)))
    else:
        raise Exception("Unrecognized evaluation method '{}'".format(eval_method))
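The "cross" branch above carves out each test fold with integer index arithmetic; a standalone sketch of the same math on a plain Python list (the row count and fold count below are illustrative):

def fold_ranges(n_rows, folds):
    # Yield (begin, end) test ranges; the integer division mirrors the code above.
    for i in range(folds):
        begin = int(i * n_rows / folds)
        end = int((i + 1) * n_rows / folds)
        yield begin, end


rows = list(range(10))
for begin, end in fold_ranges(len(rows), 3):
    test = rows[begin:end]
    train = rows[:begin] + rows[end:]  # everything outside the test slice
    print(test, train)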
from matrix import Matrix

matrix_1 = Matrix(4, 5, 6, 7)
matrix_2 = Matrix(2, 2, 2, 1)
matrix_3 = matrix_2.add(matrix_1)
matrix_4 = matrix_2.multiply(matrix_1)
matrix_3.print()
matrix_4.print()
class neural_network():
    def __init__(self, input_nodes: int, hidden_nodes: int, output_nodes: int):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)
        self.bias_h.randomize()
        self.bias_o.randomize()
        self.set_learning_rate()
        self.set_activation_function()

    def copy(self):
        nn = neural_network(self.input_nodes, self.hidden_nodes, self.output_nodes)
        nn.weights_ih = self.weights_ih.copy()
        nn.weights_ho = self.weights_ho.copy()
        nn.bias_h = self.bias_h.copy()
        nn.bias_o = self.bias_o.copy()
        nn.set_learning_rate()
        nn.set_activation_function()
        return nn

    def predict(self, input_list: list) -> list:
        # Generating the hidden outputs
        inputs = Matrix.from_list(input_list)
        hidden = Matrix.static_multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # Activation function!
        hidden.map(self.activation_function.x)
        # Generating the output's output!
        output = Matrix.static_multiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(self.activation_function.x)
        # Sending back to the caller!
        return output.to_list()

    def set_learning_rate(self, learning_rate: float = 0.1):
        self.learning_rate = learning_rate

    def set_activation_function(
            self, func: activation_function = activation_function.sigmoid()):
        self.activation_function = func

    # Custom addition: random weight mutation.
    def mutate(self, rate: float):
        def func(val, i, j):
            if random.uniform(0, 1) < rate:
                return val + random.uniform(-0.07, 0.07)
            else:
                return val
        self.weights_ih.map(func)
        self.weights_ho.map(func)
        self.bias_h.map(func)
        self.bias_o.map(func)
        return

    def train(self, input_list: list, target_list: list):
        # Generating the hidden outputs
        inputs = Matrix.from_list(input_list)
        hidden = Matrix.static_multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # Activation function!
        hidden.map(self.activation_function.x)
        # Generating the output's output!
        outputs = Matrix.static_multiply(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(self.activation_function.x)
        # Convert the list to a matrix object
        targets = Matrix.from_list(target_list)
        # Calculate the error: ERROR = TARGETS - OUTPUTS
        output_errors = Matrix.subtract(targets, outputs)
        # Calculate the gradient
        gradients = Matrix.static_map(outputs, self.activation_function.y)
        gradients.multiply(output_errors)
        gradients.multiply(self.learning_rate)
        # Calculate hidden -> output deltas
        hidden_T = Matrix.transpose(hidden)
        weight_ho_deltas = Matrix.static_multiply(gradients, hidden_T)
        # Calculate the hidden layer errors
        who_t = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.static_multiply(who_t, output_errors)
        # Calculate the hidden gradient
        hidden_gradient = Matrix.static_map(hidden, self.activation_function.y)
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.learning_rate)
        # Calculate input -> hidden deltas
        inputs_T = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.static_multiply(hidden_gradient, inputs_T)
        self.weights_ih.add(weight_ih_deltas)
        # Adjust the bias by its deltas (which is just the gradients)
        self.bias_h.add(hidden_gradient)
        # Adjust the weights by their deltas
        self.weights_ho.add(weight_ho_deltas)
        # Adjust the bias by its deltas (which is just the gradients)
        self.bias_o.add(gradients)

    def serialize(self) -> bytes:
        return dill.dumps(self)

    @staticmethod
    def deserialize(data: bytes) -> 'neural_network':
        return dill.loads(data)
class NeuralNetwork():
    def __init__(self, input_nodes, hidden_nodes, output_nodes):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Set the learning rate
        self.lr = 0.1
        # inputs -> hidden layer -> outputs
        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        # Biases for hidden and output layer
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)
        # Randomize weights and biases
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h.randomize()
        self.bias_o.randomize()

    def setLR(self, lr):
        '''Sets the learning rate.'''
        self.lr = lr

    def confusion_matrix(self, xs, ys, labels, normalize=False):
        '''Generate a confusion matrix: cols are the predictions, rows are the actual values.'''
        cm = Matrix(len(labels), len(labels))
        for x, y in zip(xs, ys):
            index_prediction, index_actual = self.get_index_val(x, y)
            cm.data[index_actual][index_prediction] += 1
        if normalize:
            cm.multiply(1 / len(xs))
        return cm

    def get_index_val(self, x, y):
        prediction = self.feed_forward(x)
        index_prediction = prediction.index(max(prediction))
        index_actual = y.index(max(y))
        return index_prediction, index_actual

    def compare_index_val(self, x, y):
        '''Compare the indices of individual points.'''
        index_prediction, index_actual = self.get_index_val(x, y)
        return index_prediction == index_actual

    def get_accuracy(self, xs, ys):
        '''Compute the accuracy by comparing prediction and target indices.'''
        score = 0
        for x, y in zip(xs, ys):
            if self.compare_index_val(x, y):
                score += 1
        return score / len(xs)

    def predict(self, input_array):
        return self.feed_forward(input_array)

    def feed_forward(self, input_array):
        '''Predicts the output.'''
        # Get the values for the hidden nodes
        inputs = Matrix.fromArray(input_array)
        hidden = Matrix.matMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # Activation function on the hidden nodes
        hidden.map(sigmoid)
        # Get the values for the output
        output = Matrix.matMultiply(self.weights_ho, hidden)  # multiply ho weights with hidden nodes
        output.add(self.bias_o)  # add bias
        output.map(sigmoid)  # sigmoid activation
        # TODO: apply softmax layer
        # Return output as array
        return output.toArray()

    def train(self, input_array, target_array):
        '''Trains the network.'''
        inputs = Matrix.fromArray(input_array)
        targets = Matrix.fromArray(target_array)
        # Get values for hidden nodes
        hidden = Matrix.matMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(sigmoid)
        # Get values for output nodes
        outputs = Matrix.matMultiply(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(sigmoid)
        # TODO: apply softmax layer
        # Calculate the error: ERROR = TARGETS - OUTPUTS
        output_errors = Matrix.subtract(targets, outputs)
        # Derivative for the output gradient: gradient = outputs * (1 - outputs)
        gradients = Matrix.static_map(outputs, dsigmoid)
        gradients.multiply(output_errors)
        gradients.multiply(self.lr)
        # Calculate deltas: delta_w = error * hidden_T * lr
        hidden_T = Matrix.transpose(hidden)
        weights_ho_deltas = Matrix.matMultiply(gradients, hidden_T)
        # Adjust the weights
        self.weights_ho.add(weights_ho_deltas)
        # Adjust the bias
        self.bias_o.add(gradients)
        # Hidden layer errors via the transposed hidden -> output weights
        who_t = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.matMultiply(who_t, output_errors)
        # Hidden gradient
        hidden_gradient = Matrix.static_map(hidden, dsigmoid)
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.lr)
        # Calculate input -> hidden deltas
        inputs_T = Matrix.transpose(inputs)
        weights_ih_deltas = Matrix.matMultiply(hidden_gradient, inputs_T)
        # Adjust weights for input -> hidden
        self.weights_ih.add(weights_ih_deltas)
        self.bias_h.add(hidden_gradient)
from matrix import Matrix
from vector import Vector

matrix = Matrix(4)
matrix.add(Vector.fromList([7, -5, 6, -7]))
matrix.add(Vector.fromList([2, -3, 10, 9]))
matrix.add(Vector.fromList([-5, 4, -2, 2]))
matrix.add(Vector.fromList([8, -9, 7, 15]))
m = matrix.simplify()
# m.printMe()

matrix = Matrix(4)
matrix.add(Vector.fromList([12, -9, -6, 4]))
matrix.add(Vector.fromList([-7, 4, 11, -6]))
matrix.add(Vector.fromList([11, -8, -7, 10]))
matrix.add(Vector.fromList([-9, 7, 3, -5]))
matrix.add(Vector.fromList([5, -3, -9, 12]))
m = matrix.simplify()
# m.printMe()

matrix = Matrix(3)
matrix.add(Vector.fromList([0, 3, 3]))
matrix.add(Vector.fromList([3, -7, -9]))
matrix.add(Vector.fromList([-6, 8, 12]))
matrix.add(Vector.fromList([6, -5, -9]))
matrix.add(Vector.fromList([4, 8, 6]))
matrix.add(Vector.fromList([-5, 9, 15]))
m = matrix.simplify()
# m.printMe()

# Matrix.create([[1,3],[3,9],[4,7],[7,6]]).simplify().printMe()
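simplify() above presumably row-reduces the accumulated row vectors; since the Matrix/Vector internals are not shown, here is a hedged sketch of that reduction on plain nested lists (the pivoting strategy and tolerance are assumptions):

def row_reduce(rows):
    '''Forward-eliminate to row echelon form with partial pivoting.'''
    m = [list(map(float, r)) for r in rows]
    n_rows, n_cols = len(m), len(m[0])
    pivot_row = 0
    for col in range(n_cols):
        if pivot_row >= n_rows:
            break
        # Pick the row with the largest magnitude in this column.
        pivot = max(range(pivot_row, n_rows), key=lambda r: abs(m[r][col]))
        if abs(m[pivot][col]) < 1e-12:
            continue  # no usable pivot in this column
        m[pivot_row], m[pivot] = m[pivot], m[pivot_row]
        # Eliminate this column from all rows below the pivot.
        for r in range(pivot_row + 1, n_rows):
            factor = m[r][col] / m[pivot_row][col]
            for c in range(col, n_cols):
                m[r][c] -= factor * m[pivot_row][c]
        pivot_row += 1
    return m


print(row_reduce([[7, -5, 6, -7], [2, -3, 10, 9], [-5, 4, -2, 2], [8, -9, 7, 15]]))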
class NeuralNetwork():
    def __init__(self, input_nodes, hidden_nodes, output_nodes):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Set the learning rate
        self.lr = 0.1
        # inputs -> hidden layer -> outputs
        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        # Biases for hidden and output layer
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)
        # Randomize weights and biases
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h.randomize()
        self.bias_o.randomize()

    def setLR(self, lr):
        '''Sets the learning rate.'''
        self.lr = lr

    def feed_forward(self, input_array):
        '''Predicts the output.'''
        # Get the values for the hidden nodes
        inputs = Matrix.fromArray(input_array)
        hidden = Matrix.matMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # Activation function on the hidden nodes
        hidden.map(sigmoid)
        # Get the values for the output
        output = Matrix.matMultiply(self.weights_ho, hidden)  # multiply ho weights with hidden nodes
        output.add(self.bias_o)  # add bias
        output.map(sigmoid)  # sigmoid activation
        # Return output as array
        return output.toArray()

    def train(self, input_array, target_array):
        '''Trains the network.'''
        inputs = Matrix.fromArray(input_array)
        targets = Matrix.fromArray(target_array)
        # Get values for hidden nodes
        hidden = Matrix.matMultiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(sigmoid)
        # Get values for output nodes
        outputs = Matrix.matMultiply(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(sigmoid)
        # Calculate the error: ERROR = TARGETS - OUTPUTS
        output_errors = Matrix.subtract(targets, outputs)
        # Derivative for the output gradient: gradient = outputs * (1 - outputs)
        gradients = Matrix.static_map(outputs, dsigmoid)
        gradients.multiply(output_errors)
        gradients.multiply(self.lr)
        # Calculate deltas: delta_w = error * hidden_T * lr
        hidden_T = Matrix.transpose(hidden)
        weights_ho_deltas = Matrix.matMultiply(gradients, hidden_T)
        # Adjust the weights
        self.weights_ho.add(weights_ho_deltas)
        # Adjust the bias
        self.bias_o.add(gradients)
        # Hidden layer errors via the transposed hidden -> output weights
        who_t = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.matMultiply(who_t, output_errors)
        # Hidden gradient
        hidden_gradient = Matrix.static_map(hidden, dsigmoid)
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.lr)
        # Calculate input -> hidden deltas
        inputs_T = Matrix.transpose(inputs)
        weights_ih_deltas = Matrix.matMultiply(hidden_gradient, inputs_T)
        # Adjust weights for input -> hidden
        self.weights_ih.add(weights_ih_deltas)
        self.bias_h.add(hidden_gradient)
class NeuralNetwork:
    def __init__(self, input_nodes, hidden_nodes, output_nodes):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)
        self.bias_h.randomize()
        self.bias_o.randomize()
        self.learning_rate = 0.1

    def feed_forward(self, input_array):
        # Generating the hidden outputs
        inputs = Matrix.fromArray(input_array)
        hidden = Matrix.multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # Activation function
        hidden.mapper(sigmoid)
        output = Matrix.multiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.mapper(sigmoid)
        return output.toArray()

    def train(self, input_array, target_array):
        """------FEED_FORWARD------"""
        inputs = Matrix.fromArray(input_array)
        hidden = Matrix.multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        # Activation function
        hidden.mapper(sigmoid)
        outputs = Matrix.multiply(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.mapper(sigmoid)
        """-----TRAINING-------"""
        # Convert target array to a Matrix object
        targets = Matrix.fromArray(target_array)
        output_errors = Matrix.subtract(targets, outputs)
        # Calculate output gradient
        gradients = Matrix.map(outputs, dsigmoid)
        gradients.scalar_multiply(output_errors)
        gradients.scalar_multiply(self.learning_rate)
        hidden_T = Matrix.transpose(hidden)
        weights_ho_deltas = Matrix.multiply(gradients, hidden_T)
        # Adjust the weights by their deltas
        self.weights_ho.add(weights_ho_deltas)
        # Adjust the bias
        self.bias_o.add(gradients)
        who_t = Matrix.transpose(self.weights_ho)
        # Calculate the hidden layer errors
        hidden_errors = Matrix.multiply(who_t, output_errors)
        # Calculate hidden gradient
        hidden_gradient = Matrix.map(hidden, dsigmoid)
        hidden_gradient.scalar_multiply(hidden_errors)
        hidden_gradient.scalar_multiply(self.learning_rate)
        inputs_T = Matrix.transpose(inputs)
        weights_ih_deltas = Matrix.multiply(hidden_gradient, inputs_T)
        self.weights_ih.add(weights_ih_deltas)
        self.bias_h.add(hidden_gradient)
class NeuralNetwork:
    def __init__(self, in_nodes, hid_nodes=None, out_nodes=None):
        if type(in_nodes) == NeuralNetwork:
            # Copy-construct from another network.
            a = in_nodes
            self.input_nodes = a.input_nodes
            self.hidden_nodes = a.hidden_nodes
            self.output_nodes = a.output_nodes
            self.weights_ih = a.weights_ih.copy()
            self.weights_ho = a.weights_ho.copy()
            self.bias_h = a.bias_h.copy()
            self.bias_o = a.bias_o.copy()
        else:
            self.input_nodes = in_nodes
            self.hidden_nodes = hid_nodes
            self.output_nodes = out_nodes
            self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
            self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
            self.weights_ih.randomize()
            self.weights_ho.randomize()
            self.bias_h = Matrix(self.hidden_nodes, 1)
            self.bias_o = Matrix(self.output_nodes, 1)
            self.bias_h.randomize()
            self.bias_o.randomize()
        self.set_learning_rate()
        self.set_activation_function()

    def predict(self, input_array):
        inputs = Matrix.from_array(input_array)
        hidden = Matrix.static_multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(self.activation_function.func)
        output = Matrix.static_multiply(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(self.activation_function.func)
        return output.to_array()

    def set_learning_rate(self, learning_rate=0.1):
        self.learning_rate = learning_rate

    def set_activation_function(self, func=sigmoid):
        self.activation_function = func

    def train(self, input_array, target_array):
        inputs = Matrix.from_array(input_array)
        hidden = Matrix.static_multiply(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(self.activation_function.func)
        outputs = Matrix.static_multiply(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(self.activation_function.func)
        targets = Matrix.from_array(target_array)
        output_errors = Matrix.subtract(targets, outputs)
        gradients = Matrix.static_map(outputs, self.activation_function.dfunc)
        gradients.multiply(output_errors)
        gradients.multiply(self.learning_rate)
        hidden_T = Matrix.transpose(hidden)
        weight_ho_deltas = Matrix.static_multiply(gradients, hidden_T)
        self.weights_ho.add(weight_ho_deltas)
        self.bias_o.add(gradients)
        who_T = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.static_multiply(who_T, output_errors)
        hidden_gradient = Matrix.static_map(hidden, self.activation_function.dfunc)
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.learning_rate)
        inputs_T = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.static_multiply(hidden_gradient, inputs_T)
        self.weights_ih.add(weight_ih_deltas)
        self.bias_h.add(hidden_gradient)

    def copy(self):
        return NeuralNetwork(self)

    def mutate(self, func):
        self.weights_ih.map(func)
        self.weights_ho.map(func)
        self.bias_h.map(func)
        self.bias_o.map(func)