def train(self, inputs, answers):
    # Convert parameters to column vectors
    inputs = np.array(inputs, ndmin=2).T
    answers = np.array(answers, ndmin=2).T
    # Get a vector of all the guesses the neural network makes
    hidden_values = np.dot(self.weights_to_hidden, inputs)
    hidden_values = np.add(hidden_values, self.biases_hidden)
    hidden_values = activation_function(hidden_values)
    output_values = np.dot(self.weights_to_output, hidden_values)
    output_values = np.add(output_values, self.biases_output)
    output_values = activation_function(output_values)
    # Adjust weights hidden -> output
    output_errors = np.subtract(answers, output_values)
    temp = output_errors * output_values * (1.0 - output_values)
    temp = self.learning_rate * temp
    # the update needs the hidden activations, not the output values
    delta_weights_ho = np.dot(temp, hidden_values.T)
    self.weights_to_output = np.add(self.weights_to_output, delta_weights_ho)
    self.biases_output = np.add(self.biases_output, temp)
    # Adjust weights input -> hidden
    hidden_errors = np.dot(self.weights_to_output.T, output_errors)
    temp = hidden_errors * hidden_values * (1.0 - hidden_values)
    temp = self.learning_rate * temp
    delta_weights_ih = np.dot(temp, inputs.T)
    self.weights_to_hidden = np.add(self.weights_to_hidden, delta_weights_ih)
    self.biases_hidden = np.add(self.biases_hidden, temp)
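The train variants in this collection all assume a surrounding class that provides the weight matrices, bias vectors, learning rate, and a sigmoid activation_function. Below is a minimal sketch of that scaffolding for the variant above; the class name SimpleNetwork and the uniform random initialization are illustrative assumptions, not taken from the source.

import numpy as np

def activation_function(x):
    # sigmoid; its derivative sigma(x) * (1 - sigma(x)) is exactly the
    # factor the error terms in the train methods rely on
    return 1.0 / (1.0 + np.exp(-x))

class SimpleNetwork:
    def __init__(self, no_of_in, no_of_hidden, no_of_out, learning_rate):
        self.learning_rate = learning_rate
        # small random weights; biases are column vectors, one entry per node
        self.weights_to_hidden = np.random.uniform(-0.5, 0.5, (no_of_hidden, no_of_in))
        self.weights_to_output = np.random.uniform(-0.5, 0.5, (no_of_out, no_of_hidden))
        self.biases_hidden = np.zeros((no_of_hidden, 1))
        self.biases_output = np.zeros((no_of_out, 1))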
def train(self, input_vector, target_vector):
    # input_vector and target_vector can be tuple, list or ndarray
    input_vector = np.array(input_vector, ndmin=2).T
    target_vector = np.array(target_vector, ndmin=2).T
    output_vector1 = np.dot(self.weights_in_hidden, input_vector)
    output_vector_hidden = activation_function(output_vector1)
    output_vector2 = np.dot(self.weights_hidden_out, output_vector_hidden)
    output_vector_network = activation_function(output_vector2)
    output_errors = target_vector - output_vector_network
    # update the weights:
    tmp = output_errors * output_vector_network * (1.0 - output_vector_network)
    tmp = self.learning_rate * np.dot(tmp, output_vector_hidden.T)
    self.weights_hidden_out += tmp
    # calculate hidden errors:
    hidden_errors = np.dot(self.weights_hidden_out.T, output_errors)
    # update the weights:
    tmp = hidden_errors * output_vector_hidden * (1.0 - output_vector_hidden)
    self.weights_in_hidden += self.learning_rate * np.dot(tmp, input_vector.T)
def train(self, input_vector, target_vector): """ input_vector and target_vector can be tuple, list or ndarray """ # make sure that the vectors have the right shap input_vector = np.array(input_vector) input_vector = input_vector.reshape(input_vector.size, 1) if self.bias: # adding bias node to the end of the input_vector input_vector = np.concatenate( (input_vector, [[self.bias]]) ) target_vector = np.array(target_vector).reshape(target_vector.size, 1) output_vector_hidden = activation_function(self.weights_in_hidden @ input_vector) if self.bias: output_vector_hidden = np.concatenate( (output_vector_hidden, [[self.bias]]) ) output_vector_network = activation_function(self.weights_hidden_out @ output_vector_hidden) output_error = target_vector - output_vector_network # update the weights: tmp = output_error * output_vector_network * (1.0 - output_vector_network) self.weights_hidden_out += self.learning_rate * (tmp @ output_vector_hidden.T) # calculate hidden errors: hidden_errors = self.weights_hidden_out.T @ output_error # update the weights: tmp = hidden_errors * output_vector_hidden * (1.0 - output_vector_hidden) if self.bias: x = (tmp @input_vector.T)[:-1,:] # last row cut off, else: x = tmp @ input_vector.T self.weights_in_hidden += self.learning_rate * x
def train_single(self, input_vector, target_vector): """ input_vector and target_vector can be tuple, list or ndarray """ if self.bias: # adding bias node to the end of the input_vector input_vector = np.concatenate( (input_vector, [self.bias]) ) input_vector = np.array(input_vector, ndmin=2).T target_vector = np.array(target_vector, ndmin=2).T output_vector1 = np.dot(self.wih, input_vector) output_vector_hidden = activation_function(output_vector1) if self.bias: output_vector_hidden = np.concatenate( (output_vector_hidden, [[self.bias]]) ) output_vector2 = np.dot(self.who, output_vector_hidden) output_vector_network = activation_function(output_vector2) output_errors = target_vector - output_vector_network # update the weights: tmp = output_errors * output_vector_network * (1.0 - output_vector_network) tmp = self.learning_rate * np.dot(tmp, output_vector_hidden.T) self.who += tmp # calculate hidden errors: hidden_errors = np.dot(self.who.T, output_errors) # update the weights: tmp = hidden_errors * output_vector_hidden * (1.0 - output_vector_hidden) if self.bias: x = np.dot(tmp, input_vector.T)[:-1,:] else: x = np.dot(tmp, input_vector.T) self.wih += self.learning_rate * x
def train_single(self, input_vector, target_vector): """ input_vector and target_vector can be tuple, list or ndarray """ output_vectors = [] input_vector = np.array(input_vector, ndmin=2).T target_vector = np.array(target_vector, ndmin=2).T output_vector1 = np.dot(self.wih, input_vector) output_hidden = activation_function(output_vector1) output_vector2 = np.dot(self.who, output_hidden) output_network = activation_function(output_vector2) output_errors = target_vector - output_network # update the weights: tmp = output_errors * output_network * \ (1.0 - output_network) tmp = self.learning_rate * np.dot(tmp, output_hidden.T) self.who += tmp # calculate hidden errors: hidden_errors = np.dot(self.who.T, output_errors) # update the weights: tmp = hidden_errors * output_hidden * (1.0 - output_hidden) self.wih += self.learning_rate * np.dot(tmp, input_vector.T)
def train(self, input_vector, target_vector): """ input_vector and target_vector can be tuple, list or ndarray. """ bias_node = 1 if self.bias else 0 if self.bias: # adding bias node to the end of the inpuy_vector input_vector = np.concatenate((input_vector, [self.bias])) input_vector = np.array(input_vector, ndmin=2).T target_vector = np.array(target_vector, ndmin=2).T output_vector1 = np.dot(self.weights_in_hidden, input_vector) output_vector_hidden = activation_function(output_vector1) if self.bias: output_vector_hidden = np.concatenate( (output_vector_hidden, [[self.bias]])) output_vector2 = np.dot(self.weights_hidden_out, output_vector_hidden) output_vector_network = activation_function(output_vector2) output_errors = target_vector - output_vector_network # update the weights: tmp = output_errors * output_vector_network * (1.0 - output_vector_network) tmp = self.learning_rate * np.dot(tmp, output_vector_hidden.T) self.weights_hidden_out += tmp # calculate hidden errors: hidden_errors = np.dot(self.weights_hidden_out.T, output_errors) # update the weights: tmp = hidden_errors * output_vector_hidden * (1.0 - output_vector_hidden) if self.bias: x = np.dot( tmp, input_vector.T)[:-1, :] # ???? last element cut off, ??? else: x = np.dot(tmp, input_vector.T) self.weights_in_hidden += self.learning_rate * x
def train(self, input_vector, target_vector): """ Backpropagation @param input_vector: Input vector @param target_vector: Target vector """ input_vector = np.array(input_vector, ndmin=2).T target_vector = np.array(target_vector, ndmin=2).T output_vector1 = np.dot(self.weights_hidden_layer, input_vector) output_hidden = activation_function(output_vector1) output_vector2 = np.dot(self.weights_output_layer, output_hidden) output_network = activation_function(output_vector2) output_errors = target_vector - output_network tmp = output_errors * output_network * (1.0 - output_network) tmp = self.learning_rate * np.dot(tmp, output_hidden.T) self.weights_output_layer += tmp hidden_errors = np.dot(self.weights_output_layer.T, output_errors) tmp = hidden_errors * output_hidden * (1.0 - output_hidden) self.weights_hidden_layer += self.learning_rate * np.dot( tmp, input_vector.T)
def run(self, input_vector):
    # Run network with an input vector input_vector
    input_vector = np.array(input_vector, ndmin=2).T
    output_vector = np.dot(self.weights_in_hidden, input_vector)
    output_vector = activation_function(output_vector)
    output_vector = np.dot(self.weights_hidden_out, output_vector)
    output_vector = activation_function(output_vector)
    return output_vector
def run(self, input_vector):
    # input_vector can be tuple, list or ndarray
    input_vector = np.array(input_vector, ndmin=2).T
    output_vector = np.dot(self.weights_in_hidden, input_vector)
    output_vector = activation_function(output_vector)
    output_vector = np.dot(self.weights_hidden_out, output_vector)
    output_vector = activation_function(output_vector)
    return output_vector
def run(self, input_vector):
    # turning the input vector into a column vector
    input_vector = np.array(input_vector, ndmin=2).T
    output_vector = np.dot(self.weights_in_hidden, input_vector)
    output_vector = activation_function(output_vector)
    output_vector = np.dot(self.weights_hidden_out, output_vector)
    output_vector = activation_function(output_vector)
    return output_vector
def run(self, inputVector):
    # Layer 1
    inputVector = np.array(inputVector, ndmin=2).T
    outputVector = np.dot(self.hWeights, inputVector)
    outputVector = activation_function(outputVector)
    # Output Layer
    outputVector = np.dot(self.oWeights, outputVector)
    outputVector = activation_function(outputVector)
    return outputVector
def run(self, input_vector):
    # input_vector can be tuple, list or ndarray
    input_vector = np.array(input_vector, ndmin=2).T
    output_vector = np.dot(self.wih, input_vector)
    output_vector = activation_function(output_vector)
    output_vector = np.dot(self.who, output_vector)
    output_vector = activation_function(output_vector)
    return output_vector
def run(self, input_vector): """ Inference @param input_vector: input vector return: output vector """ input_vector = np.array(input_vector, ndmin=2).T output_vector = np.dot(self.weights_hidden_layer, input_vector) output_vector = activation_function(output_vector) output_vector = np.dot(self.weights_output_layer, output_vector) output_vector = activation_function(output_vector) return output_vector
def run(self, input_vector): """ running the network with an input vector input_vector. input_vector can be tuple, list or ndarray """ # turning the input vector into a column vector input_vector = np.array(input_vector, ndmin=2).T output_vector = np.dot(self.weights_in_hidden, input_vector) output_vector = activation_function(output_vector) output_vector = np.dot(self.weights_hidden_out, output_vector) output_vector = activation_function(output_vector) return output_vector
def train(self, input_vector, target_vector):
    input_vector = np.array(input_vector, ndmin=2).T
    res_vectors = [input_vector]
    for k in range(self.no_of_layers - 1):
        in_vector = res_vectors[-1]
        if self.bias:
            in_vector = np.concatenate((in_vector, [[self.bias]]))
            res_vectors[-1] = in_vector
        x = np.dot(self.weights_matrices[k], in_vector)
        out_vector = activation_function(x)
        res_vectors.append(out_vector)
    target_vector = np.array(target_vector, ndmin=2).T
    output_errors = target_vector - out_vector
    for k in range(self.no_of_layers - 1, 0, -1):
        out_vector = res_vectors[k]
        in_vector = res_vectors[k - 1]
        if self.bias and not k == (self.no_of_layers - 1):
            out_vector = out_vector[:-1, :].copy()
        # sigma'(x) = sigma(x) * (1 - sigma(x))
        tmp = output_errors * out_vector * (1.0 - out_vector)
        tmp = np.dot(tmp, in_vector.T)
        self.weights_matrices[k - 1] += self.learning_rate * tmp
        output_errors = np.dot(self.weights_matrices[k - 1].T, output_errors)
        if self.bias:
            output_errors = output_errors[:-1, :]
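The multi-layer variants assume self.no_of_layers, self.weights_matrices, and an optional self.bias (the last two variants in this collection read the layer sizes from self.structure instead). Below is a minimal sketch of how these attributes could be set up; the class name, argument names, and uniform initialization are illustrative assumptions, not from the source.

class MultiLayerNetwork:
    def __init__(self, structure, learning_rate, bias=None):
        # structure, e.g. (4, 8, 3): input, hidden(s), and output layer sizes
        self.structure = structure
        self.no_of_layers = len(structure)
        self.learning_rate = learning_rate
        self.bias = bias  # e.g. 1, or None for no bias nodes
        bias_node = 1 if self.bias else 0
        self.weights_matrices = []
        for k in range(self.no_of_layers - 1):
            # matrix k maps layer k (plus optional bias node) to layer k + 1
            shape = (structure[k + 1], structure[k] + bias_node)
            self.weights_matrices.append(np.random.uniform(-0.5, 0.5, shape))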
def feed_forward(self, inputs):
    # Convert input to column vector
    inputs = np.array(inputs, ndmin=2).T
    # Compute hidden node activations
    hidden_values = np.dot(self.weights_to_hidden, inputs)
    hidden_values = np.add(hidden_values, self.biases_hidden)
    hidden_values = activation_function(hidden_values)
    # Compute output node activations
    output_values = np.dot(self.weights_to_output, hidden_values)
    output_values = np.add(output_values, self.biases_output)
    output_values = activation_function(output_values)
    return output_values
def run(self, input_vector): """ running the network with an input vector 'input_vector'. 'input_vector' can be tuple, list or ndarray """ # make sure that input_vector is a column vector: input_vector = np.array(input_vector) input_vector = input_vector.reshape(input_vector.size, 1) if self.bias: # adding bias node to the end of the inpuy_vector input_vector = np.concatenate( (input_vector, [[1]]) ) input4hidden = activation_function(self.weights_in_hidden @ input_vector) if self.bias: input4hidden = np.concatenate( (input4hidden, [[1]]) ) output_vector_network = activation_function(self.weights_hidden_out @ input4hidden) return output_vector_network
def run(self, input_vector):
    # input_vector can be tuple, list or ndarray
    if self.bias:
        # adding bias node to the end of the input_vector
        input_vector = np.concatenate((input_vector, [self.bias]))
    input_vector = np.array(input_vector, ndmin=2).T
    output_vector = np.dot(self.wih, input_vector)
    output_vector = activation_function(output_vector)
    if self.bias:
        output_vector = np.concatenate((output_vector, [[self.bias]]))
    output_vector = np.dot(self.who, output_vector)
    output_vector = activation_function(output_vector)
    return output_vector
def run(self, input_vector):
    # input_vector can be tuple, list or ndarray
    if self.bias:
        # adding bias node to the end of the input_vector
        input_vector = np.concatenate((input_vector, [1]))
    input_vector = np.array(input_vector, ndmin=2).T
    output_vector = np.dot(self.weights_in_hidden, input_vector)
    output_vector = activation_function(output_vector)
    if self.bias:
        output_vector = np.concatenate((output_vector, [[1]]))
    output_vector = np.dot(self.weights_hidden_out, output_vector)
    output_vector = activation_function(output_vector)
    return output_vector
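The bias variants above concatenate a bias value onto the input and hidden vectors, so each weight matrix needs one extra column. A minimal sketch of matching shapes; the layer sizes and uniform initialization are illustrative assumptions.

# hypothetical weight shapes for the bias-node variants
no_of_in, no_of_hidden, no_of_out = 3, 5, 2
bias_node = 1  # 1 when a bias value is used, 0 otherwise
weights_in_hidden = np.random.uniform(-0.5, 0.5, (no_of_hidden, no_of_in + bias_node))
weights_hidden_out = np.random.uniform(-0.5, 0.5, (no_of_out, no_of_hidden + bias_node))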
def run(self, input_vector):
    # format from horizontal matrix to vertical (column) matrix
    input_vector = np.array(input_vector, ndmin=2).T
    # self.weights is assumed to be indexed from 1; slot 0 is unused
    for i in range(1, len(self.weights)):
        output = np.dot(self.weights[i], input_vector)
        input_vector = activation_function(output)
    return input_vector
def run(self, input_vector):
    output_vector = np.dot(self.weights, input_vector)
    output_vector_activation = activation_function(output_vector)
    if self.bias_node:
        output_vector_activation = np.concatenate(
            (output_vector_activation, [[1]]))
    return output_vector_activation
def run(self, input_vector):
    if self.bias:
        input_vector = np.concatenate((input_vector, [self.bias]))
    in_vector = np.array(input_vector, ndmin=2).T
    for k in range(self.no_of_layers - 1):
        x = np.dot(self.weights_matrices[k], in_vector)
        out_vector = activation_function(x)
        in_vector = out_vector
        if self.bias:
            in_vector = np.concatenate((in_vector, [[self.bias]]))
    return out_vector
def train(self, inputVector):
    mistakes = 0
    rewards = np.array([0, 0, 0, 0, 0, 0])
    gameBoard = np.array([0, 0, 0, 0, 0, 0])
    for i in range(0, 6):
        # board state plus one random digit as the network input
        inputVector = np.array([gameBoard[0], gameBoard[1], gameBoard[2],
                                gameBoard[3], gameBoard[4], gameBoard[5],
                                random.randint(1, 9)])
        outputVector = np.dot(self.hWeights, inputVector)
        outputHiddenVector = activation_function(outputVector)
        outputVector1 = np.dot(self.oWeights, outputHiddenVector)
        outputNetworkVector = activation_function(outputVector1)
        outputFloats = np.apply_along_axis(outputMaker, axis=0,
                                           arr=outputNetworkVector)
        mistakes += insertNumber(inputVector[6], outputFloats, rewards)
        mistakes += judgeGame()
        print(rewards, " | ", mistakes)
        # Learn: the rewards take the place of the error term in the usual
        # error * output * (1 - output) update; np.outer builds the
        # weight-shaped update from the two 1-D vectors
        wwt = rewards * outputNetworkVector * (1.0 - outputNetworkVector)
        wwt = self.learnRate * np.outer(wwt, outputHiddenVector)
        print(wwt)
        self.oWeights += wwt
        # propagate the rewards back to the hidden layer
        hiddenRewards = np.dot(self.oWeights.T, rewards)
        wwt = hiddenRewards * outputHiddenVector * (1.0 - outputHiddenVector)
        self.hWeights += self.learnRate * np.outer(wwt, inputVector)
        gameBoard = np.array([0, 0, 0, 0, 0, 0])
def train(self, input_vector, target_vector): """ input_vector and target_vector can be tuple, list or ndarray """ no_of_layers = len(self.structure) input_vector = np.array(input_vector, ndmin=2).T layer_index = 0 # The output/input vectors of the various layers: res_vectors = [input_vector] while layer_index < no_of_layers - 1: in_vector = res_vectors[-1] if self.bias: # adding bias node to the end of the 'input'_vector in_vector = np.concatenate((in_vector, [[self.bias]])) res_vectors[-1] = in_vector x = np.dot(self.weights_matrices[layer_index], in_vector) out_vector = activation_function(x) # the output of one layer is the input of the next one: res_vectors.append(out_vector) layer_index += 1 layer_index = no_of_layers - 1 target_vector = np.array(target_vector, ndmin=2).T # The input vectors to the various layers output_errors = target_vector - out_vector while layer_index > 0: out_vector = res_vectors[layer_index] in_vector = res_vectors[layer_index - 1] if self.bias and not layer_index == (no_of_layers - 1): out_vector = out_vector[:-1, :].copy() tmp = output_errors * out_vector * (1.0 - out_vector) tmp = np.dot(tmp, in_vector.T) #if self.bias: # tmp = tmp[:-1,:] self.weights_matrices[layer_index - 1] += self.learning_rate * tmp output_errors = np.dot(self.weights_matrices[layer_index - 1].T, output_errors) if self.bias: output_errors = output_errors[:-1, :] layer_index -= 1
def run(self, input_vector):
    # input_vector can be tuple, list or ndarray
    no_of_layers = len(self.structure)
    if self.bias:
        # adding bias node to the end of the input_vector
        input_vector = np.concatenate((input_vector, [self.bias]))
    in_vector = np.array(input_vector, ndmin=2).T
    layer_index = 1
    # The input vectors to the various layers
    while layer_index < no_of_layers:
        x = np.dot(self.weights_matrices[layer_index - 1], in_vector)
        out_vector = activation_function(x)
        # input vector for next layer
        in_vector = out_vector
        if self.bias:
            in_vector = np.concatenate((in_vector, [[self.bias]]))
        layer_index += 1
    return out_vector
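Finally, a hypothetical end-to-end usage of the generic multi-layer variant, assuming the MultiLayerNetwork sketch above with these multi-layer train and run methods attached; the XOR data and hyperparameters are illustrative.

net = MultiLayerNetwork(structure=(2, 6, 1), learning_rate=0.3, bias=1)
data = [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]  # XOR
for epoch in range(10000):
    for x, t in data:
        net.train(x, t)
for x, _ in data:
    print(x, net.run(x).flatten())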