def train_single(self, input_vector, target_vector):
    """Run one forward/backward pass and update both weight matrices.

    Parameters
    ----------
    input_vector, target_vector : tuple, list or ndarray
        Reshaped internally into column vectors.

    Side effects: mutates ``self.weights_in_hidden`` and
    ``self.weights_hidden_output`` in place.
    """
    input_vector = np.array(input_vector, ndmin=2).T
    target_vector = np.array(target_vector, ndmin=2).T

    # Forward pass: two linear layers, each followed by ReLU.
    output_vector1 = np.dot(self.weights_in_hidden, input_vector)
    output_hidden = Activation.reLU(output_vector1)
    output_vector2 = np.dot(self.weights_hidden_output, output_hidden)
    output_network = Activation.reLU(output_vector2)

    output_errors = target_vector - output_network

    # BUG FIX: backpropagate the error to the hidden layer BEFORE the
    # output weights are modified. The gradient must be taken with
    # respect to the weights used in the forward pass; the original
    # code updated weights_hidden_output first and then used the
    # already-updated matrix to compute hidden_errors.
    hidden_errors = np.dot(self.weights_hidden_output.T, output_errors)

    # Update hidden -> output weights.
    # (Derivative.reLU on the post-activation value is valid for ReLU,
    # since ReLU(x) > 0 exactly where its derivative is 1.)
    tmp = output_errors * Derivative.reLU(output_network)
    self.weights_hidden_output += self.learning_rate * np.dot(
        tmp, output_hidden.T)

    # Update input -> hidden weights.
    tmp = hidden_errors * Derivative.reLU(output_hidden)
    self.weights_in_hidden += self.learning_rate * np.dot(
        tmp, input_vector.T)
def predict(self, input_vector):
    """Forward-propagate *input_vector* through the network.

    *input_vector* can be a tuple, list or ndarray; it is reshaped
    into a column vector before the matrix products. Returns the
    output-layer activations as a column ndarray.
    """
    column = np.array(input_vector, ndmin=2).T
    # Hidden layer: weighted sum followed by ReLU.
    hidden = Activation.reLU(np.dot(self.weights_in_hidden, column))
    # Output layer: weighted sum followed by ReLU.
    return Activation.reLU(np.dot(self.weights_hidden_output, hidden))
def train_single(self, input_vector, target_vector):
    """One training step with dropout applied after each ReLU layer.

    Parameters
    ----------
    input_vector, target_vector : tuple, list or ndarray
        Reshaped internally into column vectors.

    Side effects: mutates ``self.weights_in_hidden`` and
    ``self.weights_hidden_output`` in place.
    """
    input_vector = np.array(input_vector, ndmin=2).T
    target_vector = np.array(target_vector, ndmin=2).T

    # Forward pass: each ReLU output is element-wise masked by a
    # dropout mask drawn for that layer.
    output_vector1 = np.dot(self.weights_in_hidden, input_vector)
    output_hidden = Activation.reLU(output_vector1)
    output_hidden *= Dropout.get_mask(output_vector1)
    output_vector2 = np.dot(self.weights_hidden_output, output_hidden)
    output_network = Activation.reLU(output_vector2)
    output_network *= Dropout.get_mask(output_vector2)

    output_errors = target_vector - output_network

    # BUG FIX: compute the hidden-layer error with the forward-pass
    # weights, i.e. BEFORE weights_hidden_output is updated below.
    hidden_errors = np.dot(self.weights_hidden_output.T, output_errors)

    # BUG FIX: the original wrapped the pure numpy updates below in
    # bare `except:` blocks that printed an unrelated copy-pasted
    # message ("...writing to the file"), silently hiding real errors
    # such as shape mismatches. Let exceptions propagate instead.

    # Update hidden -> output weights.
    tmp = output_errors * Derivative.reLU(output_network)
    self.weights_hidden_output += self.learning_rate * np.dot(
        tmp, output_hidden.T)

    # Update input -> hidden weights.
    tmp = hidden_errors * Derivative.reLU(output_hidden)
    self.weights_in_hidden += self.learning_rate * np.dot(
        tmp, input_vector.T)