def train(self, X, y, num_passes=1000, lr=0.01, regularization=0.01, to_print=True):
    # gates for the affine part of each layer
    m_Gate = MultiplyGate()
    a_Gate = AddGate()
    # nonlinear activation layer
    if self.activation_func == 'sigmoid':
        layer = Sigmoid()
    elif self.activation_func == 'tanh':
        layer = Tanh()
    # output layer
    if self.output_func == 'softmax':
        output = Softmax()
    elif self.output_func == 'lse':
        output = LSE()
    # for each epoch
    for epoch in range(num_passes):
        # Forward propagation
        input = X
        forward = [(None, None, input)]
        # for each layer (the softmax/LSE output transform is applied separately)
        for i in range(len(self.W)):
            mul = m_Gate.forward(self.W[i], input)
            add = a_Gate.forward(mul, self.b[i])
            input = layer.forward(add)
            forward.append((mul, add, input))
        # last output of forward propagation is an array: num_samples x num_neurons_last_layer

        # Back propagation
        # derivative of the cumulative error, starting from the output layer
        dfunc = output.calc_diff(forward[len(forward) - 1][2], y)
        for i in range(len(forward) - 1, 0, -1):
            # one layer consists of mul, add and the nonlinearity
            dadd = layer.backward(forward[i][1], dfunc)
            # dLdb and dLdmul are functions of dLdadd
            db, dmul = a_Gate.backward(forward[i][0], self.b[i - 1], dadd)
            dW, dfunc = m_Gate.backward(self.W[i - 1], forward[i - 1][2], dmul)
            # Add regularization terms (biases don't have regularization terms)
            dW += regularization * self.W[i - 1]
            # Gradient descent parameter update
            self.b[i - 1] += -lr * db
            self.W[i - 1] += -lr * dW

        if to_print and epoch % 100 == 0:
            print("Loss after iteration %i: %f" % (epoch, self.calculate_loss(X, y)))
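# The train() method above relies on MultiplyGate and AddGate, whose
# implementations are not part of this excerpt. Below is a minimal sketch
# under the assumption that forward(W, X) computes X.dot(W) and that the
# bias b is a (1, n_out) row vector broadcast over the batch; treat the
# exact shapes as assumptions, not as the original gate code.
import numpy as np

class MultiplyGate:
    def forward(self, W, X):
        # affine part without bias: (num_samples, n_in) x (n_in, n_out)
        return np.dot(X, W)

    def backward(self, W, X, dZ):
        # gradients of the product with respect to W and X
        dW = np.dot(np.transpose(X), dZ)
        dX = np.dot(dZ, np.transpose(W))
        return dW, dX

class AddGate:
    def forward(self, X, b):
        # broadcast the bias row over every sample
        return X + b

    def backward(self, X, b, dZ):
        # bias gradient sums over the batch; input gradient passes through
        dX = dZ * np.ones_like(X)
        db = np.dot(np.ones((1, dZ.shape[0]), dtype=np.float64), dZ)
        return db, dX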
def bptt(self, x, y):
    assert len(x) == len(y)
    output = Softmax()
    layers = self.forward_propagation(x)
    dU = np.zeros(self.U.shape)
    dV = np.zeros(self.V.shape)
    dW = np.zeros(self.W.shape)
    T = len(layers)
    prev_s_t = np.zeros(self.hidden_dim)
    diff_s = np.zeros(self.hidden_dim)
    delta = np.zeros(self.hidden_dim)
    # walk backwards through time, from the last step to the first
    for k in range(0, T):
        t = T - k - 1
        # one-hot encoding of the input word at step t
        input = np.zeros(self.word_dim)
        input[x[t]] = 1
        # hidden state from the previous time step (zeros at t == 0)
        if t == 0:
            prev_s_t = np.zeros(self.hidden_dim)
        else:
            prev_s_t = layers[t - 1].s
        # gradient of the loss at step t with respect to the output scores
        dmulv = output.diff(layers[t].mulv, y[t])
        # delta carries the recurrent error back to the next (earlier) step
        delta, dU_t, dW_t, dV_t = layers[t].backward1(
            input, prev_s_t, self.U, self.W, self.V, delta, dmulv)
        dV += dV_t
        dU += dU_t
        dW += dW_t
    return (dU, dW, dV)
def bptt(self, x, y):
    assert len(x) == len(y)
    output = Softmax()
    layers = self.forward_propagation(x)
    dU = np.zeros(self.U.shape)
    dV = np.zeros(self.V.shape)
    dW = np.zeros(self.W.shape)
    T = len(layers)
    prev_s_t = np.zeros(self.hidden_dim)
    diff_s = np.zeros(self.hidden_dim)
    for t in range(0, T):
        # gradient of the loss at step t with respect to the output scores
        dmulv = output.diff(layers[t].mulv, y[t])
        # one-hot encoding of the input word at step t
        input = np.zeros(self.word_dim)
        input[x[t]] = 1
        dprev_s, dU_t, dW_t, dV_t = layers[t].backward(
            input, prev_s_t, self.U, self.W, self.V, diff_s, dmulv)
        prev_s_t = layers[t].s
        # only the recurrent error is propagated further back, not the output error
        dmulv = np.zeros(self.word_dim)
        # truncated BPTT: go back at most self.bptt_truncate steps from t
        for i in range(t - 1, max(-1, t - self.bptt_truncate - 1), -1):
            input = np.zeros(self.word_dim)
            input[x[i]] = 1
            prev_s_i = np.zeros(self.hidden_dim) if i == 0 else layers[i - 1].s
            dprev_s, dU_i, dW_i, dV_i = layers[i].backward(
                input, prev_s_i, self.U, self.W, self.V, dprev_s, dmulv)
            dU_t += dU_i
            dW_t += dW_i
        dV += dV_t
        dU += dU_t
        dW += dW_t
    return (dU, dW, dV)
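# bptt() only computes gradients; a caller still has to apply them. Below is
# a hedged sketch of such an update step -- the method name sgd_step and the
# learning_rate argument are assumptions for illustration, not code from the
# excerpt above.
def sgd_step(self, x, y, learning_rate):
    # one stochastic gradient descent update on a single (sentence, labels) pair
    dU, dW, dV = self.bptt(x, y)
    self.U -= learning_rate * dU
    self.W -= learning_rate * dW
    self.V -= learning_rate * dV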
def calculate_loss(self, X, y):
    m_Gate = MultiplyGate()
    a_Gate = AddGate()
    if self.activation_func == 'sigmoid':
        layer = Sigmoid()
    elif self.activation_func == 'tanh':
        layer = Tanh()
    if self.output_func == 'softmax':
        output = Softmax()
    elif self.output_func == 'lse':
        output = LSE()
    input = X
    # loop through each layer
    for i in range(len(self.W)):
        # X*W
        mul = m_Gate.forward(self.W[i], input)
        # X*W + b
        add = a_Gate.forward(mul, self.b[i])
        # nonlinear activation
        input = layer.forward(add)
    return output.eval_error(input, y)
def predict(self, X):
    m_Gate = MultiplyGate()
    a_Gate = AddGate()
    if self.activation_func == 'sigmoid':
        layer = Sigmoid()
    elif self.activation_func == 'tanh':
        layer = Tanh()
    if self.output_func == 'softmax':
        output = Softmax()
    elif self.output_func == 'lse':
        output = LSE()
    # forward pass through every affine + activation layer
    input = X
    for i in range(len(self.W)):
        mul = m_Gate.forward(self.W[i], input)
        add = a_Gate.forward(mul, self.b[i])
        input = layer.forward(add)
    if self.output_func == 'softmax':
        # pick the class with the highest predicted probability
        probs = output.eval(input)
        return np.argmax(probs, axis=1)
    elif self.output_func == 'lse':
        # least-squares output: threshold the final activation at 0.5
        return (np.greater(input, 0.5)) * 1
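# Hypothetical usage of the configurable train()/predict() pair above. The
# class name Model, its constructor arguments, and the use of sklearn's
# make_moons toy dataset are assumptions for illustration only; any small
# 2-D classification dataset would do.
import numpy as np
from sklearn.datasets import make_moons

X, y = make_moons(200, noise=0.2)
model = Model(layers_dim=[2, 3, 2], activation_func='tanh', output_func='softmax')
model.train(X, y, num_passes=1000, lr=0.01, regularization=0.01, to_print=True)
preds = model.predict(X)
print('training accuracy: %.3f' % np.mean(preds == y))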
def calculate_loss(self, x, y):
    output = Softmax()
    layers = self.forward_propagation(x)
    loss = 0.0
    # average cross-entropy loss over all time steps
    for i, layer in enumerate(layers):
        loss += output.loss(layer.mulv, y[i])
    return loss / float(len(y))
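# The Softmax class itself is not shown in this excerpt. The sketch below
# matches the per-time-step way it is called in the RNN code (a 1-D score
# vector layer.mulv and an integer label): predict() normalizes the scores,
# loss() is the cross-entropy of the correct word, and diff() is the usual
# "probabilities minus one-hot" gradient. The batched MLP code elsewhere in
# this section calls Softmax with 2-D arrays and would need an axis-aware
# variant; this is only an assumed, minimal version.
import numpy as np

class Softmax:
    def predict(self, x):
        # numerically stable softmax over a 1-D vector of scores
        exp_scores = np.exp(x - np.max(x))
        return exp_scores / np.sum(exp_scores)

    def loss(self, x, y_index):
        # negative log-likelihood of the correct class
        probs = self.predict(x)
        return -np.log(probs[y_index])

    def diff(self, x, y_index):
        # gradient of the loss with respect to the scores
        dscores = self.predict(x)
        dscores[y_index] -= 1.0
        return dscores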
def calculate_loss(self, X, y):
    mulGate = MultiplyGate()
    addGate = AddGate()
    layer = Tanh()
    softmaxOutput = Softmax()
    input = X
    # forward pass: X*W, + b, then tanh, for every layer
    for i in range(len(self.W)):
        mul = mulGate.forward(self.W[i], input)
        add = addGate.forward(mul, self.b[i])
        input = layer.forward(add)
    return softmaxOutput.loss(input, y)
def predict(self, X):
    mulGate = MultiplyGate()
    addGate = AddGate()
    layer = Tanh()
    softmaxOutput = Softmax()
    input = X
    for i in range(len(self.W)):
        mul = mulGate.forward(self.W[i], input)
        add = addGate.forward(mul, self.b[i])
        input = layer.forward(add)
    probs = softmaxOutput.predict(input)
    return np.argmax(probs, axis=1)
def train(self, X, y, num_passes=20000, epsilon=0.01, reg_lambda=0.01, print_loss=False):
    mulGate = MultiplyGate()
    addGate = AddGate()
    layer = Tanh()
    softmaxOutput = Softmax()
    for epoch in range(num_passes):
        # Forward propagation
        input = X
        forward = [(None, None, input)]
        for i in range(len(self.W)):
            mul = mulGate.forward(self.W[i], input)
            add = addGate.forward(mul, self.b[i])
            input = layer.forward(add)
            forward.append((mul, add, input))
        # Back propagation
        dtanh = softmaxOutput.diff(forward[len(forward) - 1][2], y)
        for i in range(len(forward) - 1, 0, -1):
            dadd = layer.backward(forward[i][1], dtanh)
            db, dmul = addGate.backward(forward[i][0], self.b[i - 1], dadd)
            dW, dtanh = mulGate.backward(self.W[i - 1], forward[i - 1][2], dmul)
            # Add regularization terms (b1 and b2 don't have regularization terms)
            dW += reg_lambda * self.W[i - 1]
            # Gradient descent parameter update
            self.b[i - 1] += -epsilon * db
            self.W[i - 1] += -epsilon * dW
        # write log
        nn_log_instance.w = self.W
        nn_log_instance.b = self.b
        nn_log_instance.forward = forward
        nn_log_instance.write_log()
        if print_loss and epoch % 1000 == 0:
            print("Loss after iteration %i: %f" % (epoch, self.calculate_loss(X, y)))
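# train() above writes to a module-level nn_log_instance that is not shown
# here. Below is a minimal sketch of an object that would satisfy the
# interface used (attributes w, b, forward and a write_log() method); the
# file name and the shape-only logging are assumptions, not the original
# logging helper.
import json

class NNLog:
    def __init__(self, path='nn_train_log.jsonl'):
        self.path = path
        self.w = None
        self.b = None
        self.forward = None

    def write_log(self):
        # record only array shapes so the log file stays small
        entry = {
            'w_shapes': [list(w.shape) for w in self.w],
            'b_shapes': [list(b.shape) for b in self.b],
            'num_forward_records': len(self.forward),
        }
        with open(self.path, 'a') as f:
            f.write(json.dumps(entry) + '\n')

nn_log_instance = NNLog()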
def predict(self, x):
    output = Softmax()
    layers = self.forward_propagation(x)
    return [np.argmax(output.predict(layer.mulv)) for layer in layers]
def predict(self, x):
    """Calculate the predictions given data x."""
    output = Softmax()
    layers = self.forward_propagation(x)
    return [np.argmax(output.predict(layer.mulv)) for layer in layers]
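# Hypothetical usage of the RNN predict() above. The class name Model, its
# constructor signature, and the toy vocabulary size are assumptions for
# illustration; x is a sentence encoded as word indices, and predict()
# returns one predicted word index per time step.
word_dim, hidden_dim = 8, 4
rnn = Model(word_dim, hidden_dim)
x = [0, 3, 5, 1]
print(rnn.predict(x))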