def update_learner(self, example):
    # Place the example into self.input according to the autoregressive ordering.
    self.input[self.input_order] = example

    # fprop: column i of self.hid holds the hidden units that only see x_{<i},
    # obtained from shifted cumulative sums of W[:, j] * x_j.
    np.multiply(self.input, self.W, self.input_times_W)
    np.add.accumulate(self.input_times_W[:, :-1], axis=1,
                      out=self.acc_input_times_W[:, 1:])
    self.acc_input_times_W[:, 0] = 0
    self.acc_input_times_W += self.c[:, np.newaxis]
    mlnonlin.sigmoid(self.acc_input_times_W, self.hid)
    if self.untied_weights:
        np.multiply(self.hid, self.V, self.Whid)
    else:
        np.multiply(self.hid, self.W, self.Whid)
    mllin.sum_columns(self.Whid, self.recact)
    self.recact += self.b
    mlnonlin.sigmoid(self.recact, self.rec)

    # bprop: error signal on the reconstruction, scaled by self.alpha, then
    # backpropagated to the biases and the (tied or untied) weights.
    np.subtract(self.rec, self.input, self.drec)
    self.drec *= self.alpha
    self.db[:] = self.drec
    if self.untied_weights:
        np.multiply(self.drec, self.hid, self.dV)
        np.multiply(self.drec, self.V, self.dhid)
        self.dW[:] = 0
    else:
        np.multiply(self.drec, self.hid, self.dW)
        np.multiply(self.drec, self.W, self.dhid)
    mlnonlin.dsigmoid(self.hid, self.dhid, self.dacc_input_times_W)
    mllin.sum_rows(self.dacc_input_times_W, self.dc)
    # Reverse (right-to-left) accumulation mirrors the shifted cumulative sums of the fprop.
    np.add.accumulate(self.dacc_input_times_W[:, :0:-1], axis=1,
                      out=self.dWenc[:, -2::-1])
    self.dWenc[:, -1] = 0
    self.dWenc *= self.input
    self.dW += self.dWenc

    # Gradient step with a learning rate decayed by the number of updates.
    self.dW *= self.learning_rate / (1. + self.decrease_constant * self.n_updates)
    self.db *= self.learning_rate / (1. + self.decrease_constant * self.n_updates)
    self.dc *= self.learning_rate / (1. + self.decrease_constant * self.n_updates)
    self.W -= self.dW
    self.b -= self.db
    self.c -= self.dc
    if self.untied_weights:
        self.dV *= self.learning_rate / (1. + self.decrease_constant * self.n_updates)
        self.V -= self.dV
    self.n_updates += 1
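# For reference, what update_learner computes, assuming the NADE-style autoregressive
# autoencoder that the variable names suggest (W: encoder weights, V: decoder weights
# when untied, b: visible biases, c: hidden biases, x: the re-ordered input):
#
#     h_i   = sigm( c + sum_{j < i} W[:, j] * x_j )        (column i of self.hid)
#     rec_i = sigm( b_i + dot(V[:, i], h_i) )              (the reconstruction self.rec)
#
# followed by a stochastic gradient step with a decayed learning rate:
#
#     theta <- theta - learning_rate / (1. + decrease_constant * n_updates) * dtheta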
def fprop(self):
    np.multiply(self.vec_input, self.mat_W, self.mat_inp_times_W)
    np.add.accumulate(self.mat_inp_times_W[:, :-1], axis=1,
                      out=self.mat_acc_inp_times_W[:, 1:])
    self.mat_acc_inp_times_W[:, 0] = 0
    self.mat_acc_inp_times_W += self.vec_bias_h[:, np.newaxis]  # the columns are the hidden_act_i
    mlnonlin.sigmoid(self.mat_acc_inp_times_W, self.mat_h)      # the columns are the hidden_layer_i
    np.multiply(self.mat_h, self.mat_V, self.mat_Vhid)
    mllin.sum_columns(self.mat_Vhid, self.vec_recact)
    self.vec_recact += self.vec_bias_inp
    if self.fPoisson:
        self.vec_recProb = np.exp(self.vec_recact)
    else:
        mlnonlin.sigmoid(self.vec_recact, self.vec_recProb)
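# The two output options in the fprop above, written out. Reading the exp link as a
# Poisson rate (for count data) is an assumption suggested by the fPoisson flag:
#
#     fPoisson == False:  p(x_i = 1 | x_{<i}) = sigm(recact_i)
#     fPoisson == True:   x_i | x_{<i} ~ Poisson(lambda_i),  lambda_i = exp(recact_i)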
def use_learner(self, example):
    self.input[self.input_order] = example
    output = np.zeros((self.input_size))
    recact = np.zeros((self.input_size))

    # fprop
    np.multiply(self.input, self.W, self.input_times_W)
    np.add.accumulate(self.input_times_W[:, :-1], axis=1,
                      out=self.acc_input_times_W[:, 1:])
    self.acc_input_times_W[:, 0] = 0
    self.acc_input_times_W += self.c[:, np.newaxis]
    mlnonlin.sigmoid(self.acc_input_times_W, self.hid)
    if self.untied_weights:
        np.multiply(self.hid, self.V, self.Whid)
    else:
        np.multiply(self.hid, self.W, self.Whid)
    mllin.sum_columns(self.Whid, recact)
    recact += self.b
    mlnonlin.sigmoid(recact, output)
    return [output, recact]
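# Minimal standalone NumPy sketch of the shifted cumulative-sum trick shared by the
# forward passes above, checked against a naive per-dimension loop. All names here
# (n_in, n_hid, sigmoid, ...) are illustrative; sigmoid stands in for mlnonlin.sigmoid
# and the (hid * V).sum(axis=0) line for mllin.sum_columns.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.RandomState(0)
n_in, n_hid = 6, 4
W = 0.1 * rng.randn(n_hid, n_in)            # encoder weights, one column per input dim
V = 0.1 * rng.randn(n_hid, n_in)            # decoder weights (untied case)
b = np.zeros(n_in)                          # visible biases
c = np.zeros(n_hid)                         # hidden biases
x = (rng.rand(n_in) > 0.5).astype(float)    # toy binary input

# Vectorized forward pass: column i of hid only sees x_{<i}.
input_times_W = x * W
acc = np.zeros_like(W)
np.add.accumulate(input_times_W[:, :-1], axis=1, out=acc[:, 1:])
hid = sigmoid(acc + c[:, np.newaxis])
rec = sigmoid((hid * V).sum(axis=0) + b)

# Naive loop computing the same conditionals one input dimension at a time.
rec_loop = np.array([sigmoid(b[i] + V[:, i].dot(sigmoid(c + W[:, :i].dot(x[:i]))))
                     for i in range(n_in)])

assert np.allclose(rec, rec_loop)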