Example 1
 def gradients(self, params=None):
     # self.args holds the training data as (inputs, labels)
     x, y = self.args
     self.update_params(params)
     # forward pass: softmax class probabilities for each input row
     activation = pyvision.softmax(self.W, self.b, x)
     # per-sample gradients; zip replaces the Python 2-only itertools.izip
     grads = [self.compute_gradients(a, c, b) for a, c, b in zip(activation, y, x)]
     # average over the batch and flatten into a single parameter vector
     return np.mean(grads, axis=0).T.flatten()
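The compute_gradients helper is not shown in these excerpts. Under the standard softmax cross-entropy derivation, the per-sample weight gradient is the outer product (p - onehot(y)) xᵀ, so a minimal sketch of what it plausibly computes looks like the following; the free-function form and argument names are assumptions that merely mirror the call above.

    import numpy as np

    def compute_gradients(activation, label, x):
        # Hypothetical stand-in for the helper called above: for softmax
        # cross-entropy, dL/dz = p - onehot(y), so the weight gradient of
        # one sample is the outer product (p - onehot(y)) x^T.
        error = activation.copy()
        error[label] -= 1.0                 # p - onehot(y)
        return np.outer(error, x)           # shape: (n_classes, n_features)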
Example 2
 def negative_log_likelihood(self, params):
     # self.args holds the training data as (inputs, labels)
     x, y = self.args
     self.update_params(params)
     # forward pass: softmax class probabilities for each input row
     activation = pyvision.softmax(self.W, self.b, x)
     # probability assigned to each sample's true class (NumPy fancy indexing)
     p = activation[np.arange(activation.shape[0]), y]
     # mean cross-entropy over the batch
     return -np.mean(np.log(p))
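For reference, assuming pyvision.softmax(W, b, x) computes the usual row-wise softmax of W x + b, the quantity returned here is the mean cross-entropy of the true classes over the N training pairs (x_i, y_i):

    \ell(W, b) = -\frac{1}{N} \sum_{i=1}^{N} \log \big( \mathrm{softmax}(W x_i + b)_{y_i} \big)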
Example 3
 def negative_log_likelihood(self, params=None):
     # self.args holds the training data as (inputs, labels)
     x, y = self.args
     self.update_params(params)
     # forward pass: softmax class probabilities for each input row
     activation = pyvision.softmax(self.W, self.b, x)
     # probability assigned to each sample's true class (NumPy fancy indexing)
     p = activation[np.arange(activation.shape[0]), y]
     # mean cross-entropy over the batch
     l = -np.mean(np.log(p))
     # optional weight penalty, scaled by self.eta
     if self.Regularization == 2:
         # L2 penalty: 0.5 * eta * ||W||^2
         l += 0.5 * self.eta * np.sum(self.W ** 2)
     if self.Regularization == 1:
         # L1 penalty: eta * ||W||_1
         l += self.eta * np.sum(np.abs(self.W))
     return l
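Both negative_log_likelihood and gradients take a flat params vector and return a scalar loss and a flattened gradient respectively, which matches the interface expected by generic optimizers. A hypothetical usage sketch with scipy.optimize.minimize follows; the model instance, the zero initialization, and the L-BFGS-B choice are all assumptions not present in these excerpts.

    import numpy as np
    from scipy.optimize import minimize

    # Hypothetical: `model` is assumed to be an instance of the class these
    # methods belong to, with self.args, self.W and self.b already set.
    x0 = np.zeros(model.W.size + model.b.size)    # initial flat parameter vector
    res = minimize(model.negative_log_likelihood, x0,
                   jac=model.gradients,           # gradients() returns the flat gradient
                   method="L-BFGS-B")
    model.update_params(res.x)                    # install the optimized parameters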
Example 4
 def predict(self, x):
     # forward pass: softmax class probabilities for each input row
     y = pyvision.softmax(self.W, self.b, x)
     self.output = y
     return y
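Note that predict returns the full probability matrix rather than hard labels; turning it into class indices is a one-liner. The model and x_test names below are assumed for illustration.

    import numpy as np

    probs = model.predict(x_test)        # assumed inputs, shape (n_samples, n_features)
    labels = np.argmax(probs, axis=1)    # most probable class index per sample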