Example #1
   def update_learner(self,example):
      self.layers[0][:] = example[0]

      # fprop
      for h in range(self.n_hidden_layers):
         mllin.product_matrix_vector(self.Ws[h],self.layers[h],self.layer_acts[h+1])
         self.layer_acts[h+1] += self.cs[h]
         if self.activation_function == 'sigmoid':
             mlnonlin.sigmoid(self.layer_acts[h+1],self.layers[h+1])
         elif self.activation_function == 'tanh':
             mlnonlin.tanh(self.layer_acts[h+1],self.layers[h+1])
         elif self.activation_function == 'reclin':
             mlnonlin.reclin(self.layer_acts[h+1],self.layers[h+1])
         else:
             raise ValueError('activation_function must be either \'sigmoid\', \'tanh\' or \'reclin\'')

      mllin.product_matrix_vector(self.U,self.layers[-1],self.output_act)
      self.output_act += self.d
      mlnonlin.softmax(self.output_act,self.output)

      # bprop: output delta for softmax + negative log-likelihood,
      # scaled by the decayed learning rate
      self.doutput_act[:] = self.output
      self.doutput_act[example[1]] -= 1
      self.doutput_act *= self.learning_rate/(1.+self.decrease_constant*self.n_updates)

      self.dd[:] = self.doutput_act
      mllin.outer(self.doutput_act,self.layers[-1],self.dU)      
      mllin.product_matrix_vector(self.U.T,self.doutput_act,self.dlayers[-1])
      if self.activation_function == 'sigmoid':
          mlnonlin.dsigmoid(self.layers[-1],self.dlayers[-1],self.dlayer_acts[-1])
      elif self.activation_function == 'tanh':
          mlnonlin.dtanh(self.layers[-1],self.dlayers[-1],self.dlayer_acts[-1])
      elif self.activation_function == 'reclin':
          mlnonlin.dreclin(self.layers[-1],self.dlayers[-1],self.dlayer_acts[-1])
      else:
          raise ValueError('activation_function must be either \'sigmoid\', \'tanh\' or \'reclin\'')

      for h in range(self.n_hidden_layers-1,-1,-1):
         self.dcs[h][:] = self.dlayer_acts[h+1]
         mllin.outer(self.dlayer_acts[h+1],self.layers[h],self.dWs[h])
         mllin.product_matrix_vector(self.Ws[h].T,self.dlayer_acts[h+1],self.dlayers[h])
         if self.activation_function == 'sigmoid':
             mlnonlin.dsigmoid(self.layers[h],self.dlayers[h],self.dlayer_acts[h])
         elif self.activation_function == 'tanh':
             mlnonlin.dtanh(self.layers[h],self.dlayers[h],self.dlayer_acts[h])
         elif self.activation_function == 'reclin':
             mlnonlin.dreclin(self.layers[h],self.dlayers[h],self.dlayer_acts[h])
         else:
             raise ValueError('activation_function must be either \'sigmoid\', \'tanh\' or \'reclin\'')

      # gradient descent update of all parameters
      self.U -= self.dU
      self.d -= self.dd
      for h in range(self.n_hidden_layers-1,-1,-1):
         self.Ws[h] -= self.dWs[h]
         self.cs[h] -= self.dcs[h]

      self.n_updates += 1
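
The update above is a single step of stochastic gradient descent on the negative log-likelihood of the softmax output, with the learning rate decayed as learning_rate/(1 + decrease_constant*n_updates). For reference, here is a minimal self-contained NumPy sketch of the same step for one sigmoid hidden layer; the mllin/mlnonlin helpers are replaced by plain NumPy, and every name here is illustrative rather than part of the library above:

import numpy as np

def sgd_step(W, c, U, d, x, y, lr):
    # forward pass: sigmoid hidden layer, softmax output
    h = 1.0/(1.0 + np.exp(-(W.dot(x) + c)))
    a = U.dot(h) + d
    e = np.exp(a - a.max())
    p = e/e.sum()
    # backward pass: gradient of -log p[y]
    da = p.copy()
    da[y] -= 1                 # softmax + NLL output delta
    dh = U.T.dot(da)
    dah = dh*h*(1.0 - h)       # chain through the sigmoid
    # gradient descent update, in place
    U -= lr*np.outer(da, h)
    d -= lr*da
    W -= lr*np.outer(dah, x)
    c -= lr*dah

rng = np.random.RandomState(0)
W, c = rng.randn(5, 3), np.zeros(5)
U, d = rng.randn(4, 5), np.zeros(4)
sgd_step(W, c, U, d, rng.randn(3), 2, lr=0.1)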
Example #2
   def fprop(self,input):
       """
       Computes the output given some input. Puts the result in ``self.output``
       """
       self.input[:] = input
       self.output_act[:] = self.d
       for k in range(self.n_k_means):
           if self.n_k_means_inputs == self.input_size:
               c = self.clusterings[k].compute_cluster(self.input)
           else:
               c = self.clusterings[k].compute_cluster(self.input[self.k_means_subset_inputs[k]])
           idx = c + k*self.n_clusters
           self.cluster_indices[k] = c
           
           mllin.product_matrix_vector(self.Ws[idx],self.input,self.layer_acts[k])
           self.layer_acts[k] += self.cs[idx]
           if self.activation_function == 'sigmoid':
               mlnonlin.sigmoid(self.layer_acts[k],self.layers[k])
           elif self.activation_function == 'tanh':
               mlnonlin.tanh(self.layer_acts[k],self.layers[k])
           elif self.activation_function == 'reclin':
               mlnonlin.reclin(self.layer_acts[k],self.layers[k])
           else:
               raise ValueError('activation_function must be either \'sigmoid\', \'tanh\' or \'reclin\'')
       
           mllin.product_matrix_vector(self.Vs[idx],self.layers[k],self.output_acts[k])
           self.output_act += self.output_acts[k]
       mlnonlin.softmax(self.output_act,self.output)

        if self.autoencoder_regularization != 0:
            # denoising autoencoder regularization: zero out a random
            # fraction of the input dimensions, then reconstruct
            self.dae_input[:] = input
            self.rng.shuffle(self.input_idx)
            self.dae_input[self.input_idx[:int(self.autoencoder_missing_fraction*self.input_size)]] = 0
           self.dae_output_act[:] = self.dae_d
           for k in range(self.n_k_means):
               idx = self.cluster_indices[k] + k*self.n_clusters
               
               mllin.product_matrix_vector(self.Ws[idx],self.dae_input,self.dae_layer_acts[k])
               self.dae_layer_acts[k] += self.cs[idx]
               if self.activation_function == 'sigmoid':
                   mlnonlin.sigmoid(self.dae_layer_acts[k],self.dae_layers[k])
               elif self.activation_function == 'tanh':
                   mlnonlin.tanh(self.dae_layer_acts[k],self.dae_layers[k])
               elif self.activation_function == 'reclin':
                   mlnonlin.reclin(self.dae_layer_acts[k],self.dae_layers[k])
               else:
                   raise ValueError('activation_function must be either \'sigmoid\', \'tanh\' or \'reclin\'')
           
                # decode with tied weights (transpose of the encoding matrix)
                mllin.product_matrix_vector(self.Ws[idx].T,self.dae_layers[k],self.dae_output_acts[k])
                self.dae_output_act += self.dae_output_acts[k]
           self.dae_output[:] = self.dae_output_act
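
The autoencoder_regularization branch corrupts the input by zeroing a random fraction of its dimensions, then reconstructs the clean input through the tied (transposed) weights. A hedged sketch of just that corruption step in plain NumPy, with corrupt and missing_fraction as illustrative names:

import numpy as np

def corrupt(x, missing_fraction, rng):
    # zero out a random subset of the input dimensions, as in fprop above
    idx = rng.permutation(len(x))
    noisy = x.copy()
    noisy[idx[:int(missing_fraction*len(x))]] = 0
    return noisy

rng = np.random.RandomState(1234)
x = rng.rand(10)
print(corrupt(x, 0.25, rng))   # 2 of the 10 entries zeroed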
Example #3
 def apply_activation(self, input_data, output):
     """
     Apply the activation function
     """
     if self.activation_function == "sigmoid":
         mlnonlin.sigmoid(input_data, output)
     elif self.activation_function == "tanh":
         mlnonlin.tanh(input_data, output)
     elif self.activation_function == "reclin":
         mlnonlin.reclin(input_data, output)
     elif self.activation_function == "softmax":
         m = input_data.max(axis=1)
         output[:] = np.exp(input_data - m.reshape((-1, 1)))
         output[:] /= output.sum(axis=1).reshape((-1, 1))
     else:
         raise ValueError("activation_function must be either 'sigmoid', 'tanh' or 'reclin'")
Example #4
   def use_learner(self,example):
      output = np.zeros(self.n_classes)
      self.layers[0][:] = example[0]

      # fprop
      for h in range(self.n_hidden_layers):
         mllin.product_matrix_vector(self.Ws[h],self.layers[h],self.layer_acts[h+1])
         self.layer_acts[h+1] += self.cs[h]
         if self.activation_function == 'sigmoid':
             mlnonlin.sigmoid(self.layer_acts[h+1],self.layers[h+1])
         elif self.activation_function == 'tanh':
             mlnonlin.tanh(self.layer_acts[h+1],self.layers[h+1])
         elif self.activation_function == 'reclin':
             mlnonlin.reclin(self.layer_acts[h+1],self.layers[h+1])
         else:
             raise ValueError('activation_function must be either \'sigmoid\', \'tanh\' or \'reclin\'')

      mllin.product_matrix_vector(self.U,self.layers[-1],self.output_act)
      self.output_act += self.d
      mlnonlin.softmax(self.output_act,output)

      # return the predicted class index along with the full class distribution
      return [output.argmax(),output]
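
use_learner runs only the forward pass and returns the predicted class index together with the full softmax distribution. A small self-contained sketch of that final softmax-then-argmax step (predict is an illustrative name, not part of the class above):

import numpy as np

def predict(output_act):
    # softmax over the output activations, then pick the winning class
    e = np.exp(output_act - output_act.max())
    p = e/e.sum()
    return [p.argmax(), p]

print(predict(np.array([0.2, 1.5, -0.3])))   # class 1 has the highest probability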