Code Example #1
    def get_cost_updates(self, corruption_level, learning_rate):
        """ This function computes the cost and the updates for one trainng
            step of the dA 
            
            Aguments:
                corruption_level: the degree to which teh input should be 
                    corrupted. currently the data is not corrupted.
                
                learning_rate : the degree to which the weights should be
                    altered after a step of gradient descent.
                    
            Returns:
                The errors and the updates to the parameters.
            
        """        
        #tilde_x = self.x
        tilde_x = self.get_corrupted_input(self.x, corruption_level)        
        h = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(h)
        
        # To calculate L below, self.y cannot be used directly when it is a
        # sparse matrix, so it is first converted to a dense matrix and the
        # computation operates on that.
        # TODO: try to make it work with a sparse matrix?
        if isinstance(self.y, T.TensorVariable):
            y_mat = self.y
        else:
            y_mat = sparse.dense_from_sparse(self.y)
        
        # cross-entropy reconstruction error, summed over each row (example)
        L = -T.sum(y_mat * T.log(z) + (1.0 - y_mat) * T.log(1.0 - z), axis=1)
        
        # TODO: add L1 or L2 penalization here?
        cost = T.mean(L)

        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)

        # generate the list of updates
        updates = []
        for param, gparam in zip(self.params, gparams):
            updates.append((param, param - learning_rate * gparam))

        return (cost, updates)
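
A minimal usage sketch (an assumption, not code from the original project): if the dA's symbolic inputs `self.x` and `self.y` are ordinary Theano variables, the (cost, updates) pair returned above can be compiled directly into a training step. Here `da` stands for a hypothetical, already-constructed dA instance.

import theano

# `da` is a hypothetical dA instance; `da.x` and `da.y` are its symbolic
# input and target variables (see the note above).
cost, updates = da.get_cost_updates(corruption_level=0.0, learning_rate=0.1)

train_step = theano.function(
    inputs=[da.x, da.y],   # feed one minibatch of inputs and targets per call
    outputs=cost,
    updates=updates,       # applies the gradient-descent updates to the shared params
)

# Each call returns the minibatch cost and updates the parameters in place:
# batch_cost = train_step(x_batch, y_batch)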
Code Example #2
    def get_reconstruction_errors(self, data):
        # Encode the (sparse) input, decode it, and return the element-wise
        # difference between the reconstruction and the densified input.
        h = self.get_hidden_values(data)
        z = self.get_reconstructed_input(h)

        error = z - sparse.dense_from_sparse(data)
        return error
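
A short sketch (under assumptions, not part of the original class) of how the symbolic error matrix returned above might be reduced to a single number and evaluated. `da` is again a hypothetical dA instance, and the input is a symbolic CSR matrix of the kind `sparse.dense_from_sparse` expects.

import theano
import theano.tensor as T
from theano import sparse

# Hypothetical `da` instance; `x_sparse` is a symbolic sparse (CSR) input.
x_sparse = sparse.csr_matrix(name='x_sparse', dtype='float64')
error = da.get_reconstruction_errors(x_sparse)

# Reduce the per-element error matrix to a mean-squared reconstruction error.
mse = T.mean(T.sqr(error))
reconstruction_mse = theano.function([x_sparse], mse)

# reconstruction_mse(some_scipy_csr_matrix) then returns a single float.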
Code Example #3
File: myTheta.py Project: Veldhoen/thesis
    def unSparse(self):
        # Replace every sparse parameter stored in this dict-like container
        # with its dense equivalent.
        for name in self.keys():
            self[name] = sparse.dense_from_sparse(self[name])
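
All three examples rely on theano.sparse.dense_from_sparse, which symbolically converts a sparse variable into a dense one. A tiny self-contained sketch of that conversion on its own (the variable names are illustrative):

import numpy as np
import scipy.sparse as sp
import theano
from theano import sparse

# Build a symbolic CSR input and compile a function that densifies it.
x = sparse.csr_matrix(name='x', dtype='float64')
to_dense = theano.function([x], sparse.dense_from_sparse(x))

# Calling the compiled function on a scipy.sparse matrix yields a dense ndarray.
dense = to_dense(sp.csr_matrix(np.eye(3)))
print(dense)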