Example #1
    def forward_prop(self, X, add_noise=False, compute_loss=False, is_test=True):
        """
        Compute the forward propagation step that maps the input data matrix X
        into the output.
        """
        if not is_test or self.params.mu is None or self.params.sigma is None:
            # Training mode (or no stored statistics yet): normalize with the
            # statistics of the current batch and store them for test time.
            self.mu = X.mean(axis=0)
            self.sigma = gnp.std(X, axis=0)
            self.X_hat = (X - self.mu) / (self.sigma + 1e-10)
            self.params.update_mean_std(self.mu, self.sigma)
        else:
            # Test mode: normalize with the stored statistics and record how
            # far the result is from zero mean / unit std as a diagnostic.
            self.X_hat = (X - self.params.mu) / (self.params.sigma + 1e-10)
            self._res_mean = gnp.abs(self.X_hat.mean(axis=0)).max()
            self._res_std = gnp.abs(gnp.std(self.X_hat, axis=0) - 1).max()

        # Scale and shift with the learned parameters gamma and beta.
        self.Y = self.X_hat * self.params.gamma + self.params.beta
        return self.Y
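This forward pass is the standard batch-normalization recipe: normalize each feature with batch (or stored) statistics, then scale and shift with the learned gamma and beta. A minimal self-contained sketch of the same step in plain NumPy (array shapes and the gamma/beta values are illustrative stand-ins, not taken from the class above):

    import numpy as np

    # Illustrative stand-in for the normalize-scale-shift step above;
    # gamma and beta are fixed here, while in the class they are learned.
    X = np.random.randn(64, 10) * 3.0 + 5.0   # batch of 64 examples, 10 features
    gamma, beta = np.ones(10), np.zeros(10)

    mu = X.mean(axis=0)                        # per-feature batch mean
    sigma = X.std(axis=0)                      # per-feature batch std
    X_hat = (X - mu) / (sigma + 1e-10)         # normalize each feature
    Y = X_hat * gamma + beta                   # scale and shift

    # After normalization the batch has (near) zero mean and unit std.
    assert np.abs(X_hat.mean(axis=0)).max() < 1e-8
    assert np.abs(X_hat.std(axis=0) - 1).max() < 1e-6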
Example #2
    def computeStat(self):
        print('Computing stats (mean and std)...')
        means = gp.zeros((self.numbatches, self.dim))
        variances = gp.zeros((self.numbatches, self.dim))
        i = 0
        while True:
            batch = self.cache.getOneBatch()
            if batch is None:
                break
            means[i] = batch.mean(axis=0)
            variances[i] = gp.std(batch, axis=0)**2
            i += 1
        assert i == self.numbatches
        mean = means.mean(axis=0)
        # Law of total variance (exact for equal-sized batches): overall
        # variance = mean of within-batch variances + variance of batch means.
        std = gp.sqrt(variances.mean(axis=0) + gp.std(means, axis=0)**2)
        # Replace any zero std with the average std to avoid division by zero.
        mean_std = std.mean()
        std += (std == 0.0) * mean_std
        self.reset()

        print('Finished computing stats')
        return mean, std + 1e-10
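The aggregation above relies on the law of total variance: with equal-sized batches, the overall variance equals the mean of the within-batch variances plus the variance of the batch means. A small NumPy check of that identity on synthetic data (shapes and seed are illustrative):

    import numpy as np

    # Check the pooled-variance identity used above on equal-sized
    # synthetic batches (shapes and seed are illustrative).
    rng = np.random.default_rng(0)
    data = rng.normal(size=(5 * 100, 3))      # 5 batches of 100 rows each
    batches = data.reshape(5, 100, 3)

    means = batches.mean(axis=1)              # per-batch means
    variances = batches.var(axis=1)           # per-batch variances

    # total variance = mean of within-batch variances
    #                + variance of the batch means
    pooled = variances.mean(axis=0) + means.var(axis=0)
    assert np.allclose(np.sqrt(pooled), data.std(axis=0))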
Example #3
    def setup_mean_std_stats(self, X):
        # Store the per-feature mean and std of X for later normalization.
        self.params.set_mean_std(X.mean(axis=0), gnp.std(X, axis=0))
Example #4
    def relu_sigma_1_prime(x):
        # Derivative of relu_sigma_1 w.r.t. x, treating the per-row 2*std
        # threshold as a constant: 1 where |x| exceeds it, 0 elsewhere.
        b = 2 * gp.std(x, axis=1)[:, gp.newaxis]
        std_matrix = gp.dot(b, gp.ones((1, x.shape[1])))
        return (x > std_matrix) + (x < -std_matrix)
Example #5
    def relu_sigma_1(x):
        # Shifted ReLU: zero out everything within 2 row-standard-deviations
        # of zero, and shift the surviving tails toward zero by that amount.
        b = 2 * gp.std(x, axis=1)[:, gp.newaxis]
        std_matrix = gp.dot(b, gp.ones((1, x.shape[1])))
        return ((x - std_matrix) > 0) * (x - std_matrix) + \
               ((x + std_matrix) < 0) * (x + std_matrix)
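relu_sigma_1_prime in Example #4 above is the elementwise derivative of this function when the 2*std threshold is treated as a constant (its own dependence on x is ignored). A quick finite-difference spot check of that relationship, using NumPy stand-ins for the gp calls (function names and tolerances are illustrative):

    import numpy as np

    # Finite-difference spot check: with the 2*std threshold frozen,
    # the indicator from Example #4 matches the slope of relu_sigma_1.
    def shifted_relu(x, t):
        return (x - t > 0) * (x - t) + (x + t < 0) * (x + t)

    def shifted_relu_prime(x, t):
        return ((x > t) + (x < -t)).astype(float)

    rng = np.random.default_rng(1)
    x = rng.normal(size=(4, 6))
    t = 2 * x.std(axis=1, keepdims=True)      # frozen per-row threshold
    eps = 1e-6
    num = (shifted_relu(x + eps, t) - shifted_relu(x - eps, t)) / (2 * eps)
    assert np.allclose(num, shifted_relu_prime(x, t), atol=1e-4)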
Example #6
    def threshold_mask_soft(x, k, dropout=None):
        # Keep only activations greater than k times their row's std,
        # optionally thinned by a random mask.
        b = k * gp.std(x, axis=1)[:, gp.newaxis]
        std_matrix = gp.dot(b, gp.ones((1, x.shape[1])))
        if dropout is None:
            return x > std_matrix
        return (x > std_matrix) * (gp.rand(x.shape) > (1 - dropout))
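A short usage sketch with NumPy stand-ins for gp (illustrative only). Note that, as written above, the random part of the mask keeps a unit when rand > 1 - dropout, i.e. with probability dropout:

    import numpy as np

    # NumPy stand-in for the masking function above (illustrative).
    def threshold_mask_soft(x, k, dropout=None):
        b = k * x.std(axis=1, keepdims=True)   # per-row threshold
        mask = x > b                           # keep strong activations
        if dropout is None:
            return mask
        return mask * (np.random.rand(*x.shape) > (1 - dropout))

    x = np.random.randn(3, 8)
    print(threshold_mask_soft(x, k=1.0))               # boolean keep-mask
    print(threshold_mask_soft(x, k=1.0, dropout=0.5))  # randomly thinned mask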