Example #1
def forward(self, inputs, params):
    # Normalize the batch with the learned scale (gamma) and shift (beta)
    # parameters, then store the updated running statistics for inference.
    outputs, running_mean, running_variance = layers.batchnorm(
        inputs, params[self.gamma], params[self.beta],
        params['__training_in_progress__'], self.epsilon, self.momentum,
        self.running_mean, self.running_variance)
    self.running_mean, self.running_variance = running_mean, running_variance
    return outputs
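layers.batchnorm appears here only at its call site, but the signature matches the standard batch-normalization recipe. As a rough guide, here is a minimal NumPy sketch of what such a function plausibly computes; the name batchnorm_forward, the argument order, and the momentum convention are assumptions, not the library's actual code:

import numpy as np

def batchnorm_forward(x, gamma, beta, training, eps, momentum,
                      running_mean, running_var):
    # Training: normalize with batch statistics and update running stats.
    # Inference: reuse the tracked running statistics instead.
    if training:
        mean, var = x.mean(axis=0), x.var(axis=0)
        running_mean = momentum * running_mean + (1 - momentum) * mean
        running_var = momentum * running_var + (1 - momentum) * var
    else:
        mean, var = running_mean, running_var
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma * x_hat + beta, running_mean, running_var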
Example #2
def forward(self, inputs, params):
    # Spatial batch normalization: flatten NCHW activations so that each
    # channel becomes a column, normalized over the batch and all spatial
    # positions at once.
    N, C, W, H = inputs.shape
    inputs = np.transpose(inputs, (0, 2, 3, 1))
    inputs = np.reshape(inputs, (N * W * H, C))

    outputs, running_mean, running_variance = layers.batchnorm(
        inputs, params[self.gamma], params[self.beta],
        params['__training_in_progress__'], self.epsilon, self.momentum,
        self.running_mean, self.running_variance)
    self.running_mean, self.running_variance = running_mean, running_variance

    # Restore the original NCHW layout.
    outputs = np.reshape(outputs, (N, W, H, C))
    outputs = np.transpose(outputs, (0, 3, 1, 2))
    return outputs
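The transposes exist because batchnorm normalizes each column over the leading axis, so channels must become columns before the call. A quick self-contained check, with made-up shapes, that the NCHW flattening above is exactly undone on the way out:

import numpy as np

inputs = np.random.randn(2, 3, 4, 5)  # hypothetical N, C, W, H
N, C, W, H = inputs.shape
flat = np.reshape(np.transpose(inputs, (0, 2, 3, 1)), (N * W * H, C))
restored = np.transpose(np.reshape(flat, (N, W, H, C)), (0, 3, 1, 2))
assert np.array_equal(inputs, restored)  # the round trip is exact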
Example #3
def forward(self, X, mode):
    # Flatten the input images into a (batch, features) matrix.
    X = np.reshape(X, (X.shape[0], 3 * 32 * 32))
    # First affine (fully connected) layer.
    y1 = layers.affine(X, self.params['w1'], self.params['b1'])
    # ReLU activation.
    y2 = layers.relu(y1)
    # Batch normalization, updating the stored running statistics.
    y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
        y2, self.params['gamma'], self.params['beta'],
        running_mean=self.aux_params['running_mean'],
        running_var=self.aux_params['running_var'])
    # Second affine layer.
    y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
    # Dropout (active only in training mode).
    y5 = layers.dropout(y4, 0.5, mode=mode)
    return y5
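The other layers helpers chained here are not shown on this page. Under the textbook definitions they reduce to a few lines each; the sketch below assumes those definitions (including inverted dropout) and is not the library's implementation:

import numpy as np

def affine(x, w, b):
    # Fully connected layer: y = x @ w + b.
    return x @ w + b

def relu(x):
    # Elementwise rectifier.
    return np.maximum(0, x)

def dropout(x, p, mode='train'):
    # Inverted dropout: drop with probability p and rescale at training
    # time, so the inference pass is an identity.
    if mode != 'train':
        return x
    mask = (np.random.rand(*x.shape) >= p) / (1 - p)
    return x * mask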
Example #4
def check_beta(b):
    # Scalar loss as a function of beta only, for gradient checking.
    y, _, _ = layers.batchnorm(x, gamma, b)
    return layers.l2_loss(y, fake_y)
Example #5
def check_gamma(g):
    # Scalar loss as a function of gamma only, for gradient checking.
    y, _, _ = layers.batchnorm(x, g, beta)
    return layers.l2_loss(y, fake_y)
Example #6
def check_beta(b):
    # Same beta check as above, but against a softmax loss.
    y, _, _ = layers.batchnorm(x, gamma, b)
    return layers.softmax_loss(y, fake_y)
Example #7
def check_gamma(g):
    # Same gamma check as above, but against a softmax loss.
    y, _, _ = layers.batchnorm(x, g, beta)
    return layers.softmax_loss(y, fake_y)
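All four check_* closures map one parameter tensor to a scalar loss, which is exactly the interface a numerical gradient checker expects. A minimal central-difference checker they could be passed to (numeric_gradient is a hypothetical name, not from the library):

import numpy as np

def numeric_gradient(f, x, h=1e-5):
    # Central-difference estimate of df/dx for a scalar-valued f.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = x[idx]
        x[idx] = old + h
        f_plus = f(x)
        x[idx] = old - h
        f_minus = f(x)
        x[idx] = old          # restore the original entry
        grad[idx] = (f_plus - f_minus) / (2 * h)
        it.iternext()
    return grad

# e.g. numeric_gradient(check_gamma, gamma) approximates dloss/dgamma,
# which can then be compared against the analytic backward pass.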