Example #1
    def forward(self, inputs, params):
        # Normalize the batch with the learned scale (gamma) and shift (beta).
        # The '__training_in_progress__' flag selects batch statistics during
        # training and the stored running statistics at test time.
        outputs, running_mean, running_variance = layers.batchnorm(
            inputs, params[self.gamma], params[self.beta],
            params['__training_in_progress__'], self.epsilon, self.momentum,
            self.running_mean, self.running_variance)
        # Persist the updated running statistics for later calls.
        self.running_mean, self.running_variance = running_mean, running_variance
        return outputs
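
For reference, here is a minimal NumPy sketch of the computation such a forward pass performs. This is an illustrative re-implementation, not the actual layers.batchnorm; the argument names (eps, momentum, the running statistics) simply mirror the call above.

    import numpy as np

    def batchnorm_forward_sketch(x, gamma, beta, training, eps, momentum,
                                 running_mean, running_var):
        # x: (N, D); gamma and beta: (D,) learned scale and shift.
        if training:
            mean, var = x.mean(axis=0), x.var(axis=0)
            # Exponentially decayed running statistics for test time.
            running_mean = momentum * running_mean + (1 - momentum) * mean
            running_var = momentum * running_var + (1 - momentum) * var
        else:
            mean, var = running_mean, running_var
        x_hat = (x - mean) / np.sqrt(var + eps)
        return gamma * x_hat + beta, running_mean, running_var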
Example #2
    def forward(self, inputs, params):
        # Spatial batch normalization: fold the spatial dimensions into the
        # batch dimension so each channel is normalized over all samples and
        # all spatial positions.
        N, C, W, H = inputs.shape
        inputs = np.transpose(inputs, (0, 2, 3, 1))
        inputs = np.reshape(inputs, (N * W * H, C))

        outputs, running_mean, running_variance = layers.batchnorm(
            inputs, params[self.gamma], params[self.beta],
            params['__training_in_progress__'], self.epsilon, self.momentum,
            self.running_mean, self.running_variance)
        self.running_mean, self.running_variance = running_mean, running_variance

        # Restore the original (N, C, W, H) layout.
        outputs = np.reshape(outputs, (N, W, H, C))
        outputs = np.transpose(outputs, (0, 3, 1, 2))
        return outputs
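
The transpose/reshape round trip above is lossless. A small standalone check in plain NumPy (independent of layers.batchnorm):

    import numpy as np

    x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)  # (N, C, W, H)
    N, C, W, H = x.shape
    flat = np.reshape(np.transpose(x, (0, 2, 3, 1)), (N * W * H, C))
    back = np.transpose(np.reshape(flat, (N, W, H, C)), (0, 3, 1, 2))
    assert np.array_equal(x, back)  # the original layout is fully recovered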
Example #3
    def forward(self, X, mode):
        # Flatten each 3x32x32 input image into a row vector.
        X = np.reshape(X, (X.shape[0], 3 * 32 * 32))
        # First affine (fully connected) layer.
        y1 = layers.affine(X, self.params['w1'], self.params['b1'])
        # ReLU activation.
        y2 = layers.relu(y1)
        # Batch normalization; the updated running statistics are written
        # back into aux_params so they are available at test time.
        y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
            y2, self.params['gamma'], self.params['beta'],
            running_mean=self.aux_params['running_mean'],
            running_var=self.aux_params['running_var'])
        # Second affine layer.
        y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
        # Dropout with probability 0.5; active only in training mode.
        y5 = layers.dropout(y4, 0.5, mode=mode)
        return y5
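
A hypothetical call site, assuming the common convention that mode is 'train' or 'test' (model and X_batch are illustrative names, not part of the example):

    scores = model.forward(X_batch, mode='train')  # dropout active
    scores = model.forward(X_batch, mode='test')   # dropout disabled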
Example #4
    def check_beta(b):
        # Loss as a function of beta alone, for numerical gradient checking
        # of the batchnorm shift parameter against an L2 loss.
        y, _, _ = layers.batchnorm(x, gamma, b)
        return layers.l2_loss(y, fake_y)
Example #5
    def check_gamma(g):
        # Loss as a function of gamma alone, for numerical gradient checking
        # of the batchnorm scale parameter against an L2 loss.
        y, _, _ = layers.batchnorm(x, g, beta)
        return layers.l2_loss(y, fake_y)
Example #6
    def check_beta(b):
        # Same check as above, but against a softmax loss.
        y, _, _ = layers.batchnorm(x, gamma, b)
        return layers.softmax_loss(y, fake_y)
Example #7
    def check_gamma(g):
        # Same check as above, but against a softmax loss.
        y, _, _ = layers.batchnorm(x, g, beta)
        return layers.softmax_loss(y, fake_y)
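
Closures like these are typically handed to a numerical gradient checker. A minimal central-difference sketch in plain NumPy (the helper name and step size are illustrative, not part of any framework):

    import numpy as np

    def numeric_gradient(loss_fn, p, h=1e-5):
        # Central differences: estimate d(loss)/d(p[i]) for every entry of p.
        grad = np.zeros_like(p)
        it = np.nditer(p, flags=['multi_index'])
        while not it.finished:
            i = it.multi_index
            old = p[i]
            p[i] = old + h
            loss_plus = loss_fn(p)
            p[i] = old - h
            loss_minus = loss_fn(p)
            p[i] = old
            grad[i] = (loss_plus - loss_minus) / (2 * h)
            it.iternext()
        return grad

    # e.g. compare numeric_gradient(check_gamma, gamma.copy()) against the
    # analytic gradient from the framework's backward pass.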