Example 1
 def forward(self, X, mode):
     # Flatten each input image into a row of an (N, 3*32*32) matrix.
     X = np.reshape(X, (X.shape[0], 3 * 32 * 32))
     # First affine (fully-connected) layer.
     y1 = layers.affine(X, self.params['w1'], self.params['b1'])
     # ReLU activation.
     y2 = layers.relu(y1)
     # Batch normalization; the updated running statistics are stored back.
     y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
         y2, self.params['gamma'], self.params['beta'],
         running_mean=self.aux_params['running_mean'],
         running_var=self.aux_params['running_var'])
     # Second affine layer.
     y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
     # Dropout with probability 0.5; `mode` switches train/test behaviour.
     y5 = layers.dropout(y4, 0.5, mode=mode)
     return y5
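
All of these examples delegate to a `layers` module whose internals are not shown. As a rough sketch only, an inverted-dropout routine consistent with the `mode` argument used above might look like the following (assuming the second argument is the drop probability; the actual library implementation may differ):

 import numpy as np

 def dropout(x, p, mode='train'):
     # Inverted dropout sketch: at train time, zero each unit with
     # probability p and rescale the survivors by 1 / (1 - p), so the
     # expected activation is unchanged and test time is a no-op.
     if mode != 'train' or p == 0.0:
         return x
     mask = (np.random.rand(*x.shape) >= p) / (1.0 - p)
     return x * mask

With this convention, calling `dropout(y4, 0.5, mode='test')` simply returns its input unchanged.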
Example 2
 def forward(self, data):
     # Apply dropout with the probability stored on the layer.
     return layers.dropout(data, self._p)
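
The `self._p` attribute suggests this two-line forward lives on a thin wrapper class that stores the drop probability once; a minimal sketch of such a wrapper (the class itself is an assumption, not part of the source) could be:

 class Dropout:
     # Hypothetical wrapper: holds the drop probability and delegates
     # every forward call to the shared layers.dropout routine.
     def __init__(self, p):
         self._p = p

     def forward(self, data):
         return layers.dropout(data, self._p)

Note that this variant passes no mode flag, so the train/test behaviour presumably follows the library's default.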
Example 3
 def forward(self, inputs, params):
     # The training flag in `params` decides whether dropout is applied.
     return layers.dropout(inputs, self.probability,
                           params['__training_in_progress__'])
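
Unlike the previous example, this variant threads a training flag through the `params` dictionary, so the caller controls whether dropout is active. A hypothetical call site (the `DropoutLayer` name and constructor are illustrative assumptions) might look like:

 # The caller decides whether dropout is active on each call.
 layer = DropoutLayer(probability=0.5)
 y_train = layer.forward(x, {'__training_in_progress__': True})    # random masking applied
 y_test = layer.forward(x, {'__training_in_progress__': False})    # dropout disabled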