def forward(self, X, mode):
    # Flatten the input data to matrix.
    X = np.reshape(X, (self.batch_size, 784))
    # First affine layer (fully-connected layer).
    y1 = layers.affine(X, self.params['wi'], self.params['bi'])
    # ReLU activation.
    y2 = layers.relu(y1)
    # Hidden affine + ReLU layers.
    for i in range(self.num_hidden - 1):
        y2 = layers.affine(y2, self.params['w%d' % i], self.params['b%d' % i])
        y2 = layers.relu(y2)
    # Output affine layer.
    y3 = layers.affine(y2, self.params['wo'], self.params['bo'])
    return y3
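The `layers` module is not shown here; a minimal NumPy sketch of the two calls used above, assuming the conventional fully-connected and rectifier definitions (the real module may differ):

import numpy as np

def affine(X, w, b):
    # Fully-connected layer: project each row of X by w and shift by b.
    return np.dot(X, w) + b

def relu(X):
    # Rectified linear unit: zero out negative activations elementwise.
    return np.maximum(0, X)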
def forward(self, X, mode):
    N, sequence_length, D = X.shape
    WX = self.params['WX']
    Wh = self.params['Wh']
    bias_h = self.params['bias_h']
    WY = self.params['WY']
    bias_Y = self.params['bias_Y']
    WY0 = self.params['WY0']
    bias_Y0 = self.params['bias_Y0']
    h = np.zeros((N, self._n_hidden))
    self.previous_h = [h]
    for t in range(sequence_length):
        X_t = X[:, t, :]
        # Candidate hidden state from the current input and previous state.
        h0 = self._update_h(X_t, h, WX, Wh, bias_h)
        # Project the candidate onto the stored history: each previous state
        # is weighted by its similarity to the candidate.
        projected_h = sum(
            batch_scalar_product(h_prev, h0) * h_prev
            for h_prev in self.previous_h)
        h = np.dot(X_t, WX) + np.dot(h, Wh) + projected_h
        h = self._nonlinear(h)
        self.previous_h.append(h)
    Y0 = layers.relu(layers.affine(h, WY0, bias_Y0))
    Y = layers.affine(Y0, WY, bias_Y)
    return Y
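`batch_scalar_product` is not defined in this snippet; a plausible sketch, assuming it takes two (N, H) batches and returns per-row dot products shaped to broadcast against the hidden states:

def batch_scalar_product(a, b):
    # Row-wise dot product of two (N, H) batches, kept as an (N, 1) column
    # so the result broadcasts when multiplied with an (N, H) hidden state.
    return np.sum(a * b, axis=1, keepdims=True)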
def forward(self, X, mode):
    out = self.conv(X=X, **self.params)
    out = layers.affine(out, self.params['w1'], self.params['b1'])
    out = layers.relu(out)
    out = layers.affine(out, self.params['w2'], self.params['b2'])
    # This verifies whether symbols can be reused.
    trash = self.conv(X=np.zeros(X.shape), **self.params)
    return out
def forward(self, X, mode):
    # Flatten the input data to matrix.
    X = np.reshape(X, (batch_size, flattened_input_size))
    # First affine layer (fully-connected layer).
    y1 = layers.affine(X, self.params['w1'], self.params['b1'])
    # ReLU activation.
    y2 = layers.relu(y1)
    # Second affine layer.
    y3 = layers.affine(y2, self.params['w2'], self.params['b2'])
    return y3
def forward(self, X, mode):
    # Flatten the input data to matrix.
    X = np.reshape(X, (batch_size, 3 * 32 * 32))
    # First affine layer (fully-connected layer).
    y1 = layers.affine(X, self.params['w1'], self.params['b1'])
    # ReLU activation.
    y2 = layers.relu(y1)
    # Batch normalization.
    y3, self.aux_params['running_mean'], self.aux_params['running_var'] = layers.batchnorm(
        y2, self.params['gamma'], self.params['beta'],
        running_mean=self.aux_params['running_mean'],
        running_var=self.aux_params['running_var'])
    # Second affine layer.
    y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
    # Dropout.
    y5 = layers.dropout(y4, 0.5, mode=mode)
    return y5
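The batchnorm and dropout calls suggest the usual semantics: batchnorm returns its output together with updated running statistics, and dropout is active only in training mode. A training-mode sketch under those assumptions (the `momentum` and `eps` parameter names are illustrative):

def batchnorm(X, gamma, beta, running_mean=None, running_var=None,
              momentum=0.9, eps=1e-5):
    # Normalize with batch statistics; keep exponential running averages
    # of mean and variance for use at test time.
    mean, var = np.mean(X, axis=0), np.var(X, axis=0)
    running_mean = momentum * running_mean + (1 - momentum) * mean
    running_var = momentum * running_var + (1 - momentum) * var
    out = gamma * (X - mean) / np.sqrt(var + eps) + beta
    return out, running_mean, running_var

def dropout(X, prob, mode='train'):
    # Inverted dropout: scale kept units at training time so the
    # test-time forward pass needs no rescaling.
    if mode == 'train':
        mask = (np.random.rand(*X.shape) >= prob) / (1.0 - prob)
        return X * mask
    return X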
def forward(self, X, mode):
    N, sequence_length, D = X.shape
    h = np.zeros((N, self._n_hidden))
    WX = self.params['WX']
    Wh = self.params['Wh']
    bias_h = self.params['bias_h']
    WY = self.params['WY']
    bias_Y = self.params['bias_Y']
    WY0 = self.params['WY0']
    bias_Y0 = self.params['bias_Y0']
    self.previous_h = [h]
    for t in range(sequence_length):
        X_t = X[:, t, :]
        # Plain recurrent update, then a refinement pass over the stored history.
        h = self._update_h(X_t, h, WX, Wh, bias_h)
        h = self._inner_loop(X_t, self.previous_h[-1], h, WX, Wh, self.previous_h)
        self.previous_h.append(h)
    Y0 = layers.relu(layers.affine(h, WY0, bias_Y0))
    Y = layers.affine(Y0, WY, bias_Y)
    return Y
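`_update_h` and `_inner_loop` are helpers of the class and are not shown; for orientation, a plausible sketch of `_update_h`, assuming the standard vanilla-RNN step (the tanh nonlinearity is an assumption):

def _update_h(self, X_t, h, WX, Wh, bias_h):
    # One vanilla-RNN step: affine-combine the current input with the
    # previous hidden state, then squash through tanh (assumed here).
    return np.tanh(np.dot(X_t, WX) + np.dot(h, Wh) + bias_h)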
def check_fn(x):
    return layers.l2_loss(layers.relu(x), fake_y)
def forward(self, X):
    y1 = layers.affine(X, self.params['w1'], self.params['b1'])
    y2 = layers.relu(y1)
    y3 = layers.affine(y2, self.params['w2'], self.params['b2'])
    return y3
def check_fn(x):
    return layers.softmax_loss(layers.relu(x), fake_y)
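The single-argument `check_fn` closures above read like targets for a finite-difference gradient check against the framework's automatic differentiation. A minimal checker sketch (the `numerical_gradient` name is illustrative; `fake_y` is assumed to be fixed dummy labels captured by the closure):

def numerical_gradient(fn, x, eps=1e-6):
    # Central finite differences of the scalar-valued fn at x.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = fn(x)
        x[idx] = orig - eps
        f_minus = fn(x)
        x[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2 * eps)
        it.iternext()
    return grad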
def forward(self, inputs, *args):
    return layers.relu(inputs)
def forward(self, X, mode): out = self.conv(X=X, **self.params) out = layers.affine(out, self.params["w1"], self.params["b1"]) out = layers.relu(out) out = layers.affine(out, self.params["w2"], self.params["b2"]) return out
def forward(self, X, mode):
    out = self.conv(X=X, **self.params)
    out = layers.affine(out, self.params['w1'], self.params['b1'])
    out = layers.relu(out)
    out = layers.affine(out, self.params['w2'], self.params['b2'])
    return out
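A hedged usage sketch for these convolutional forward passes, assuming (N, C, H, W) image batches; the shapes and the `model` name are illustrative, not from the original code:

X = np.random.randn(16, 3, 32, 32)       # a batch of 16 RGB 32x32 images (illustrative)
scores = model.forward(X, mode='train')  # class scores, one row per image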