def conv_bn_conv_bn_pool2x2(inp_layer, conv_filters, conv_shapes, res_shape, training_name):
    # First conv block: conv -> spatial batchnorm -> ReLU -> dropout.
    # conv_shapes[0] is (C_in, filter_h, filter_w); require a square filter.
    assert conv_shapes[0][1] == conv_shapes[0][2]
    pad1 = conv_shapes[0][1] // 2  # 'same' padding for an odd filter size
    conv1 = layers.Conv((conv_filters[0],) + conv_shapes[0],
                        {'stride': 1, 'pad': pad1}, inp_layer)
    conv1 = layers.SpatialBatchnorm((conv_filters[0],) + res_shape, training_name, conv1)
    conv1 = layers.Relu(conv1)
    conv1 = layers.Dropout(0.6, training_name, conv1)

    # Second conv block; conv_shapes[1] is just (filter_h, filter_w), since the
    # input channel count is conv_filters[0]. Again require a square filter.
    assert conv_shapes[1][0] == conv_shapes[1][1]
    pad2 = conv_shapes[1][1] // 2
    conv2 = layers.Conv((conv_filters[1], conv_filters[0]) + conv_shapes[1],
                        {'stride': 1, 'pad': pad2}, conv1)
    conv2 = layers.SpatialBatchnorm((conv_filters[1],) + res_shape, training_name, conv2)
    conv2 = layers.Relu(conv2)
    conv2 = layers.Dropout(0.6, training_name, conv2)

    # Halve the spatial resolution with a 2x2 max pool, stride 2.
    pool = layers.MaxPool((2, 2), 2, conv2)
    return pool
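
# A minimal usage sketch for the block above, with assumed example values (the
# channel counts, filter sizes, and 'is_training' name are illustrative, not
# from the original source). Note the asymmetry: conv_shapes[0] carries
# (C_in, fh, fw) while conv_shapes[1] carries only (fh, fw). Both batchnorms
# share res_shape because the stride-1, 'same'-padded convs preserve it.
inp = layers.Input()
block = conv_bn_conv_bn_pool2x2(
    inp,
    conv_filters=(16, 32),            # output channels of conv1 and conv2
    conv_shapes=((3, 3, 3), (3, 3)),  # 3x3 filters over a 3-channel input
    res_shape=(28, 28),               # spatial shape seen by both batchnorms
    training_name='is_training')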
def __init__(self, D_in, outspec, ks=io_size, st=io_stride):
    """outspec should be an OrderedDict mapping output name -> channel count."""
    nn.Module.__init__(self)
    pd = layers.pad_size(ks, "same")  # padding that preserves the spatial size
    self.output_layers = []
    for name, d_out in outspec.items():
        # One 'same'-padded conv head per requested output volume, registered
        # as an attribute so nn.Module tracks its parameters.
        setattr(self, name, layers.Conv(D_in, d_out, ks, st, pd, bias=True))
        self.output_layers.append(name)
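
# Hypothetical construction of the spec consumed above; the class name
# MultiHead and all values here are illustrative assumptions, not from the
# original source:
# from collections import OrderedDict
# outspec = OrderedDict([('affinity', 3), ('mask', 1)])  # name -> output channels
# head = MultiHead(D_in=32, outspec=outspec)
# head.output_layers  # ['affinity', 'mask'], one conv head per entry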
import numpy as np

import layers


def main():
    input_X = [[1, 2, 3],
               [4, 5, 6],
               [7, 8, 9]]
    W = [[1, 1, 1],
         [0, 0, 0],
         [1, 1, 1]]
    b = [0]

    # NHWC layout: (batch, height, width, channels).
    input_X = np.array(input_X).reshape((1, 3, 3, 1))
    W = np.array(W).reshape((1, 3, 3, 1))
    b = np.array(b).reshape((1, 1, 1, 1))

    conv = layers.Conv(num_input_channels=1, filter_size=3, padding='valid',
                       stride=2, num_filters=3, initializer='xavier_normal')
    pool = layers.Pool(2, 1)

    output_Z = conv.forward(input_X)
    output_A = pool.forward(input_X)  # pooling applied to the raw input here

    print("### test convolutional layer ###")
    print("input_X = ", input_X)
    print("output_Z = ", output_Z)
    print("### test pooling layer ###")
    print("output_A = ", output_A)
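
# Expected shapes for the test above, assuming the usual output-size formula
# out = (in - filter) // stride + 1 and that Pool(2, 1) means a 2x2 window
# with stride 1 (an assumption about this layers module's signature):
#   conv: (3 - 3) // 2 + 1 = 1  -> output_Z has shape (1, 1, 1, 3)
#   pool: (3 - 2) // 1 + 1 = 2  -> output_A has shape (1, 2, 2, 1)
# Note that W and b are built but never handed to the conv layer, so output_Z
# depends on the 'xavier_normal' initialization rather than on W.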
x_train = mf.reshape_x(x_train)
x_test = mf.reshape_x(x_test)

# Train shape (60000, 1, 28, 28), test shape (10000, 1, 28, 28).
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28).astype('float32')
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28).astype('float32')

# Min-max scale from the 0-255 pixel range to 0-1.
x_train = (x_train - np.min(x_train)) / (np.max(x_train) - np.min(x_train))
x_test = (x_test - np.min(x_test)) / (np.max(x_test) - np.min(x_test))

x_dims = (1, 28, 28)
num_classes = 10
class_names = np.unique(y_train)

# Conv layer 1: 2 filters, 3x3 kernel, stride 1, no padding.
conv1 = layers.Conv(x_dims, n_filter=2, h_filter=3, w_filter=3, stride=1, padding=0)
# Sigmoid activation for layer 1.
sig = mf.sigmoid()
# MaxPool layer: 2x2 window, stride 2.
pool1 = layers.Maxpool(conv1.out_dim, size=2, stride=2)
# Conv layer 2: 2 filters, 3x3 kernel, stride 1, no padding.
conv2 = layers.Conv(pool1.out_dim, n_filter=2, h_filter=3, w_filter=3, stride=1, padding=0)
# ReLU activation for layer 2.
relu = mf.ReLU()
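
# Shape check for the stack above, assuming out = (in - filter) // stride + 1
# (the out_dim attributes should agree with this arithmetic):
#   conv1: 28 -> (28 - 3) // 1 + 1 = 26, so conv1.out_dim is (2, 26, 26)
#   pool1: 26 -> (26 - 2) // 2 + 1 = 13, so pool1.out_dim is (2, 13, 13)
#   conv2: 13 -> (13 - 3) // 1 + 1 = 11, so conv2.out_dim is (2, 11, 11)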
import numpy as np

from net import *
from cs231n.solver import *
import layers

if __name__ == "__main__":
    # Instantiation example: conv -> flatten -> affine -> softmax loss.
    i1 = layers.Input()
    c1 = layers.Conv((8, 3, 3, 3), {'stride': 1, 'pad': 1}, i1)
    flat = 8 * 28 * 28  # 8 filters over a 28x28 'same'-padded output
    s1 = layers.Reshape((flat,), c1)
    a1 = layers.Affine((flat, 10), s1)
    l1 = layers.SoftmaxLoss('y', 'loss', a1)

    # Reuse saved parameters if a checkpoint exists.
    try:
        layers.load_network('network')
    except IOError:
        pass

    model = NeuralNetwork(i1, l1, 'loss', layers.params, layers.grads)
    data = {
        'X_train': np.ones((2**10, 3, 28, 28)) * 0.1,
        'y_train': np.ones(2**10, dtype=int) * 2,
        'X_val': np.ones((2**3, 3, 28, 28)) * 0.1,
        'y_val': np.ones(2**3, dtype=int) * 2
    }
    solver = Solver(model, data,
                    update_rule='sgd',
                    optim_config={
                        'learning_rate': 1e-3,
                    })
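
    # The original snippet is truncated mid-call, so any further Solver kwargs
    # are left out and the call is simply closed above. Training would then be
    # started with the cs231n Solver's train() method:
    solver.train()
    print('best val accuracy:', solver.best_val_acc)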