Code example #1
def test_single_initializer(initializer):
    # Dense-only model: both Dense layers use the weight initializer under test.
    model = NeuralNetwork(
        optimizer=AdamOptimizer(
            learning_rate=default_parameters['learning_rate']),
        loss=CrossEntropy(),
        layers=[
            Flatten(),
            Dense(layer_size=50,
                  activation_func=ReLu(),
                  weight_initializer=initializer),
            Dense(layer_size=10,
                  activation_func=Softmax(),
                  weight_initializer=initializer)
        ],
        callbacks=[
            LoggerCallback(),
            PlotCallback(f'./lab_3/initializers/{initializer.get_name()}')
        ])

    model.fit(x_train=X_train,
              y_train=y_train,
              x_val=X_val,
              y_val=y_val,
              epochs=default_parameters['epochs'],
              batch_size=default_parameters['batch_size'])

    model.test(X_test, y_test)
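The `initializer` argument above is only required to expose a `get_name()` method (used to build the PlotCallback output directory) and to be accepted by `Dense(weight_initializer=...)`. Below is a minimal sketch of such an object; the `initialize(shape)` method name is an assumption for illustration, not the repository's actual interface.

import numpy as np

class HeNormalInitializer:
    """He-normal weight initializer sketch (interface assumed, not the repo's API)."""

    def get_name(self):
        # Used above to name the PlotCallback output directory.
        return 'he_normal'

    def initialize(self, shape):
        # He initialization: zero-mean Gaussian with std = sqrt(2 / fan_in).
        fan_in = shape[0]
        return np.random.randn(*shape) * np.sqrt(2.0 / fan_in)

# test_single_initializer(HeNormalInitializer())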
Code example #2
def test_single_initializer_with_convo(initializer):
    # Same experiment with a Convolution2D + MaxPooling2D front end and a 10x higher learning rate.
    model = NeuralNetwork(
        optimizer=AdamOptimizer(
            learning_rate=default_parameters['learning_rate'] * 10),
        loss=CrossEntropy(),
        layers=[
            Convolution2D(num_of_filters=8,
                          kernel=(3, 3),
                          activation_func=ReLu()),
            MaxPooling2D(pool_size=(2, 2), stride=(2, 2)),
            Flatten(),
            Dense(layer_size=50,
                  activation_func=ReLu(),
                  weight_initializer=initializer),
            Dense(layer_size=10,
                  activation_func=Softmax(),
                  weight_initializer=initializer)
        ],
        callbacks=[
            LoggerCallback(),
            PlotCallback(f'./lab_3/initializers/{initializer.get_name()}')
        ])

    model.fit(x_train=X_train,
              y_train=y_train,
              x_val=X_val,
              y_val=y_val,
              epochs=default_parameters['epochs'],
              batch_size=default_parameters['batch_size'])

    model.test(X_test, y_test)
Code example #3
import numpy as np


def make_cnn(input_dim, num_of_classes):
    # Conv -> ReLU -> max-pool -> flatten -> dense classifier head.
    conv1 = Convolution(input_dim=input_dim,
                        pad=2,
                        stride=2,
                        num_filters=10,
                        filter_size=3,
                        seed=1)
    relu1 = Relu()
    maxpool1 = Maxpool(input_dim=conv1.output_dim, filter_size=2, stride=1)
    flatten = Flatten(seed=1)
    dense1 = Dense(input_dim=np.prod(maxpool1.output_dim),
                   output_dim=num_of_classes,
                   seed=1)

    layers = [conv1, relu1, maxpool1, flatten, dense1]
    return layers
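`make_cnn` relies on each layer exposing an `output_dim` attribute so the next layer can be sized (e.g. `maxpool1.output_dim` is fed to `np.prod` for the Dense input). A self-contained sketch of the standard formula such an attribute would follow for the convolution, assuming a `(channels, height, width)` layout (the actual layout used by `Convolution` is an assumption here):

def conv_output_dim(input_dim, filter_size, pad, stride, num_filters):
    # input_dim assumed to be (channels, height, width)
    _, h, w = input_dim
    out_h = (h + 2 * pad - filter_size) // stride + 1
    out_w = (w + 2 * pad - filter_size) // stride + 1
    return (num_filters, out_h, out_w)

# e.g. a 28x28 single-channel input with the parameters used in make_cnn:
print(conv_output_dim((1, 28, 28), filter_size=3, pad=2, stride=2, num_filters=10))
# -> (10, 15, 15)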
Code example #4
import numpy as np

(X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes) = load_dataset()

X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))

layers = [
    Conv((5, 5, 3, 8), strides=1, pad=2, activation=relu,
         filter_init=lambda shp: np.random.normal(size=shp) * 1.0 / (5 * 5 * 3)),
    MaxPool(f=8, strides=8, channels=8),
    Conv((3, 3, 8, 16), strides=1, pad=1, activation=relu,
         filter_init=lambda shp: np.random.normal(size=shp) * 1.0 / (3 * 3 * 8)),
    MaxPool(f=4, strides=4, channels=16),
    Flatten((2, 2, 16)),
    FullyConnected((2 * 2 * 16, 20), activation=sigmoid,
                   weight_init=lambda shp: np.random.normal(size=shp) * np.sqrt(1.0 / (2 * 2 * 16 + 20))),
    FullyConnected((20, 6), activation=linear,
                   weight_init=lambda shp: np.random.normal(size=shp) * np.sqrt(1.0 / (20 + 6)))
]

minibatch_size = 20

lr = 0.009
k = 2000
net = Network(layers, lr=lr, loss=cross_entropy)
num_epochs = 10
costs = []

m = X_train.shape[0]
for epoch in range(num_epochs):
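The snippet above is cut off right after the epoch loop header. Below is a minimal, self-contained sketch of how minibatches are typically drawn each epoch, written in plain NumPy rather than against the `Network` class's (unknown) training API:

import numpy as np

def iterate_minibatches(X, Y, batch_size, seed=0):
    # Shuffle once per epoch, then yield contiguous slices of the permutation.
    m = X.shape[0]
    perm = np.random.default_rng(seed).permutation(m)
    for start in range(0, m, batch_size):
        idx = perm[start:start + batch_size]
        yield X[idx], Y[idx]

# for epoch in range(num_epochs):
#     for X_batch, Y_batch in iterate_minibatches(X_train, Y_train, minibatch_size, seed=epoch):
#         ...  # one forward/backward pass and parameter update via `net`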
Code example #5
import numpy as np
np.random.seed(0)


if __name__ == '__main__':
    train_data, train_labels = get_data(num_samples=50000)
    test_data, test_labels = get_data(num_samples=10000, dataset="testing")

    train_data = train_data / 255
    test_data = test_data / 255

    print("Train data shape: {}, {}".format(train_data.shape, train_labels.shape))
    print("Test data shape: {}, {}".format(test_data.shape, test_labels.shape))

    model = Model(
        Convolution(filters=5, padding='same'),
        Elu(),
        Pooling(mode='max', kernel_shape=(2, 2), stride=2),
        Flatten(),
        FullyConnected(units=10),
        Softmax(),
        name='cnn5'
    )

    model.set_loss(CategoricalCrossEntropy)

    model.train(train_data, train_labels.T, epochs=2)  # pass load_and_continue=True to resume from previously trained weights
    # model.load_weights()  # uncomment (and comment out the train call above) to skip training and only load trained weights

    print('Testing accuracy = {}'.format(model.evaluate(test_data, test_labels)))