Example #1
0
def main(args):
    """Build a ConvolutionNet, set up CIFAR-10 iterators, and train it."""
    # Instantiate the network.
    model = ConvolutionNet()
    # Load CIFAR-10 from the directory supplied on the command line.
    data = get_CIFAR10_data(args.data_dir)

    # Shuffle only the training stream; evaluation order stays fixed.
    # NOTE(review): `batch_size` is assumed to be a module-level constant.
    train_dataiter = NDArrayIter(
        data=data['X_train'],
        label=data['y_train'],
        batch_size=batch_size,
        shuffle=True)
    test_dataiter = NDArrayIter(
        data=data['X_test'],
        label=data['y_test'],
        batch_size=batch_size,
        shuffle=False)

    # Gaussian init + SGD with momentum for 10 epochs.
    solver = Solver(
        model,
        train_dataiter,
        test_dataiter,
        num_epochs=10,
        init_rule='gaussian',
        init_config={'stdvar': 0.001},
        update_rule='sgd_momentum',
        optim_config={'learning_rate': 1e-3, 'momentum': 0.9},
        verbose=True,
        print_every=20)

    solver.init()   # initialize model parameters
    solver.train()  # run the training loop
Example #2
0
def main(_):
    """Train a TwoLayerCaffeNet on flattened CIFAR-10 images.

    The underscore parameter is an ignored CLI/args placeholder.
    """
    model = TwoLayerCaffeNet()
    data = get_CIFAR10_data()
    # reshape all data to matrix: each image becomes a 3*32*32 vector
    # so the fully-connected network can consume it.
    data['X_train'] = data['X_train'].reshape([data['X_train'].shape[0], 3 * 32 * 32])
    data['X_val'] = data['X_val'].reshape([data['X_val'].shape[0], 3 * 32 * 32])
    data['X_test'] = data['X_test'].reshape([data['X_test'].shape[0], 3 * 32 * 32])
    # ATTENTION: the batch size should be the same as the input shape declared above.
    train_dataiter = NDArrayIter(data['X_train'],
                         data['y_train'],
                         100,
                         True)

    # FIX: the test iterator previously shuffled (positional True); evaluation
    # data must be served in a fixed order, consistent with the other examples.
    test_dataiter = NDArrayIter(data['X_test'],
                         data['y_test'],
                         100,
                         False)
    # NOTE(review): batch_size=128 here disagrees with the iterators'
    # batch size of 100 — confirm which value Solver actually uses.
    solver = Solver(model,
                    train_dataiter,
                    test_dataiter,
                    num_epochs=10,
                    batch_size=128,
                    init_rule='xavier',
                    update_rule='sgd_momentum',
                    optim_config={
                        'learning_rate': 1e-4,
                        'momentum': 0.9
                    },
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
Example #3
0
def main(args):
    """Train a ConvolutionNet on CIFAR-10 with one iterator per split."""
    model = ConvolutionNet()
    data = get_CIFAR10_data(args.data_dir)

    # Build both iterators with a single loop; only 'train' is shuffled.
    # NOTE(review): `batch_size` must be defined at module scope.
    iters = {}
    for split, do_shuffle in (('train', True), ('test', False)):
        iters[split] = NDArrayIter(data=data['X_' + split],
                                   label=data['y_' + split],
                                   batch_size=batch_size,
                                   shuffle=do_shuffle)

    # Solver: gaussian weight init, SGD with momentum, 10 epochs.
    solver = Solver(model,
                    iters['train'],
                    iters['test'],
                    num_epochs=10,
                    init_rule='gaussian',
                    init_config={'stdvar': 0.001},
                    update_rule='sgd_momentum',
                    optim_config={'learning_rate': 1e-3,
                                  'momentum': 0.9},
                    verbose=True,
                    print_every=20)

    # Initialize parameters, then run training.
    solver.init()
    solver.train()
Example #4
0
def main():
    """Train an RNNNet regressor on synthetically generated sequences."""
    model = RNNNet()
    # 10k generated training samples, 1k held-out samples.
    x_train, y_train = data_gen(10000)
    x_test, y_test = data_gen(1000)

    # Fixed batch size of 100; only the training stream is shuffled.
    train_dataiter = NDArrayIter(x_train, y_train, batch_size=100, shuffle=True)
    test_dataiter = NDArrayIter(x_test, y_test, batch_size=100, shuffle=False)

    # Xavier init + Adam, regression objective.
    solver = Solver(model,
                    train_dataiter,
                    test_dataiter,
                    num_epochs=10,
                    init_rule='xavier',
                    update_rule='adam',
                    task_type='regression',
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
Example #5
0
def main():
    """Fit an RNNNet regression model on generated sequence data."""
    model = RNNNet()

    # Generate the two splits: 10000 for training, 1000 for evaluation.
    train_xy = data_gen(10000)
    test_xy = data_gen(1000)

    # Wrap each split in an iterator; shuffle the training split only.
    train_dataiter = NDArrayIter(train_xy[0],
                                 train_xy[1],
                                 batch_size=100,
                                 shuffle=True)
    test_dataiter = NDArrayIter(test_xy[0],
                                test_xy[1],
                                batch_size=100,
                                shuffle=False)

    # Regression task trained with Adam from Xavier-initialized weights.
    solver = Solver(model, train_dataiter, test_dataiter,
                    num_epochs=10,
                    init_rule='xavier',
                    update_rule='adam',
                    task_type='regression',
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
Example #6
0
def main(_):
    """Train a TwoLayerCaffeNet on flattened CIFAR-10 images.

    The underscore parameter is an ignored CLI/args placeholder.
    """
    model = TwoLayerCaffeNet()
    data = get_CIFAR10_data()
    # reshape all data to matrix: flatten each image to 3*32*32 features.
    data['X_train'] = data['X_train'].reshape(
        [data['X_train'].shape[0], 3 * 32 * 32])
    data['X_val'] = data['X_val'].reshape(
        [data['X_val'].shape[0], 3 * 32 * 32])
    data['X_test'] = data['X_test'].reshape(
        [data['X_test'].shape[0], 3 * 32 * 32])
    # ATTENTION: the batch size should be the same as the input shape declared above.
    train_dataiter = NDArrayIter(data['X_train'], data['y_train'], 100, True)

    # FIX: the test iterator previously shuffled (positional True); evaluation
    # data must be served in a fixed order, consistent with the other examples.
    test_dataiter = NDArrayIter(data['X_test'], data['y_test'], 100, False)
    # NOTE(review): batch_size=128 disagrees with the iterators' batch size
    # of 100 — confirm which value Solver actually uses.
    solver = Solver(model,
                    train_dataiter,
                    test_dataiter,
                    num_epochs=10,
                    batch_size=128,
                    init_rule='xavier',
                    update_rule='sgd_momentum',
                    optim_config={
                        'learning_rate': 1e-4,
                        'momentum': 0.9
                    },
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
def main(args):
    """Assemble a small CNN with the builder API and train it on CIFAR-10."""
    # Conv -> ReLU -> max-pool -> flatten -> two affine layers.
    # NOTE(review): hidden_size, num_classes and batch_size are assumed to be
    # module-level constants.
    net = builder.Sequential(
        builder.Convolution((7, 7), 32),
        builder.ReLU(),
        builder.Pooling("max", (2, 2), (2, 2)),
        builder.Flatten(),
        builder.Affine(hidden_size),
        builder.Affine(num_classes),
    )

    # Wrap the layer stack in a solver-compatible model with softmax loss
    # over flattened 3*32*32 inputs.
    model = builder.Model(net, "softmax", (3 * 32 * 32,))

    data = get_CIFAR10_data(args.data_dir)

    # Training iterator shuffles; evaluation iterator does not.
    train_dataiter = NDArrayIter(data["X_train"], data["y_train"],
                                 batch_size=batch_size, shuffle=True)
    test_dataiter = NDArrayIter(data["X_test"], data["y_test"],
                                batch_size=batch_size, shuffle=False)

    # Gaussian init + SGD with momentum for 10 epochs.
    solver = Solver(model,
                    train_dataiter,
                    test_dataiter,
                    num_epochs=10,
                    init_rule="gaussian",
                    init_config={"stdvar": 0.001},
                    update_rule="sgd_momentum",
                    optim_config={"learning_rate": 1e-3, "momentum": 0.9},
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
Example #8
0
def main(args):
    """Build a CNN with the builder API and train it on CIFAR-10.

    Args:
        args: parsed CLI namespace providing ``data_dir``.
    """
    # Define a convolutional neural network the same as above.
    # NOTE(review): flattened_input_size, hidden_size, num_classes and
    # batch_size are assumed to be module-level constants.
    net = builder.Sequential(
        builder.Convolution((7, 7), 32),
        builder.ReLU(),
        builder.Pooling('max', (2, 2), (2, 2)),
        # FIX: a comma was missing after this Reshape layer, which made the
        # original function a SyntaxError.
        builder.Reshape((flattened_input_size,)),
        builder.Affine(hidden_size),
        builder.Affine(num_classes),
    )

    # Cast the definition to a model compatible with minpy solver.
    model = builder.Model(net, 'softmax', (3 * 32 * 32,))

    data = get_CIFAR10_data(args.data_dir)

    # Shuffle only the training iterator; keep evaluation order fixed.
    train_dataiter = NDArrayIter(data['X_train'],
                         data['y_train'],
                         batch_size=batch_size,
                         shuffle=True)

    test_dataiter = NDArrayIter(data['X_test'],
                         data['y_test'],
                         batch_size=batch_size,
                         shuffle=False)

    # Gaussian weight init + SGD with momentum for 10 epochs.
    solver = Solver(model,
                    train_dataiter,
                    test_dataiter,
                    num_epochs=10,
                    init_rule='gaussian',
                    init_config={
                        'stdvar': 0.001
                    },
                    update_rule='sgd_momentum',
                    optim_config={
                        'learning_rate': 1e-3,
                        'momentum': 0.9
                    },
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
Example #9
0
def main(args):
    """Train a 2-layer perceptron (512 hidden units) on flattened CIFAR-10."""
    # Affine -> ReLU -> Affine classifier over 10 classes.
    MLP = builder.Sequential(
        builder.Affine(512),
        builder.ReLU(),
        builder.Affine(10)
    )

    # Cast the definition to a model compatible with minpy solver.
    model = builder.Model(MLP, 'softmax', (3 * 32 * 32,))

    data = get_CIFAR10_data(args.data_dir)

    # Flatten every split to (N, 3072) so the MLP can consume it.
    n_features = 3 * 32 * 32
    for key in ('X_train', 'X_val', 'X_test'):
        data[key] = data[key].reshape([data[key].shape[0], n_features])

    # Training iterator shuffles each epoch; evaluation iterator does not.
    train_dataiter = NDArrayIter(data['X_train'],
                                 data['y_train'],
                                 batch_size=100,
                                 shuffle=True)
    test_dataiter = NDArrayIter(data['X_test'],
                                data['y_test'],
                                batch_size=100,
                                shuffle=False)

    # Gaussian init, SGD with momentum, small learning rate.
    solver = Solver(model,
                    train_dataiter,
                    test_dataiter,
                    num_epochs=10,
                    init_rule='gaussian',
                    init_config={'stdvar': 0.001},
                    update_rule='sgd_momentum',
                    optim_config={'learning_rate': 1e-5,
                                  'momentum': 0.9},
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
Example #10
0
def main(_):
    """Train a TwoLayerNet on CIFAR-10, handing Solver the raw data dict."""
    model = TwoLayerNet()
    data = get_CIFAR10_data()

    # Flatten each split to (N, 3072) row vectors for the dense network.
    n_features = 3 * 32 * 32
    for key in ('X_train', 'X_val', 'X_test'):
        data[key] = data[key].reshape([data[key].shape[0], n_features])

    # Xavier init + SGD with momentum; Solver receives the data dict directly.
    solver = Solver(model,
                    data,
                    num_epochs=10,
                    init_rule='xavier',
                    update_rule='sgd_momentum',
                    optim_config={'learning_rate': 1e-4,
                                  'momentum': 0.9},
                    verbose=True,
                    print_every=20)
    solver.init()
    solver.train()
Example #11
0
    def main():
        """Train a TwoLayerNet briefly on CIFAR-10 and sanity-check accuracy."""
        # Build the model and load the dataset from the local batches folder.
        model = TwoLayerNet()
        data = get_CIFAR10_data('cifar-10-batches-py')

        # Shuffle only the training stream; evaluation order stays fixed.
        # NOTE(review): `batch_size` is assumed to come from enclosing scope.
        train_dataiter = NDArrayIter(data=data['X_train'],
                                     label=data['y_train'],
                                     batch_size=batch_size,
                                     shuffle=True)
        test_dataiter = NDArrayIter(data=data['X_test'],
                                    label=data['y_test'],
                                    batch_size=batch_size,
                                    shuffle=False)

        # Five epochs of SGD with momentum from gaussian-initialized weights.
        solver = Solver(model,
                        train_dataiter,
                        test_dataiter,
                        num_epochs=5,
                        init_rule='gaussian',
                        init_config={'stdvar': 0.001},
                        update_rule='sgd_momentum',
                        optim_config={'learning_rate': 1e-4,
                                      'momentum': 0.9},
                        verbose=True,
                        print_every=20)

        solver.init()
        solver.train()

        # Measure training accuracy on a sample of the training set.
        train_acc = solver.check_accuracy(
            train_dataiter, num_samples=solver.train_acc_num_samples)

        # a bug-free mlp should reach around 60% train acc
        assert train_acc >= 0.45
Example #12
0
    def main():
        """Run one training epoch of a ConvolutionNet and check accuracy."""
        # Create the network and load CIFAR-10 from the local folder.
        model = ConvolutionNet()
        data = get_CIFAR10_data('cifar-10-batches-py')

        # NOTE(review): `batch_size` is assumed to come from enclosing scope.
        train_dataiter = NDArrayIter(data=data['X_train'],
                                     label=data['y_train'],
                                     batch_size=batch_size,
                                     shuffle=True)
        test_dataiter = NDArrayIter(data=data['X_test'],
                                    label=data['y_test'],
                                    batch_size=batch_size,
                                    shuffle=False)

        # Single epoch: gaussian init + SGD with momentum.
        solver = Solver(model,
                        train_dataiter,
                        test_dataiter,
                        num_epochs=1,
                        init_rule='gaussian',
                        init_config={'stdvar': 0.001},
                        update_rule='sgd_momentum',
                        optim_config={'learning_rate': 1e-3,
                                      'momentum': 0.9},
                        verbose=True,
                        print_every=20)

        # Initialize parameters, then train.
        solver.init()
        solver.train()

        # Evaluate accuracy on a sample of the training set.
        train_acc = solver.check_accuracy(
            train_dataiter, num_samples=solver.train_acc_num_samples)

        # a normal cnn should reach 50% train acc
        assert train_acc >= 0.40
Example #13
0
    def main():
        """Train a ConvolutionNet for one epoch and assert a minimum accuracy."""
        model = ConvolutionNet()
        data = get_CIFAR10_data('cifar-10-batches-py')

        # One iterator per split; only the training split is shuffled.
        # NOTE(review): `batch_size` is assumed to come from enclosing scope.
        iterators = {
            'train': NDArrayIter(data=data['X_train'],
                                 label=data['y_train'],
                                 batch_size=batch_size,
                                 shuffle=True),
            'test': NDArrayIter(data=data['X_test'],
                                label=data['y_test'],
                                batch_size=batch_size,
                                shuffle=False),
        }

        # One epoch of SGD with momentum from gaussian-initialized weights.
        solver = Solver(model,
                        iterators['train'],
                        iterators['test'],
                        num_epochs=1,
                        init_rule='gaussian',
                        init_config={'stdvar': 0.001},
                        update_rule='sgd_momentum',
                        optim_config={'learning_rate': 1e-3,
                                      'momentum': 0.9},
                        verbose=True,
                        print_every=20)
        solver.init()
        solver.train()

        # Check accuracy on a sample of the training set.
        train_acc = solver.check_accuracy(
            iterators['train'], num_samples=solver.train_acc_num_samples)

        # a normal cnn should reach 50% train acc
        assert train_acc >= 0.40
Example #14
0
    def loss(self, predict, y):
        """Collapse the leading two axes and apply softmax cross-entropy.

        Merges the first two dimensions of both `predict` and `y` into one
        axis before computing the loss, so per-step predictions are scored
        as one flat batch.
        """
        flat_predict = predict.reshape(
            (predict.shape[0] * predict.shape[1], predict.shape[2]))
        flat_y = y.reshape((y.shape[0] * y.shape[1],))
        return softmax_crossentropy(flat_predict, flat_y)


def get_data(opts, test=False, post_name='.keep50kr'):
    """Build a txt_data dataset from parsed CLI options.

    Args:
        opts: namespace providing ``data_name`` and ``batch_size``.
        test: whether to load the test split configuration.
        post_name: filename suffix selecting the dataset variant.
    """
    return txt_data(
        opts.data_name,
        batch_size=opts.batch_size,
        test=test,
        post_name=post_name,
    )

if __name__ == '__main__':
    # Command-line interface: corpus location and batch size.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_name', type=str, default='data/ptb')
    parser.add_argument('--batch_size', type=int, default=64)
    args = parser.parse_args()

    dataset = get_data(args)

    # Cut every token stream into fixed-length sequences of 35 tokens.
    seq_len = 35
    train_word = dataset.train_word.reshape((-1, seq_len))
    train_Yword = dataset.train_Yword.reshape((-1, seq_len))
    test_word = dataset.test_word.reshape((-1, seq_len))
    test_Yword = dataset.test_Yword.reshape((-1, seq_len))

    # Shuffle only the training iterator; evaluation order stays fixed.
    train_dataiter = NDArrayIter(train_word, train_Yword, batch_size=64, shuffle=True)
    test_dataiter = NDArrayIter(test_word, test_Yword, batch_size=64, shuffle=False)

    # NOTE(review): w_dim + 1 presumably reserves one extra vocabulary id
    # (padding/unknown) — confirm against LM_RNN.
    model = LM_RNN(batch_size=64, WORD_DIM=dataset.w_dim + 1)
    solver = Solver(model, train_dataiter, test_dataiter,
                    num_epochs=2, init_rule='xavier',
                    update_rule='adam', print_every=20)
    solver.init()
    solver.train()