Example #1
# DeepFried2 is conventionally imported as `df`; this import, and the fact
# that `lenet`, `train`, and `validate` come from the example's companion
# modules (not shown here), are assumptions about the surrounding project.
import DeepFried2 as df


def main(params):
    # MNIST comes pre-split into train/validation/test; the validation
    # split is loaded but never used in this example.
    train_set, valid_set, test_set = df.datasets.mnist.data()
    train_set_x, train_set_y = train_set
    test_set_x, test_set_y = test_set

    model = lenet()
    criterion = df.ClassNLLCriterion()
    optimiser = df.SGD(lr=params['lr'])

    for epoch in range(100):
        # Put the model into training mode, run one epoch of parameter
        # updates, then a second pass that only collects batch statistics.
        model.training()
        train(train_set_x, train_set_y, model, optimiser, criterion, epoch,
              params['batch_size'], 'train')
        train(train_set_x, train_set_y, model, optimiser, criterion, epoch,
              params['batch_size'], 'stats')

        # Switch to evaluation mode and score the model on the test set.
        model.evaluate()
        validate(test_set_x, test_set_y, model, epoch, params['batch_size'])
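
The snippet above defines only the training entry point; no call site is shown. Below is a minimal invocation sketch, assuming nothing beyond the two hyperparameter keys the code reads ('lr' and 'batch_size'); the concrete values are placeholders, not taken from the original example.

if __name__ == "__main__":
    # Placeholder hyperparameters for illustration only.
    main({'lr': 0.1, 'batch_size': 64})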
Example #2
# `test`, `net`, and `lenet2` come from the example's local modules. The
# snippet also relies on DeepFried2 (`df`), `load_mnist`, `make_progressbar`,
# and `train`; only the `df` import is added below (as an assumption), the
# rest are helpers whose modules are not shown.
import DeepFried2 as df

from test import test
from model import net, lenet2

if __name__ == "__main__":
    print("THIS IS JUST AN EXAMPLE.")
    print("Please don't take these numbers as a benchmark.")
    print("While the optimizer's parameters have been grid-searched,")
    print(
        "a fair comparison would run all experiments multiple times AND RUN MORE THAN FIVE EPOCHS."
    )

    batch_size = 64

    (Xtrain, ytrain), (Xval, yval), (Xtest, ytest) = load_mnist()

    criterion = df.ClassNLLCriterion()

    def run(optim):
        progress = make_progressbar('Training with ' + str(optim), 5)
        progress.start()

        # A fresh network per optimiser, so every run starts from scratch.
        model = net()
        model.training()
        for epoch in range(5):
            # One pass of parameter updates, then a statistics-only pass.
            train(Xtrain, ytrain, model, optim, criterion, batch_size, 'train')
            train(Xtrain, ytrain, model, optim, criterion, batch_size, 'stats')
            progress.update(epoch + 1)

        progress.finish()

        # Switch the model into evaluation mode before it is scored.
        model.evaluate()
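
The snippet cuts off inside run() and never shows how it is invoked. Below is a hypothetical call-site sketch, assuming only the df.SGD optimiser that already appears in these examples; the learning rate is a placeholder, and any further optimisers being compared are not shown in the original.

    # Hypothetical call (not part of the original snippet): each optimiser
    # under comparison would be passed to run() in turn.
    run(df.SGD(lr=0.1))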