# Beispiel #1 — MNIST / LeNet-5 training example
# Location of the decompressed MNIST data set on disk.
data_path = '~/data/decompress_mnist'

if __name__ == '__main__':
    # Load MNIST resized to 32x32 (LeNet-5's expected input), shuffled.
    x_train, x_test, y_train, y_test = mnist.load_mnist(
        data_path, dst_size=(32, 32), shuffle=True)

    # Rescale pixels from [0, 255] into [-0.5, 0.5].
    x_train = x_train / 255 - 0.5
    x_test = x_test / 255 - 0.5

    # Package the splits in the dict layout pynet.Solver expects;
    # the test split doubles as the validation set here.
    data = {
        'X_train': x_train,
        'y_train': y_train,
        'X_val': x_test,
        'y_val': y_test,
    }

    # Model, loss, and Nesterov-momentum SGD with a step LR schedule
    # (decay by 10x every 5 epochs).
    net = models.LeNet5(in_channels=1, out_channels=10, dropout=0.5)
    loss_fn = nn.CrossEntropyLoss()
    sgd = optim.SGD(net.params, lr=1e-3, momentum=0.9, nesterov=True)
    scheduler = optim.StepLR(sgd, 5, gamma=0.1)

    trainer = pynet.Solver(
        net, data, loss_fn, sgd,
        lr_scheduler=scheduler, batch_size=128, num_epochs=10, print_every=1)
    trainer.train()

    # Plot the loss curve and the train/val accuracy curves.
    drawer = Draw()
    drawer(trainer.loss_history)
    drawer.multi_plot((trainer.train_acc_history, trainer.val_acc_history), ('train', 'val'),
                      title='准确率', xlabel='迭代/次', ylabel='准确率', save_path='acc.png')
    print('best_train_acc: %f; best_val_acc: %f' % (trainer.best_train_acc, trainer.best_val_acc))
# Beispiel #2 — CIFAR-10 / Network-in-Network training example
def nin_train():
    """Train a Network-in-Network model on CIFAR-10 and plot loss/accuracy.

    Relies on module-level globals defined elsewhere in the file:
    ``data_path``, ``p_h``, ``batch_size``, ``learning_rate``, ``reg``,
    and ``epochs``. Every 20 epochs, accuracy is measured on both splits;
    if both exceed 0.995, the parameters are saved and training stops.
    Side effects: prints progress, may write a ``nin-epochs-N.pkl``
    checkpoint, and saves an ``acc.png`` figure.
    """
    x_train, x_test, y_train, y_test = cifar.load_cifar10(data_path,
                                                          shuffle=True)

    # Normalize pixels from [0, 255] into [-0.5, 0.5].
    x_train = x_train / 255.0 - 0.5
    x_test = x_test / 255.0 - 0.5

    net = models.nin(in_channels=3, p_h=p_h)
    criterion = nn.CrossEntropyLoss()

    accuracy = vision.Accuracy()

    loss_list = []
    train_list = []
    test_list = []
    # Thresholds that both splits must exceed before checkpointing/stopping.
    best_train_accuracy = 0.995
    best_test_accuracy = 0.995

    # Bug fix: np.arange has an exclusive stop, so the original
    # `arange(0, N - batch_size, batch_size)` silently skipped the last
    # complete mini-batch (the one starting at N - batch_size). Using
    # `stop = N - batch_size + 1` includes it while still dropping any
    # trailing partial batch.
    range_list = np.arange(0, x_train.shape[0] - batch_size + 1, step=batch_size)
    for i in range(epochs):
        total_loss = 0
        num = 0
        start = time.time()
        for j in range_list:
            data = x_train[j:j + batch_size]
            labels = y_train[j:j + batch_size]

            # Forward pass and loss.
            scores = net(data)
            loss = criterion(scores, labels)
            total_loss += loss
            num += 1

            # Backward pass and SGD update with L2 regularization.
            grad_out = criterion.backward()
            net.backward(grad_out)
            net.update(lr=learning_rate, reg=reg)
        end = time.time()
        # Guard against ZeroDivisionError when the data set is smaller
        # than one batch (range_list empty -> num == 0).
        avg_loss = total_loss / max(num, 1)
        print('one epoch need time: %.3f' % (end - start))
        print('epoch: %d loss: %f' % (i + 1, avg_loss))
        loss_list.append(avg_loss)

        # Evaluate both splits every 20 epochs.
        if (i % 20) == 19:
            # # halve the learning rate every 20 epochs (deliberately disabled)
            # learning_rate *= 0.5

            train_accuracy = accuracy.compute_v2(x_train,
                                                 y_train,
                                                 net,
                                                 batch_size=batch_size)
            test_accuracy = accuracy.compute_v2(x_test,
                                                y_test,
                                                net,
                                                batch_size=batch_size)
            train_list.append(train_accuracy)
            test_list.append(test_accuracy)

            print(loss_list)
            print(train_list)
            print(test_list)
            # Checkpoint and stop once both splits beat the thresholds.
            if train_accuracy > best_train_accuracy and test_accuracy > best_test_accuracy:
                path = 'nin-epochs-%d.pkl' % (i + 1)
                utils.save_params(net.get_params(), path=path)
                break

    draw = vision.Draw()
    draw(loss_list, xlabel='迭代/20次')
    draw.multi_plot((train_list, test_list), ('训练集', '测试集'),
                    title='精度图',
                    xlabel='迭代/20次',
                    ylabel='精度值',
                    save_path='acc.png')