Code example #1
import keras
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator

# Project-local helpers assumed to be importable alongside this example:
# dataprocessing.preprocess(), meanStd(), normalize(), and the VGG() model builder.


def main_vgg(argv):
    # argv[1]: model name used for the checkpoint paths; argv[2] (optional):
    # numeric argument reused below as both the preprocessing scale and the
    # epoch count.
    # dataset = keras.datasets.cifar10
    scale = 1
    if len(argv) >= 3:
        scale = float(argv[2])
    (x_train, y_train), (x_test, y_test) = dataprocessing.preprocess(
        scale=scale)  # dataset.load_data()

    mean, std = meanStd(x_train.astype('float32'), x_test.astype('float32'))
    x_train = normalize(x_train.astype('float32'), mean, std)
    x_test = normalize(x_test.astype('float32'), mean, std)
    # y_train = keras.utils.to_categorical(y_train, num_classes=10)
    # y_test = keras.utils.to_categorical(y_test, num_classes=10)

    # Real-time data augmentation: small rotations, shifts and horizontal flips.
    datagen = ImageDataGenerator(featurewise_center=False,
                                 samplewise_center=False,
                                 featurewise_std_normalization=False,
                                 samplewise_std_normalization=False,
                                 zca_whitening=False,
                                 rotation_range=15,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True,
                                 vertical_flip=False)

    datagen.fit(x_train)

    model = VGG(argv[1])

    lr = 0.01
    lr_decay = 1e-6
    lr_drop = 20

    def lr_scheduler(epoch):
        # Halve the learning rate every lr_drop epochs (step decay).
        return lr * (0.5**(epoch // lr_drop))

    reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)
    sgd = SGD(lr=lr, decay=lr_decay, momentum=0.9, nesterov=True)

    # Checkpoint the model under Model/<name>.model after every epoch.
    model_cp = keras.callbacks.ModelCheckpoint("Model/{0}.model".format(
        argv[1]))

    model.compile(optimizer=sgd,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    batch_size = 128
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=int(argv[2]),
                        validation_data=(x_test, y_test),
                        callbacks=[reduce_lr, model_cp])
    print(model.evaluate(x_test, y_test))

    model.save("Model/{0}.model".format(argv[1]))
    model.save_weights("Model/{0}.weights".format(argv[1]))
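
A minimal command-line entry point for main_vgg might look like the sketch below; the script name and the "VGG16" label in the comment are placeholders, and note that argv[2] is reused inside main_vgg as both the preprocessing scale and the epoch count.

import sys

if __name__ == "__main__":
    # Example invocation (hypothetical): python train_vgg.py VGG16 100
    main_vgg(sys.argv)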
Code example #2
import pickle as pkl

import torch
import torchvision
import torchvision.transforms as transforms

# Project-local assumptions: a VGG model class whose forward pass returns the
# loss, plus a parsed `args` namespace (batch_size, lrate, epochs, use_cuda,
# eval, model_dir, decay, decay_period).


def train():
    # Standard CIFAR-10 augmentation and per-channel normalization.
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        # Note: RandomCrop(32) on a 32x32 image is a no-op unless padding
        # (e.g. padding=4) is also specified.
        transforms.RandomCrop(size=32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=False,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=False,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=2)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    model = VGG(vars(args))
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lrate,
                                momentum=0.9,
                                weight_decay=5e-4)

    if args.use_cuda:
        model = model.cuda()

    if args.eval:
        # Evaluation-only mode: load a saved checkpoint, report accuracy and exit.
        model.load_state_dict(torch.load(args.model_dir))
        model.eval()
        accuracy = model.evaluate(testloader)
        print('Eval accuracy = %.4f' % accuracy)
        exit()

    total_size = len(trainloader)
    lrate = args.lrate
    best_score = 0.0
    scores = []
    for epoch in range(1, args.epochs + 1):
        model.train()
        for i, (image, label) in enumerate(trainloader):
            if args.use_cuda:
                # Move the batch to the GPU to match the model's device.
                image, label = image.cuda(), label.cuda()

            # The custom VGG forward pass returns the training loss directly.
            loss = model(image, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 100 == 0:
                print('Epoch = %d, step = %d / %d, loss = %.5f lrate = %.5f' %
                      (epoch, i, total_size, loss.item(), lrate))

        model.eval()
        accuracy = model.evaluate(testloader)
        scores.append(accuracy)

        with open(args.model_dir + "_scores.pkl", "wb") as f:
            pkl.dump(scores, f)

        if best_score < accuracy:
            best_score = accuracy
            print('saving %s ...' % args.model_dir)
            torch.save(model.state_dict(), args.model_dir)

        if epoch % args.decay_period == 0:
            lrate *= args.decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = lrate
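
The manual step decay at the end of train() (multiply the learning rate by args.decay every args.decay_period epochs) could also be written with PyTorch's built-in scheduler; a minimal sketch under the same `optimizer` and `args` assumptions as above:

from torch.optim.lr_scheduler import StepLR

# Equivalent to the manual decay loop: scale the learning rate by args.decay
# every args.decay_period epochs.
scheduler = StepLR(optimizer, step_size=args.decay_period, gamma=args.decay)

for epoch in range(1, args.epochs + 1):
    # ... one epoch of training and evaluation, as in train() ...
    scheduler.step()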