Example #1
def trainCNN(modelName='resnet'):
    # Build the dataset and derive the number of classes from its labels
    dataset = ImageDataset()
    dataset_labels = dataset.get_all_labels()
    num_classes = len(dataset_labels)

    if modelName == 'resnet':
        model = resnet_dropout_18(num_classes=num_classes, p=cnnDropout)
    elif modelName == 'inception':
        model = Inception3(num_classes=num_classes, aux_logits=False)
    elif modelName == 'segnet':
        # TODO: Figure out how dims need to be changed based on the NYU dataset
        model = SegNet(input_channels=3,
                       output_channels=1,
                       pretrained_vgg=True)
    else:
        raise ValueError("Please select one of 'resnet', 'inception', or "
                         "'segnet'")

    # Set up the device and move the model onto it, wrapping it in
    # DataParallel first when multiple GPUs are requested
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available() and multiGPU:
        model = nn.DataParallel(model)
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=cnnLr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           'min',
                                                           patience=2)
    # switch to inference mode before returning
    model.eval()

    return model
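
Example #1 reads its settings from module-level globals and builds a ReduceLROnPlateau scheduler that it never returns or steps. A minimal usage sketch, assuming the global names below (inferred from the call sites, not taken from the original module):

# Hypothetical module-level configuration assumed by trainCNN
cnnDropout = 0.5   # dropout probability for resnet_dropout_18
cnnLr = 1e-3       # Adam learning rate
multiGPU = False   # wrap the model in nn.DataParallel when True

model = trainCNN(modelName='resnet')

To make the scheduler effective, return it alongside the model and call scheduler.step(val_loss) once per validation epoch.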
Example #2
def main():
    train_loader = DataLoader(dataset=VaeDataset('train'), batch_size=batch_size, shuffle=True,
                              pin_memory=True, drop_last=True)
    val_loader = DataLoader(dataset=VaeDataset('valid'), batch_size=batch_size, shuffle=False,
                            pin_memory=True, drop_last=True)
    # Create SegNet model
    label_nbr = 3
    model = SegNet(label_nbr)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # DataParallel splits the batch along dim 0, e.g. 40 -> 4 x 10 on 4 GPUs
        model = nn.DataParallel(model)
    # Use appropriate device
    model = model.to(device)
    # print(model)

    # define the optimizer
    # optimizer = optim.LBFGS(model.parameters(), lr=0.8)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    best_loss = 100000
    epochs_since_improvement = 0

    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)

        # One epoch's training
        train(epoch, train_loader, model, optimizer)

        # One epoch's validation
        val_loss = valid(val_loader, model)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Check if there was an improvement
        is_best = val_loss < best_loss
        best_loss = min(best_loss, val_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer, val_loss, is_best)
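
adjust_learning_rate is called in the loop above but not shown. A plausible sketch, assuming it simply shrinks the learning rate of every parameter group by the given factor (the original helper may differ):

def adjust_learning_rate(optimizer, shrink_factor):
    # Scale the learning rate of every parameter group by shrink_factor
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * shrink_factor
    print("New learning rate: {:.6f}".format(optimizer.param_groups[0]['lr']))

With the loop above, the rate is multiplied by 0.8 after every 8 epochs without improvement.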
Example #3
    os.chdir(outputdir)

    use_gpu = torch.cuda.is_available()

    own_net = SegNet(3, 12)  # 3 input channels, 12 output channels; alternative: OwnSegNet(3)

    loaders, w_class, class_encoding, sets = dataloader.get_data_loaders(
        camvid_dataset, 1, 1, 1, single_sample=single_sample)
    trainloader, valloader, testloader = loaders
    test_set, val_set, train_set = sets

    for i, key in enumerate(class_encoding.keys()):
        print("{} \t {}".format(i, key))

    optimizer = optim.SGD(own_net.parameters(),
                          lr=1e-3,
                          weight_decay=5e-4,
                          momentum=0.9)

    # Loss function: ignore the 'unlabeled' class when computing the loss
    ignore_index = list(class_encoding).index('unlabeled')
    criterion = nn.CrossEntropyLoss(ignore_index=ignore_index)

    if use_gpu:
        own_net = own_net.cuda()

    trainer = train.Trainer(criterion,
                            optimizer,
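
The ignore_index argument makes nn.CrossEntropyLoss skip 'unlabeled' pixels entirely: they contribute neither to the loss value nor to the gradient. A small self-contained illustration (toy shapes, not the dataset setup above):

import torch
import torch.nn as nn

# Two predictions over three classes; the second target is the ignored class
logits = torch.randn(2, 3, requires_grad=True)
target = torch.tensor([1, 2])

criterion = nn.CrossEntropyLoss(ignore_index=2)
loss = criterion(logits, target)  # only the first prediction contributes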
Example #4
def main():
    data_dir = './SDOBenchmark-data-full/'
    train_set = SDODataset(data_dir, mode='train')
    test_set = SDODataset(data_dir, mode='test')
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(test_set,
                            batch_size=batch_size,
                            shuffle=False,
                            pin_memory=True,
                            drop_last=True)
    # Create SegNet model
    label_nbr = 1
    model = SegNet(label_nbr, in_channels=1)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    # Use appropriate device
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=lr)

    best_loss = 100000
    epochs_since_improvement = 0
    state, start_epoch = load_checkpoint(mode='autoencoder')
    if start_epoch != 0:
        print("Load from checkpoint epoch: ", start_epoch - 1)
        model = state['model']
        model = model.to(device)
        optimizer = state['optimizer']
    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)

        # One epoch's training
        train_loss = train(epoch, train_loader, model, optimizer)

        # One epoch's validation
        val_loss = valid(val_loader, model)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Check if there was an improvement
        is_best = val_loss < best_loss
        best_loss = min(best_loss, val_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch,
                        model,
                        optimizer,
                        val_loss,
                        is_best,
                        mode='autoencoder',
                        train_loss=train_loss)
    print('train finished')
    # Copy the convolutional layers of a pretrained VGG-16 into the SegNet
    # encoder, block by block
    layers = [
        layer for layer in vgg16.features.children()
        if isinstance(layer, nn.Conv2d)
    ]

    start = 0
    for i in range(net.encoder.block_count):
        end = start + net.encoder.blocks[i].layer_count
        net.encoder.blocks[i].initialize_from_layers(layers[start:end])
        start = end

    # Median-frequency class balancing: weight each of the 12 label classes
    # by median pixel count / total pixel count of that class
    frequencies = {i: [] for i in range(12)}

    for _, target in tqdm(train):
        for t in target:
            count = Counter(t.flatten().numpy())
            for key, value in count.items():
                frequencies[key] += [value]

    median = np.median(sum(frequencies.values(), []))
    weights = [median / np.sum(frequencies[classid]) for classid in range(12)]

    # CrossEntropyLoss expects float32 weights
    criterion = nn.CrossEntropyLoss(
        weight=torch.tensor(weights, dtype=torch.float))
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-6, weight_decay=0.001)
    scheduler = ExponentialLR(optimizer, gamma=0.9999)

    for epoch in trange(100):
        losses = train_epoch(train, net, criterion, optimizer, scheduler)
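
Examples #2 and #4 rely on save_checkpoint / load_checkpoint helpers that are not shown. A minimal sketch consistent with the call sites; the dictionary fields and file names are assumptions, not the original code:

import os
import torch

def save_checkpoint(epoch, model, optimizer, val_loss, is_best,
                    mode='autoencoder', train_loss=None):
    # Persist the full objects so that load_checkpoint can resume directly
    state = {'epoch': epoch, 'model': model, 'optimizer': optimizer,
             'val_loss': val_loss, 'train_loss': train_loss}
    torch.save(state, 'checkpoint_{}.tar'.format(mode))
    if is_best:
        torch.save(state, 'BEST_checkpoint_{}.tar'.format(mode))

def load_checkpoint(mode='autoencoder'):
    # Resume from the latest checkpoint if one exists; otherwise start fresh
    path = 'checkpoint_{}.tar'.format(mode)
    if not os.path.isfile(path):
        return None, 0
    state = torch.load(path)
    return state, state['epoch'] + 1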