Example #1
def trainCNN(modelName='resnet'):
    # Load the dataset and derive the number of classes
    dataset = ImageDataset()
    dataset_labels = dataset.get_all_labels()
    num_classes = len(dataset_labels)

    if modelName == 'resnet':
        model = resnet_dropout_18(num_classes=num_classes, p=cnnDropout)
    elif modelName == 'inception':
        model = Inception3(num_classes=num_classes, aux_logits=False)
    elif modelName == 'segnet':
        # TODO: Figure out how the dimensions need to change for the NYU dataset
        model = SegNet(input_channels=3,
                       output_channels=1,
                       pretrained_vgg=True)
    else:
        raise Exception("Please select one of \'resnet\' or \'inception\' or "
                        "\'segnet\'")

    if torch.cuda.is_available():
        if multiGPU:
            model = nn.DataParallel(model)
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=cnnLr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           'min',
                                                           patience=2)
    # setup the device for running
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()

    return model
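The function above builds and configures the model rather than running a training loop; it also relies on module-level settings (cnnDropout, cnnLr, multiGPU) and imports that are not shown. A minimal sketch of that assumed setup, with illustrative values:

import torch
import torch.nn as nn
import torch.optim as optim

cnnDropout = 0.5                          # dropout probability for resnet_dropout_18
cnnLr = 1e-3                              # Adam learning rate
multiGPU = torch.cuda.device_count() > 1  # wrap the model in nn.DataParallel when True

model = trainCNN(modelName='resnet')      # build the model on the available device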
Example #2
def main():
    train_loader = DataLoader(dataset=VaeDataset('train'), batch_size=batch_size, shuffle=True,
                              pin_memory=True, drop_last=True)
    val_loader = DataLoader(dataset=VaeDataset('valid'), batch_size=batch_size, shuffle=False,
                            pin_memory=True, drop_last=True)
    # Create SegNet model
    label_nbr = 3
    model = SegNet(label_nbr)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [40, xxx] -> [10, ...], [10, ...], [10, ...], [10, ...] on 4 GPUs
        model = nn.DataParallel(model)
    # Use appropriate device
    model = model.to(device)
    # print(model)

    # define the optimizer
    # optimizer = optim.LBFGS(model.parameters(), lr=0.8)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    best_loss = float('inf')
    epochs_since_improvement = 0

    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)

        # One epoch's training
        train(epoch, train_loader, model, optimizer)

        # One epoch's validation
        val_loss = valid(val_loader, model)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Check if there was an improvement
        is_best = val_loss < best_loss
        best_loss = min(best_loss, val_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer, val_loss, is_best)
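The loop above calls an adjust_learning_rate helper that is not shown. A minimal sketch, assuming it simply scales each parameter group's learning rate by the given shrink factor:

def adjust_learning_rate(optimizer, shrink_factor):
    # Scale the learning rate of every parameter group by shrink_factor.
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * shrink_factor
    print("New learning rate: %f" % (optimizer.param_groups[0]['lr'],))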
Example #3
def main():
    data_dir = './SDOBenchmark-data-full/'
    train_set = SDODataset(data_dir, mode='train')
    test_set = SDODataset(data_dir, mode='test')
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(test_set,
                            batch_size=batch_size,
                            shuffle=False,
                            pin_memory=True,
                            drop_last=True)
    # Create SegNet model
    label_nbr = 1
    model = SegNet(label_nbr, in_channels=1)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    # Use appropriate device
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=lr)

    best_loss = float('inf')
    epochs_since_improvement = 0
    state, start_epoch = load_checkpoint(mode='autoencoder')
    if start_epoch != 0:
        print("Load from checkpoint epoch: ", start_epoch - 1)
        model = state['model']
        model = model.to(device)
        optimizer = state['optimizer']
    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)

        # One epoch's training
        train_loss = train(epoch, train_loader, model, optimizer)

        # One epoch's validation
        val_loss = valid(val_loader, model)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Check if there was an improvement
        is_best = val_loss < best_loss
        best_loss = min(best_loss, val_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch,
                        model,
                        optimizer,
                        val_loss,
                        is_best,
                        mode='autoencoder',
                        train_loss=train_loss)
    print('Training finished')
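This variant resumes from and writes checkpoints through load_checkpoint and save_checkpoint, which are not shown. A hypothetical sketch that is consistent with how they are called above; the filename convention and stored fields are assumptions, not the original implementation:

import os
import torch


def save_checkpoint(epoch, model, optimizer, val_loss, is_best,
                    mode='autoencoder', train_loss=None):
    # Persist the whole model/optimizer objects plus the losses for this epoch.
    state = {'epoch': epoch, 'model': model, 'optimizer': optimizer,
             'val_loss': val_loss, 'train_loss': train_loss}
    filename = 'checkpoint_%s.tar' % mode
    torch.save(state, filename)
    if is_best:
        # Keep a separate copy of the best model seen so far.
        torch.save(state, 'BEST_checkpoint_%s.tar' % mode)


def load_checkpoint(mode='autoencoder'):
    # Return (state, start_epoch); start_epoch is 0 when there is nothing to resume.
    filename = 'checkpoint_%s.tar' % mode
    if not os.path.isfile(filename):
        return None, 0
    state = torch.load(filename)
    return state, state['epoch'] + 1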
Example #4
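            # Excerpt from the per-batch test loop (inside a method of the test class):
            # predict a mask for each batch, save a thresholded copy to disk,
            # and report the mean Dice coefficient.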
            images = sample_test[0].to(device)
            trueMasks = sample_test[1].to(device)
            predMasks = model(images)

            plt.figure()
            predTensor = (torch.exp(predMasks[0, 0, :, :]).detach().cpu())
            plt.imshow((predTensor / torch.max(predTensor)) * 255, cmap='gray')
            pilTrans = transforms.ToPILImage()
            pilImg = pilTrans((predTensor / torch.max(predTensor)) * 255)
            pilArray = np.array(pilImg)
            pilArray = (pilArray > 127)
            im = Image.fromarray(pilArray)
            im.save(self.predMaskPath + '/' + str(i_test) + '.tif')

            print((predTensor / torch.max(predTensor)) * 255)

            mBatchDice = torch.mean(Loss(trueMasks, predMasks).dice_coeff())
            print(mBatchDice.item())


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = SegNet(1, 1).to(device)
    # model = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=False, num_classes=1)
    modelName = model.__class__.__name__
    checkpoint = torch.load(test().checkpointsPath + '/' + modelName + '/' +
                            test().modelWeight)
    model.load_state_dict(checkpoint['model_state_dict'])
    model = model.to(device)
    test().main(model, device)
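The Dice score printed in the loop comes from a project-specific Loss(trueMasks, predMasks).dice_coeff() helper that is not shown. A minimal sketch of a Dice coefficient for a pair of masks, as an assumption about what that call computes:

import torch


def dice_coeff(pred, target, eps=1e-6):
    # Dice coefficient 2*|A ∩ B| / (|A| + |B|) between two (soft or binary) masks.
    pred = pred.contiguous().view(-1).float()
    target = target.contiguous().view(-1).float()
    intersection = (pred * target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)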