def load_trained_model(model_name, weights_path, num_classes):
    """Instantiate the requested architecture and restore saved weights.

    Only ``'segnet'`` is recognised; any other name raises
    ``AssertionError``. The checkpoint is mapped onto the CPU and its
    ``'state_dict'`` entry is loaded into the freshly built model.
    """
    if model_name != 'segnet':
        raise AssertionError('model not available')
    net = SegNet(num_classes)
    saved = torch.load(weights_path, map_location='cpu')
    net.load_state_dict(saved['state_dict'])
    return net
Пример #2
0
def main():
    """Train a SegNet reconstruction model on the VAE dataset.

    Runs up to ``epochs`` epochs with early stopping: training stops after
    20 consecutive epochs without validation-loss improvement, and the
    learning rate is decayed every 8 such epochs. Relies on module-level
    configuration (``batch_size``, ``lr``, ``start_epoch``, ``epochs``,
    ``device``) and helpers (``train``, ``valid``, ``adjust_learning_rate``,
    ``save_checkpoint``) defined elsewhere in this file.
    """
    train_loader = DataLoader(dataset=VaeDataset('train'), batch_size=batch_size, shuffle=True,
                              pin_memory=True, drop_last=True)
    val_loader = DataLoader(dataset=VaeDataset('valid'), batch_size=batch_size, shuffle=False,
                            pin_memory=True, drop_last=True)
    # Create SegNet model (3 output channels)
    label_nbr = 3
    model = SegNet(label_nbr)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [40, xxx] -> [10, ...], [10, ...], [10, ...], [10, ...] on 4 GPUs
        model = nn.DataParallel(model)
    # Use appropriate device
    model = model.to(device)

    # define the optimizer
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # float('inf') rather than an arbitrary large constant, so the first
    # real validation loss always counts as an improvement
    best_loss = float('inf')
    epochs_since_improvement = 0

    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive
        # epochs, and terminate training after 20
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)

        # One epoch's training
        train(epoch, train_loader, model, optimizer)

        # One epoch's validation
        val_loss = valid(val_loader, model)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Check if there was an improvement
        is_best = val_loss < best_loss
        best_loss = min(best_loss, val_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, model, optimizer, val_loss, is_best)
Пример #3
0
def trainCNN(modelName='resnet'):
    """Build the requested CNN and return it in eval mode on the best device.

    Despite the name, no training happens here: the function constructs the
    model (sized to the number of labels in ``ImageDataset``), optionally
    wraps it in ``DataParallel``, moves it to GPU when available, and
    returns it with ``eval()`` set.

    Raises:
        Exception: if ``modelName`` is not one of the supported choices.
    """
    dataset = ImageDataset()
    dataset_labels = dataset.get_all_labels()
    num_classes = len(dataset_labels)

    if modelName == 'resnet':
        model = resnet_dropout_18(num_classes=num_classes, p=cnnDropout)
    elif modelName == 'inception':
        model = Inception3(num_classes=num_classes, aux_logits=False)
    elif modelName == 'segnet':
        # TODO: Figure out how dims need to be changed based off of NYU dataset
        model = SegNet(input_channels=3,
                       output_channels=1,
                       pretrained_vgg=True)
    else:
        raise Exception("Please select one of \'resnet\' or \'inception\' or "
                        "\'segnet\'")

    # Wrap for multi-GPU before moving to the device.
    if torch.cuda.is_available() and multiGPU:
        model = nn.DataParallel(model)
    # Single device transfer — the previous .cuda() followed by .to(device)
    # moved the model twice to the same place.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()
    # NOTE: the optimizer/scheduler previously created here were never used
    # or returned (dead locals), so they have been removed.

    return model
Пример #4
0
def main(args):
    """Train a SegNet for binary skin-lesion segmentation with PyEDDL/PyECVL.

    Builds a sigmoid-headed SegNet, trains for ``args.epochs`` epochs on the
    training split, evaluates binary IoU on the validation split each epoch,
    saves per-epoch weight checkpoints, and (when ``args.out_dir`` is set)
    writes contour-overlaid predictions and first-epoch ground-truth images.
    """
    num_classes = 1
    size = [192, 192]  # size of images
    thresh = 0.5  # binarisation threshold for the sigmoid output

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    # Model: SegNet body + sigmoid head, built for GPU or CPU computing service.
    in_ = eddl.Input([3, size[0], size[1]])
    out = SegNet(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_segmentation")

    # Training-time augmentations; validation only resizes.
    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(0.5),
        ecvl.AugFlip(0.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
    ])
    validation_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    # Reusable input/target tensors, refilled by LoadBatch each iteration.
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])
    num_samples_train = len(d.GetSplit())
    num_batches_train = num_samples_train // args.batch_size
    d.SetSplit(ecvl.SplitType.validation)
    num_samples_validation = len(d.GetSplit())
    num_batches_validation = num_samples_validation // args.batch_size
    indices = list(range(args.batch_size))

    evaluator = utils.Evaluator()
    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
        # Reshuffle the training split every epoch.
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetAllBatches()
        for b in range(num_batches_train):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_train),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            # Pixel values scaled from [0, 255] into [0, 1].
            x.div_(255.0)
            y.div_(255.0)
            tx, ty = [x], [y]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, b)
            print()

        print("Saving weights")
        eddl.save(net, "isic_segmentation_checkpoint_epoch_%s.bin" % e, "bin")

        # Per-epoch validation: mean binary IoU over the validation split.
        d.SetSplit(ecvl.SplitType.validation)
        evaluator.ResetEval()
        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        for b in range(num_batches_validation):
            # NOTE(review): n is reset for every batch but is used below to
            # index d.GetSplit(); for b > 0 this re-reads the first batch's
            # sample filenames — confirm whether b * batch_size + k was meant.
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) ".format(
                e + 1, args.epochs, b + 1, num_batches_validation),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out_sigm)
            for k in range(args.batch_size):
                img = output.select([str(k)])
                gt = y.select([str(k)])
                img_np = np.array(img, copy=False)
                gt_np = np.array(gt, copy=False)
                iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                print("- IoU: %.6g " % iou, end="", flush=True)
                if args.out_dir:
                    # C++ BinaryIoU modifies image as a side effect
                    img_np[img_np >= thresh] = 1
                    img_np[img_np < thresh] = 0
                    img_t = ecvl.TensorToView(img)
                    img_t.colortype_ = ecvl.ColorType.GRAY
                    img_t.channels_ = "xyc"
                    img.mult_(255.)
                    # orig_img: the input image rescaled back to [0, 255]
                    orig_img = x.select([str(k)])
                    orig_img.mult_(255.)
                    orig_img_t = ecvl.TensorToImage(orig_img)
                    orig_img_t.colortype_ = ecvl.ColorType.BGR
                    orig_img_t.channels_ = "xyc"

                    # Draw the contours of the predicted mask (in red, BGR
                    # channel 2) on top of the original image.
                    tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                    ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                    ecvl.ConnectedComponentsLabeling(tmp, labels)
                    ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                    contours = ecvl.FindContours(tmp)
                    ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                    tmp_np = np.array(tmp, copy=False)
                    for cseq in contours:
                        for c in cseq:
                            tmp_np[c[0], c[1], 0] = 0
                            tmp_np[c[0], c[1], 1] = 0
                            tmp_np[c[0], c[1], 2] = 255
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s.png" % head
                    output_fn = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(output_fn, tmp)
                    # Ground-truth images only need to be written once.
                    if e == 0:
                        gt_t = ecvl.TensorToView(gt)
                        gt_t.colortype_ = ecvl.ColorType.GRAY
                        gt_t.channels_ = "xyc"
                        gt.mult_(255.)
                        gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                        gt_fn = os.path.join(args.out_dir,
                                             os.path.basename(gt_filename))
                        ecvl.ImWrite(gt_fn, gt_t)
                n += 1
            print()
        print("MIoU: %.6g" % evaluator.MeanMetric())
Пример #5
0
def main(args):
    """Run inference with a trained skin-lesion SegNet (PyEDDL/PyECVL).

    Rebuilds the same sigmoid-headed SegNet used for training, loads the
    checkpoint from ``args.ckpts``, evaluates binary IoU over the test
    split, and (when ``args.out_dir`` is set) writes contour-overlaid
    predictions and ground-truth images.
    """
    num_classes = 1
    size = [192, 192]  # size of images
    thresh = 0.5  # binarisation threshold for the sigmoid output

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    # Model must match the training-time topology for the weights to load.
    in_ = eddl.Input([3, size[0], size[1]])
    out = SegNet(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_segmentation_inference")

    if not os.path.exists(args.ckpts):
        raise RuntimeError('Checkpoint "{}" not found'.format(args.ckpts))
    eddl.load(net, args.ckpts, "bin")

    # Inference only resizes; no stochastic augmentation.
    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    # Reusable input/target tensors, refilled by LoadBatch each iteration.
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])
    print("Testing")
    d.SetSplit(ecvl.SplitType.test)
    num_samples_test = len(d.GetSplit())
    num_batches_test = num_samples_test // args.batch_size

    evaluator = utils.Evaluator()
    evaluator.ResetEval()
    for b in range(num_batches_test):
        # NOTE(review): n is reset for every batch but is used below to
        # index d.GetSplit(); for b > 0 this re-reads the first batch's
        # sample filenames — confirm whether b * batch_size + k was meant.
        n = 0
        print("Batch {:d}/{:d} ".format(b + 1, num_batches_test),
              end="",
              flush=True)
        d.LoadBatch(x, y)
        # Pixel values scaled from [0, 255] into [0, 1].
        x.div_(255.0)
        y.div_(255.0)
        eddl.forward(net, [x])
        output = eddl.getOutput(out_sigm)
        for k in range(args.batch_size):
            img = output.select([str(k)])
            gt = y.select([str(k)])
            img_np, gt_np = np.array(img, copy=False), np.array(gt, copy=False)
            iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
            print("- IoU: %.6g " % iou, end="", flush=True)
            if args.out_dir:
                # C++ BinaryIoU modifies image as a side effect
                img_np[img_np >= thresh] = 1
                img_np[img_np < thresh] = 0
                img_t = ecvl.TensorToView(img)
                img_t.colortype_ = ecvl.ColorType.GRAY
                img_t.channels_ = "xyc"
                img.mult_(255.)
                # orig_img: the input image rescaled back to [0, 255]
                orig_img = x.select([str(k)])
                orig_img.mult_(255.)
                orig_img_t = ecvl.TensorToImage(orig_img)
                orig_img_t.colortype_ = ecvl.ColorType.BGR
                orig_img_t.channels_ = "xyc"

                # Draw the contours of the predicted mask (in red, BGR
                # channel 2) on top of the original image.
                tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                ecvl.ConnectedComponentsLabeling(tmp, labels)
                ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                contours = ecvl.FindContours(tmp)
                ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                tmp_np = np.array(tmp, copy=False)
                for cseq in contours:
                    for c in cseq:
                        tmp_np[c[0], c[1], 0] = 0
                        tmp_np[c[0], c[1], 1] = 0
                        tmp_np[c[0], c[1], 2] = 255

                filename = d.samples_[d.GetSplit()[n]].location_[0]
                head, tail = os.path.splitext(os.path.basename(filename))
                bname = "%s.png" % head
                output_fn = os.path.join(args.out_dir, bname)
                ecvl.ImWrite(output_fn, tmp)

                gt_t = ecvl.TensorToView(gt)
                gt_t.colortype_ = ecvl.ColorType.GRAY
                gt_t.channels_ = "xyc"
                gt.mult_(255.)
                gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                gt_fn = os.path.join(args.out_dir,
                                     os.path.basename(gt_filename))
                ecvl.ImWrite(gt_fn, gt_t)
            n += 1
        print()
    print("MIoU: %.6g" % evaluator.MeanMetric())
Пример #6
0
# Prediction-time setup: build the validation dataset, restore a trained
# SegNet checkpoint, and fetch one sample for inspection.
# cv_dataset = DatasetFromjpg('./VOC2012/', mold='val', transforms=transformations, output_size=(320,320),predct=True)
# test_loader = DataLoader(dataset=cv_dataset, batch_size=10, shuffle=True, num_workers=2)
# cv_dataset = CamVidDataset('./CamVid/', mold='test', transforms=transformations, output_size=(352, 480), predct=True)
cv_dataset = DatasetFrombaidu('./baidu/',
                              mold='val',
                              transforms=transformations,
                              output_size=(720, 720),
                              predct=True)
# cv_loader = DataLoader(dataset=cv_dataset, batch_size=1, shuffle=False, num_workers=8, drop_last=True)
# Define the prediction routine
# print(cv_dataset.datalen)
# cm = np.array(COLORMAP).astype('uint8')
# cm = np.array(CamVid_colours).astype('uint8')
n_class = 9
# Reuse n_class instead of repeating the magic number 9.
net = SegNet(num_classes=n_class)
# net=SegNet(num_classes=12)
net.cuda()
net.eval()
# Renamed from 'dir' so the builtin dir() is not shadowed.
ckpt_file = './checkpoints/baiduSegNet5.pth'
state = t.load(ckpt_file)
net.load_state_dict(state['net'])
test_data, test_label = cv_dataset[1]
print(test_data.size())

# out=net(Variable(test_data.unsqueeze(0)).cuda())
# print(out.data.size())
# pred = out.max(1)[1].squeeze().cpu().data.numpy()
# print(pred.shape)

Пример #7
0
    pil_img = to_pil(out_img)
    return pil_img


if __name__ == '__main__':

    single_sample = True

    outputdir = os.path.dirname(os.path.abspath(__file__))

    os.chdir(outputdir)

    use_gpu = torch.cuda.is_available()

    own_net = SegNet(3, 12)  #OwnSegNet(3)

    loaders, w_class, class_encoding, sets = dataloader.get_data_loaders(
        camvid_dataset, 1, 1, 1, single_sample=single_sample)
    trainloader, valloader, testloader = loaders
    test_set, val_set, train_set = sets

    for i, key in enumerate(class_encoding.keys()):
        print("{} \t {}".format(i, key))

    optimizer = optim.SGD(own_net.parameters(),
                          lr=1e-3,
                          weight_decay=5e-4,
                          momentum=0.9)

    # Evaluation metric
def main():
    """Train SegNet on CityScapes, optionally resuming from a snapshot.

    Snapshot filenames encode epoch/loss/IoU as '_'-separated fields, which
    are parsed to restore ``curr_epoch`` and ``train_record``. Uses
    module-level configuration (``train_args``, ``val_args``, ``ckpt_path``,
    ``exp_name``, ``num_classes``, ``ignored_label``, ``train_record``) and
    helpers (``train``, ``validate``, ``CrossEntropyLoss2d``) defined
    elsewhere in this file.
    """
    net = SegNet(num_classes=num_classes).cuda()
    if len(train_args['snapshot']) == 0:
        curr_epoch = 0
    else:
        # Fixed: was a Python-2 `print` statement, a SyntaxError under
        # Python 3 (the rest of this file uses the print() function).
        print('training resumes from ' + train_args['snapshot'])
        net.load_state_dict(
            torch.load(
                os.path.join(ckpt_path, exp_name, train_args['snapshot'])))
        # Snapshot name fields: [..]_epoch_[..]_loss_[..]_iu_[..] style,
        # indices 1/3/6 hold epoch, best val loss and corresponding mean IoU.
        split_snapshot = train_args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1])
        train_record['best_val_loss'] = float(split_snapshot[3])
        train_record['corr_mean_iu'] = float(split_snapshot[6])
        train_record['corr_epoch'] = curr_epoch

    net.train()

    # ImageNet normalisation statistics.
    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    train_simul_transform = simul_transforms.Compose([
        simul_transforms.Scale(int(train_args['input_size'][0] / 0.875)),
        simul_transforms.RandomCrop(train_args['input_size']),
        simul_transforms.RandomHorizontallyFlip()
    ])
    val_simul_transform = simul_transforms.Compose([
        simul_transforms.Scale(int(train_args['input_size'][0] / 0.875)),
        simul_transforms.CenterCrop(train_args['input_size'])
    ])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    # Remap the ignored label onto the last class index, which is then
    # zero-weighted in the loss below.
    target_transform = standard_transforms.Compose([
        expanded_transforms.MaskToTensor(),
        expanded_transforms.ChangeLabel(ignored_label, num_classes - 1)
    ])
    restore_transform = standard_transforms.Compose([
        expanded_transforms.DeNormalize(*mean_std),
        standard_transforms.ToPILImage()
    ])

    train_set = CityScapes('train',
                           simul_transform=train_simul_transform,
                           transform=img_transform,
                           target_transform=target_transform)
    train_loader = DataLoader(train_set,
                              batch_size=train_args['batch_size'],
                              num_workers=16,
                              shuffle=True)
    val_set = CityScapes('val',
                         simul_transform=val_simul_transform,
                         transform=img_transform,
                         target_transform=target_transform)
    val_loader = DataLoader(val_set,
                            batch_size=val_args['batch_size'],
                            num_workers=16,
                            shuffle=False)

    # Zero weight for the (remapped) ignored class.
    weight = torch.ones(num_classes)
    weight[num_classes - 1] = 0
    criterion = CrossEntropyLoss2d(weight).cuda()

    # Four parameter groups: {bias, non-bias} x {decoder ('dec'), pretrained
    # encoder}. Biases get 2x LR and no weight decay.
    optimizer = optim.SGD([{
        'params': [
            param for name, param in net.named_parameters()
            if name[-4:] == 'bias' and 'dec' in name
        ],
        'lr':
        2 * train_args['new_lr']
    }, {
        'params': [
            param for name, param in net.named_parameters()
            if name[-4:] != 'bias' and 'dec' in name
        ],
        'lr':
        train_args['new_lr'],
        'weight_decay':
        train_args['weight_decay']
    }, {
        'params': [
            param for name, param in net.named_parameters()
            if name[-4:] == 'bias' and 'dec' not in name
        ],
        'lr':
        2 * train_args['pretrained_lr']
    }, {
        'params': [
            param for name, param in net.named_parameters()
            if name[-4:] != 'bias' and 'dec' not in name
        ],
        'lr':
        train_args['pretrained_lr'],
        'weight_decay':
        train_args['weight_decay']
    }],
                          momentum=0.9,
                          nesterov=True)

    if len(train_args['snapshot']) > 0:
        # Restore optimizer state, then reset LRs (the saved state may have
        # decayed values).
        optimizer.load_state_dict(
            torch.load(
                os.path.join(ckpt_path, exp_name,
                             'opt_' + train_args['snapshot'])))
        optimizer.param_groups[0]['lr'] = 2 * train_args['new_lr']
        optimizer.param_groups[1]['lr'] = train_args['new_lr']
        optimizer.param_groups[2]['lr'] = 2 * train_args['pretrained_lr']
        optimizer.param_groups[3]['lr'] = train_args['pretrained_lr']

    # makedirs creates ckpt_path and the experiment subdirectory in one
    # race-free call (replaces two check-then-mkdir steps).
    os.makedirs(os.path.join(ckpt_path, exp_name), exist_ok=True)

    for epoch in range(curr_epoch, train_args['epoch_num']):
        train(train_loader, net, criterion, optimizer, epoch)
        validate(val_loader, net, criterion, optimizer, epoch,
                 restore_transform)
Пример #9
0
def main():
    """Train a single-channel SegNet autoencoder on SDOBenchmark data.

    Uses early stopping (20 epochs without improvement) with LR decay every
    8 stagnant epochs, and can resume from a saved autoencoder checkpoint.

    NOTE(review): everything after the 'train finished' print references
    names (``root``, ``args``, model classes) that are not defined in this
    function — it appears to be a fragment of a different snippet glued on
    by the scraper. Verify before reuse.
    """
    Dataset = './SDOBenchmark-data-full/'
    train_set = SDODataset(Dataset, mode='train')
    test_set = SDODataset(Dataset, mode='test')
    train_loader = DataLoader(train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(test_set,
                            batch_size=batch_size,
                            shuffle=False,
                            pin_memory=True,
                            drop_last=True)
    # Create SegNet model (1 greyscale input channel, 1 output channel)
    label_nbr = 1
    model = SegNet(label_nbr, in_channels=1)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    # Use appropriate device
    model = model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=lr)

    best_loss = 100000
    epochs_since_improvement = 0
    # Resume model and optimizer from the latest autoencoder checkpoint.
    state, start_epoch = load_checkpoint(mode='autoencoder')
    if start_epoch != 0:
        print("Load from checkpoint epoch: ", start_epoch - 1)
        model = state['model']
        model = model.to(device)
        optimizer = state['optimizer']
    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
        if epochs_since_improvement == 20:
            break
        if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
            adjust_learning_rate(optimizer, 0.8)

        # One epoch's training
        train_loss = train(epoch, train_loader, model, optimizer)

        # One epoch's validation
        val_loss = valid(val_loader, model)
        print('\n * LOSS - {loss:.3f}\n'.format(loss=val_loss))

        # Check if there was an improvement
        is_best = val_loss < best_loss
        best_loss = min(best_loss, val_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch,
                        model,
                        optimizer,
                        val_loss,
                        is_best,
                        mode='autoencoder',
                        train_loss=train_loss)
    print('train finished')
    # NOTE(review): glued fragment starts here (see docstring).
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 3
    channels=list(map(int, args.channels.split(','))) #5
    input_channels=len(channels)
    print('channels:',channels,'len',input_channels)


    if args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'UNet':
        model = UNet(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    elif args.model == 'SegNet':
        model = SegNet(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    elif args.model == 'DeepLabV3':
        model = deeplabv3_resnet101(pretrained=False, progress=True, num_classes=num_classes)
        #model = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True, num_classes=num_classes)
    elif args.model == 'FCN':
        model = fcn_resnet101(pretrained=False, progress=True, num_classes=num_classes)
    else:
        model = UNet11(num_classes=num_classes, input_channels=input_channels)


    if torch.cuda.is_available():
        if args.device_ids:#
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        #model = nn.DataParallel(model, device_ids=device_ids).cuda()
Пример #11
0
            images = sample_test[0].to(device)
            trueMasks = sample_test[1].to(device)
            predMasks = model(images)

            plt.figure()
            predTensor = (torch.exp(predMasks[0, 0, :, :]).detach().cpu())
            plt.imshow((predTensor / torch.max(predTensor)) * 255, cmap='gray')
            pilTrans = transforms.ToPILImage()
            pilImg = pilTrans((predTensor / torch.max(predTensor)) * 255)
            pilArray = np.array(pilImg)
            pilArray = (pilArray > 127)
            im = Image.fromarray(pilArray)
            im.save(self.predMaskPath + '/' + str(i_test) + '.tif')

            print((predTensor / torch.max(predTensor)) * 255)

            mBatchDice = torch.mean(Loss(trueMasks, predMasks).dice_coeff())
            print(mBatchDice.item())


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # 1 input channel, 1 output channel.
    model = SegNet(1, 1).to(device)
    # model = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=False, num_classes=1)
    # Checkpoint directory is keyed by the model's class name.
    modelName = model.__class__.__name__
    # NOTE(review): test() is instantiated three separate times here (twice
    # for paths, once to run) — confirm construction is cheap/side-effect free.
    checkpoint = torch.load(test().checkpointsPath + '/' + modelName + '/' +
                            test().modelWeight)
    model.load_state_dict(checkpoint['model_state_dict'])
    model = model.to(device)
    test().main(model, device)
Пример #12
0
    print('Training size = {}'.format(len(trainset)))

    # Dataloaders
    trainloader = DataLoader(trainset, batch_size=args.batch, pin_memory=True)

    # Model
    if args.model == 'unet':
        if args.multitask:
            model = MultiTaskUNet(3, 1, R=5)
        else:
            model = UNet(3, 1)
    elif args.model == 'segnet':
        if args.multitask:
            model = MultiTaskSegNet(3, 1, R=5)
        else:
            model = SegNet(3, 1)
    else:
        raise ValueError('unknown model {}'.format(args.model))

    if torch.cuda.is_available():
        print('CUDA available -> Transfering to CUDA')
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
        print('CUDA unavailable')

    model = model.to(device)

    os.makedirs(args.destination, exist_ok=True)
    basename = os.path.join(args.destination,
                            args.model if args.name is None else args.name)
Пример #13
0
def main():
    """Train a water-segmentation model with nested K-fold splits.

    Parses CLI options, builds one of UNet11/UNet/AlbuNet34/SegNet, splits
    the data into train/val folds, normalises with dataset statistics,
    trains via ``utilsTrain.train_model``, saves the final weights and runs
    ``find_metrics`` on all splits.
    """
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold-out', type=int, default='0', help='fold train-val test')
    arg('--fold-in', type=int, default='0', help='fold train val')
    arg('--percent', type=float, default=1, help='percent of data')
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=4, help='HR:4,VHR:8')
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=40)
    arg('--lr', type=float, default=1e-3)
    arg('--model',
        type=str,
        default='UNet11',
        choices=['UNet11', 'UNet', 'AlbuNet34', 'SegNet'])
    arg('--dataset-path',
        type=str,
        default='data_VHR',
        help='main file,in which the dataset is:  data_VHR or data_HR')
    arg('--dataset-file',
        type=str,
        default='VHR',
        help='resolution of the dataset VHR,HR')
    #arg('--out-file', type=str, default='VHR', help='the file in which save the outputs')
    arg('--train-val-file',
        type=str,
        default='train_val_850',
        help='name of the train-val file VHR:train_val_850 or train_val_HR')
    arg('--test-file',
        type=str,
        default='test_850',
        help='name of the test file VHR:test_850 or HR:test_HR')

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    # Binary water mask, 4 input bands.
    num_classes = 1
    input_channels = 4

    if args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'UNet':
        model = UNet(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes,
                          num_input_channels=input_channels,
                          pretrained=False)
    elif args.model == 'SegNet':
        model = SegNet(num_classes=num_classes,
                       num_input_channels=input_channels,
                       pretrained=False)
    else:
        model = UNet11(num_classes=num_classes, input_channels=input_channels)

    if torch.cuda.is_available():
        if args.device_ids:  #
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    cudnn.benchmark = True

    ####################Change the files_names ######################################
    out_path = Path(('logs_{}/mapping/').format(args.dataset_file))
    name_file = '_' + str(int(
        args.percent * 100)) + '_percent_' + args.dataset_file
    data_all = 'data'  ##file with all the data

    data_path = Path(args.dataset_path)
    print("data_path:", data_path)
    #################################################################################
    # Nested cross validation K-fold train test
    #train_val_file_names, test_file_names = get_split_out(data_path,data_all,args.fold_out)
    #################################################################################
    #eWe are consider the same test in all the cases
    train_val_file_names = np.array(
        sorted(
            glob.glob(
                str(data_path / args.train_val_file / 'images') + "/*.npy")))
    test_file_names = np.array(
        sorted(
            glob.glob(str(data_path / args.test_file / 'images') + "/*.npy")))

    # Optionally subsample the train/val pool to args.percent of the data.
    if args.percent != 1:
        extra, train_val_file_names = percent_split(train_val_file_names,
                                                    args.percent)

    #################################################################################

    train_file_names, val_file_names = get_split_in(train_val_file_names,
                                                    args.fold_in)

    # Persist the exact file lists used for this fold, for reproducibility.
    np.save(
        str(
            os.path.join(
                out_path, "train_files{}_{}_fold{}_{}.npy".format(
                    name_file, args.model, args.fold_out, args.fold_in))),
        train_file_names)
    np.save(
        str(
            os.path.join(
                out_path,
                "val_files{}_{}_fold{}_{}.npy".format(name_file, args.model,
                                                      args.fold_out,
                                                      args.fold_in))),
        val_file_names)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    # Small factory so train/val loaders share construction boilerplate.
    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    mode='train',
                    batch_size=4,
                    limit=None):
        return DataLoader(dataset=WaterDataset(file_names,
                                               transform=transform,
                                               mode=mode,
                                               limit=limit),
                          shuffle=shuffle,
                          batch_size=batch_size,
                          pin_memory=torch.cuda.is_available())

    # Normalisation statistics computed over all splits.
    max_values, mean_values, std_values = meanstd(train_file_names,
                                                  val_file_names,
                                                  test_file_names,
                                                  str(data_path),
                                                  input_channels)  #_60
    print(max_values, mean_values, std_values)
    # NOTE(review): if dataset_file is neither 'VHR' nor 'HR', no loaders are
    # created and the dataloaders dict below raises NameError — confirm the
    # CLI only ever passes these two values.
    if (args.dataset_file == 'VHR'):
        train_transform = DualCompose([
            CenterCrop(512),
            HorizontalFlip(),
            VerticalFlip(),
            Rotate(),
            ImageOnly(Normalize(mean=mean_values, std=std_values))
        ])

        val_transform = DualCompose([
            CenterCrop(512),
            ImageOnly(Normalize(mean=mean_values, std=std_values))
        ])
        # NOTE(review): overwrites the computed max_values with a constant
        # for VHR — presumably a fixed sensor maximum; verify.
        max_values = 3521
        train_loader = make_loader(train_file_names,
                                   shuffle=True,
                                   transform=train_transform,
                                   mode='train',
                                   batch_size=args.batch_size)  #4 batch_size
        valid_loader = make_loader(val_file_names,
                                   transform=val_transform,
                                   batch_size=args.batch_size,
                                   mode="train")

    if (args.dataset_file == 'HR'):
        train_transform = DualCompose([
            CenterCrop(64),
            HorizontalFlip(),
            VerticalFlip(),
            Rotate(),
            ImageOnly(Normalize2(mean=mean_values, std=std_values))
        ])

        val_transform = DualCompose([
            CenterCrop(64),
            ImageOnly(Normalize2(mean=mean_values, std=std_values))
        ])
        train_loader = make_loader(train_file_names,
                                   shuffle=True,
                                   transform=train_transform,
                                   mode='train',
                                   batch_size=args.batch_size)  #8 batch_size
        valid_loader = make_loader(val_file_names,
                                   transform=val_transform,
                                   mode="train",
                                   batch_size=args.batch_size // 2)


#albunet 34 with only 3 batch_size

    dataloaders = {'train': train_loader, 'val': valid_loader}

    dataloaders_sizes = {x: len(dataloaders[x]) for x in dataloaders.keys()}

    # Record the full CLI configuration alongside the checkpoints.
    root.joinpath(('params_{}.json').format(args.dataset_file)).write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    optimizer_ft = optim.Adam(model.parameters(), lr=args.lr)  #
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=20,
                                           gamma=0.1)

    utilsTrain.train_model(dataset_file=args.dataset_file,
                           name_file=name_file,
                           model=model,
                           optimizer=optimizer_ft,
                           scheduler=exp_lr_scheduler,
                           dataloaders=dataloaders,
                           fold_out=args.fold_out,
                           fold_in=args.fold_in,
                           name_model=args.model,
                           num_epochs=args.n_epochs)

    # model.module: unwrap DataParallel so the raw weights are saved.
    torch.save(
        model.module.state_dict(),
        (str(out_path) + '/model{}_{}_foldout{}_foldin{}_{}epochs').format(
            name_file, args.model, args.fold_out, args.fold_in, args.n_epochs))

    print(args.model)

    find_metrics(train_file_names=train_file_names,
                 val_file_names=val_file_names,
                 test_file_names=test_file_names,
                 max_values=max_values,
                 mean_values=mean_values,
                 std_values=std_values,
                 model=model,
                 fold_out=args.fold_out,
                 fold_in=args.fold_in,
                 name_model=args.model,
                 epochs=args.n_epochs,
                 out_file=args.dataset_file,
                 dataset_file=args.dataset_file,
                 name_file=name_file)
def main():
    """Train SegNet on a VOC-style dataset with SGD and manual LR decay.

    Relies on module-level configuration (``num_classes``, ``ignored_label``,
    ``ckpt_path``, ``train_path``, ``val_path``) and the model/dataset/
    transform helpers imported at file level.  Trains for up to 200 epochs,
    validating after every epoch; ``validate`` tracks the best checkpoint via
    the mutable ``best`` list.
    """
    training_batch_size = 8
    validation_batch_size = 8
    epoch_num = 200
    iter_freq_print_training_log = 50
    lr = 1e-4

    net = SegNet(pretrained=True, num_classes=num_classes).cuda()
    curr_epoch = 0

    # To resume from a snapshot instead, load its weights and recover the
    # starting epoch from the file name, e.g.:
    # net = FCN8VGG(pretrained=False, num_classes=num_classes).cuda()
    # snapshot = 'epoch_41_validation_loss_2.1533_mean_iu_0.5225.pth'
    # net.load_state_dict(torch.load(os.path.join(ckpt_path, snapshot)))
    # split_res = snapshot.split('_')
    # curr_epoch = int(split_res[1])

    net.train()

    # ImageNet mean/std — used both to normalize inputs and (via DeNormalize)
    # to restore images for visualization.
    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    train_simultaneous_transform = SimultaneousCompose([
        SimultaneousRandomHorizontallyFlip(),
        SimultaneousRandomScale((0.9, 1.1)),
        SimultaneousRandomCrop((300, 500))
    ])
    train_transform = transforms.Compose([
        RandomGaussianBlur(),
        transforms.ToTensor(),
        transforms.Normalize(*mean_std)
    ])
    val_simultaneous_transform = SimultaneousCompose(
        [SimultaneousScale((300, 500))])
    val_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(*mean_std)])
    restore = transforms.Compose(
        [DeNormalize(*mean_std),
         transforms.ToPILImage()])

    train_set = VOC(train_path,
                    simultaneous_transform=train_simultaneous_transform,
                    transform=train_transform,
                    target_transform=MaskToTensor())
    train_loader = DataLoader(train_set,
                              batch_size=training_batch_size,
                              num_workers=8,
                              shuffle=True)
    val_set = VOC(val_path,
                  simultaneous_transform=val_simultaneous_transform,
                  transform=val_transform,
                  target_transform=MaskToTensor())
    val_loader = DataLoader(val_set,
                            batch_size=validation_batch_size,
                            num_workers=8)

    criterion = CrossEntropyLoss2d(ignored_label=ignored_label)
    # Two parameter groups: biases get no weight decay, everything else 5e-4.
    optimizer = optim.SGD([{
        'params': [
            param
            for name, param in net.named_parameters() if name[-4:] == 'bias'
        ]
    }, {
        'params': [
            param
            for name, param in net.named_parameters() if name[-4:] != 'bias'
        ],
        'weight_decay':
        5e-4
    }],
                          lr=lr,
                          momentum=0.9,
                          nesterov=True)

    # Create the checkpoint directory race-free, including missing parents
    # (the original exists()+mkdir was race-prone and failed when the parent
    # directory did not exist).
    os.makedirs(ckpt_path, exist_ok=True)

    best = [1e9, -1, -1]  # [best_val_loss, best_mean_iu, best_epoch]

    for epoch in range(curr_epoch, epoch_num):
        train(train_loader, net, criterion, optimizer, epoch,
              iter_freq_print_training_log)
        # Decay the learning rate by 3x every 20 epochs.
        if (epoch + 1) % 20 == 0:
            lr /= 3
            adjust_lr(optimizer, lr)
        validate(epoch, val_loader, net, criterion, restore, best)
Пример #15
0
                plt.show()

                # logging.basicConfig(level=logging.DEBUG)
                # morphSnake(imagesArray, center_of_mass, dia/2, 200).example_lakes()
                # logging.info("Done.")
                # plt.show()

            # if i_train > 0:
            #     break
            #
            # break


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model1 = SegNet(1, 1).to(device)
    modelName1 = model1.__class__.__name__
    model2 = torchvision.models.segmentation.deeplabv3_resnet101(
        pretrained=False, num_classes=1)
    model2 = model2.to(device)
    modelName2 = model2.__class__.__name__
    model3 = UNet(1, 1).to(device)
    modelName3 = model3.__class__.__name__

    model1_checkpoint = torch.load(
        train().checkpointsPath + '/' + modelName1 + '/' +
        '2019-08-30 13:21:52.559302_epoch-5_dice-0.4743926368317377.pth')
    model2_checkpoint = torch.load(
        train().checkpointsPath + '/' + modelName2 + '/' +
        '2019-08-22 08:37:06.839794_epoch-1_dice-0.4479589270841744.pth')
    model3_checkpoint = torch.load(
def main():
    """Semi-supervised training entry point for the VHR water-mapping task.

    Parses CLI arguments, builds labeled and unlabeled VHR data loaders,
    trains the selected VHR model with ``utilsTrain_seq.train_model`` guided
    by a frozen pre-trained HR UNet11, saves the final weights, and reports
    metrics on all splits via ``find_metrics``.
    """
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--device-ids', type=str, default='0', help='For example 0,1 to run on two GPUs')
    arg('--fold-out', type=int, help='fold train test', default=0)
    arg('--fold-in', type=int, help='fold train val', default=0)
    arg('--percent', type=float, help='percent of data', default=1)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=4)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=40)
    arg('--n-steps', type=int, default=200)
    arg('--lr', type=float, default=0.003)
    arg('--modelVHR', type=str, default='UNet11', choices=['UNet11', 'UNet', 'AlbuNet34', 'SegNet'])
    arg('--dataset-path-HR', type=str, default='data_HR', help='ain path  of the HR dataset')
    arg('--model-path-HR', type=str, default='logs_HR/mapping/model_40epoch_HR_UNet11.pth', help='path of the model of HR')
    arg('--dataset-path-VHR', type=str, default='data_VHR', help='ain path  of the VHR dataset')
    arg('--name-file-HR', type=str, default='_HR', help='name file of HR dataset')
    arg('--dataset-file', type=str, default='VHR', help='main dataset resolution,depend of this correspond a specific crop')
    arg('--out-file', type=str, default='seq', help='the file in which save the outputs')
    arg('--train-val-file-HR', type=str, default='train_val_HR', help='name of the train-val file')
    arg('--test-file-HR', type=str, default='test_HR', help='name of the test file')
    arg('--train-val-file-VHR', type=str, default='train_val_850', help='name of the train-val file')
    arg('--test-file-VHR', type=str, default='test_850', help='name of the test file')

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 1
    input_channels = 4

    # Select the VHR architecture; any unknown choice falls back to UNet11.
    if args.modelVHR == 'UNet11':
        model_VHR = UNet11(num_classes=num_classes, input_channels=input_channels)
    elif args.modelVHR == 'UNet':
        model_VHR = UNet(num_classes=num_classes, input_channels=input_channels)
    elif args.modelVHR == 'AlbuNet34':
        model_VHR = AlbuNet34(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    elif args.modelVHR == 'SegNet':
        model_VHR = SegNet(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    else:
        model_VHR = UNet11(num_classes=num_classes, input_channels=4)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model_VHR = nn.DataParallel(model_VHR, device_ids=device_ids).cuda()

    cudnn.benchmark = True

    out_path = Path(('logs_{}/mapping/').format(args.out_file))

    # ------------------------- VHR data paths -------------------------
    data_path_VHR = Path(args.dataset_path_VHR)
    print("data_path:", data_path_VHR)

    name_file_VHR = '_' + str(int(args.percent * 100)) + '_percent_' + args.out_file
    data_all = 'data'

    # Cross-validation over the labeled file list.  (The nested K-fold
    # train/test split is disabled:
    # train_val_file_names, test_file_names_HR = get_split_out(data_path_HR, data_all, args.fold_out)
    # )
    train_val_file_names = np.array(sorted(glob.glob(str((data_path_VHR / args.train_val_file_VHR / 'images')) + "/*.npy")))
    test_file_names_VHR = np.array(sorted(glob.glob(str((data_path_VHR / args.test_file_VHR / 'images')) + "/*.npy")))

    if args.percent != 1:
        extra, train_val_file_names = percent_split(train_val_file_names, args.percent)

    train_file_VHR_lab, val_file_VHR_lab = get_split_in(train_val_file_names, args.fold_in)
    # NOTE(review): assumes out_path already exists — np.save does not create
    # directories; confirm a prior run (or external setup) creates it.
    np.save(str(os.path.join(out_path, "train_files{}_{}_fold{}_{}.npy".format(name_file_VHR, args.modelVHR, args.fold_out, args.fold_in))), train_file_VHR_lab)
    np.save(str(os.path.join(out_path, "val_files{}_{}_fold{}_{}.npy".format(name_file_VHR, args.modelVHR, args.fold_out, args.fold_in))), val_file_VHR_lab)

    # ---------------------- unlabeled VHR data paths ----------------------
    train_path_VHR_unlab = data_path_VHR / 'unlabel' / 'train' / 'images'
    val_path_VHR_unlab = data_path_VHR / 'unlabel' / 'val' / 'images'

    train_file_VHR_unlab = np.array(sorted(list(train_path_VHR_unlab.glob('*.npy'))))
    val_file_VHR_unlab = np.array(sorted(list(val_path_VHR_unlab.glob('*.npy'))))

    print('num train_lab = {}, num_val_lab = {}'.format(len(train_file_VHR_lab), len(val_file_VHR_lab)))
    print('num train_unlab = {}, num_val_unlab = {}'.format(len(train_file_VHR_unlab), len(val_file_VHR_unlab)))

    # Per-channel statistics computed over all labeled/test files.
    max_values_VHR, mean_values_VHR, std_values_VHR = meanstd(train_file_VHR_lab, val_file_VHR_lab, test_file_names_VHR, str(data_path_VHR), input_channels)

    def make_loader(file_names, shuffle=False, transform=None, mode='train', batch_size=4, limit=None):
        # Thin DataLoader wrapper around WaterDataset; pins memory on CUDA hosts.
        return DataLoader(
            dataset=WaterDataset(file_names, transform=transform, mode=mode, limit=limit),
            shuffle=shuffle,
            batch_size=batch_size,
            pin_memory=torch.cuda.is_available()
        )

    # ------------------------- transformations -------------------------
    train_transform_VHR = DualCompose([
        CenterCrop(512),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        ImageOnly(Normalize(mean=mean_values_VHR, std=std_values_VHR))
    ])

    val_transform_VHR = DualCompose([
        CenterCrop(512),
        ImageOnly(Normalize(mean=mean_values_VHR, std=std_values_VHR))
    ])

    # Fixed per-channel statistics of the HR dataset.
    mean_values_HR = (0.11952524, 0.1264638, 0.13479991, 0.15017026)
    std_values_HR = (0.08844988, 0.07304429, 0.06740904, 0.11003125)

    train_transform_VHR_unlab = DualCompose([
        CenterCrop(512),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        ImageOnly(Normalize(mean=mean_values_HR, std=std_values_HR))
    ])

    val_transform_VHR_unlab = DualCompose([
        CenterCrop(512),
        ImageOnly(Normalize(mean=mean_values_HR, std=std_values_HR))
    ])

    # ------------------------- data loaders -------------------------
    # NOTE(review): the unlabeled loaders below use the *labeled* transforms;
    # train_transform_VHR_unlab / val_transform_VHR_unlab are defined but
    # never used — confirm whether HR statistics were intended here.
    # Batch sizes are hard-coded and ignore --batch-size.
    train_loader_VHR_lab = make_loader(train_file_VHR_lab, shuffle=True, transform=train_transform_VHR, batch_size=2, mode="train")
    valid_loader_VHR_lab = make_loader(val_file_VHR_lab, transform=val_transform_VHR, batch_size=4, mode="train")

    train_loader_VHR_unlab = make_loader(train_file_VHR_unlab, shuffle=True, transform=train_transform_VHR, batch_size=4, mode="unlb_train")
    valid_loader_VHR_unlab = make_loader(val_file_VHR_unlab, transform=val_transform_VHR, batch_size=2, mode="unlb_val")

    dataloaders_VHR_lab = {
        'train': train_loader_VHR_lab, 'val': valid_loader_VHR_lab
    }

    dataloaders_VHR_unlab = {
        'train': train_loader_VHR_unlab, 'val': valid_loader_VHR_unlab
    }

    # Persist the full CLI configuration for reproducibility.
    root.joinpath(('params_{}.json').format(args.out_file)).write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    # Adam over all parameters; step LR schedule (10x decay every 20 epochs).
    optimizer_ft = optim.Adam(model_VHR.parameters(), lr=args.lr)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1)

    # ---------------------- frozen, pre-trained HR model ----------------------
    PATH_HR = args.model_path_HR
    model_HR = UNet11(num_classes=num_classes)
    model_HR.cuda()
    model_HR.load_state_dict(torch.load(PATH_HR))

    model_VHR = utilsTrain_seq.train_model(
        out_file=args.out_file,
        name_file_VHR=name_file_VHR,
        model_HR=model_HR,
        model_VHR=model_VHR,
        optimizer=optimizer_ft,
        scheduler=exp_lr_scheduler,
        dataloaders_VHR_lab=dataloaders_VHR_lab,
        dataloaders_VHR_unlab=dataloaders_VHR_unlab,
        fold_out=args.fold_out,
        fold_in=args.fold_in,
        name_model_VHR=args.modelVHR,
        n_steps=args.n_steps,
        num_epochs=args.n_epochs
    )

    # BUG FIX: the original passed args.n_epochs as the *first* .format()
    # argument (6 args for 5 placeholders), shifting every field one slot
    # right — the saved file name came out as
    # 'model{n_epochs}_{name_file}_foldout{model}_foldin{fold_out}_{fold_in}epochs.pth'
    # with the trailing args.n_epochs silently ignored.  Argument order now
    # matches the placeholders (same convention as the sibling script).
    torch.save(
        model_VHR.module.state_dict(),
        (str(out_path) + '/model{}_{}_foldout{}_foldin{}_{}epochs.pth').format(
            name_file_VHR, args.modelVHR, args.fold_out, args.fold_in,
            args.n_epochs))

    print(args.modelVHR)
    # NOTE(review): fixed dataset-wide max used for scaling in find_metrics —
    # presumably the global max of the VHR imagery; confirm.
    max_values_all_VHR = 3521

    find_metrics(train_file_names=train_file_VHR_lab,
                 val_file_names=val_file_VHR_lab,
                 test_file_names=test_file_names_VHR,
                 max_values=max_values_all_VHR,
                 mean_values=mean_values_VHR,
                 std_values=std_values_VHR,
                 model=model_VHR,
                 fold_out=args.fold_out,
                 fold_in=args.fold_in,
                 name_model=args.modelVHR,
                 epochs=args.n_epochs,
                 out_file=args.out_file,
                 dataset_file=args.dataset_file,
                 name_file=name_file_VHR)
        loss = criterion(output, target)
        train_losses.append(loss.item())
        loss.backward()
        optimizer.step()

    return train_losses


if __name__ == '__main__':
    # CamVid data loaders (11-class variant); validation runs one image at a
    # time.
    train_loader = DataLoader(CamVid11('CamVid/', split='train'),
                              batch_size=5,
                              shuffle=True)
    val_loader = DataLoader(CamVid11('CamVid/', split='val'),
                            batch_size=1,
                            shuffle=True)
    # SegNet(3, 12): presumably 3 input channels (RGB) and 12 output classes
    # — TODO confirm against the SegNet constructor signature.
    net = SegNet(3, 12)

    net.apply(init_weights)

    # Initialize encoder weights from VGG16 pre-trained on ImageNet
    vgg16 = models.vgg16(pretrained=True)
    layers = [
        layer for layer in vgg16.features.children()
        if isinstance(layer, nn.Conv2d)
    ]

    # Copy the VGG16 conv layers block-by-block into the SegNet encoder,
    # consuming `layer_count` layers per encoder block.
    start = 0
    for i in range(net.encoder.block_count):
        end = start + net.encoder.blocks[i].layer_count
        net.encoder.blocks[i].initialize_from_layers(layers[start:end])
        start = end