Example #1
import torch, os, imageio, re, sys
import numpy as np
import torchvision as tv
import torchvision.transforms as T
from PIL import Image

# make the project-local `transforms` module importable
sys.path.append('..')
import transforms as tr

#ROTATE_512 = tr.Compose([
#    tr.RandomRotate(),
#    tr.Lift(T.ToTensor()),
#    tr.RandomCropTransform((512, 512))
#])

#CROP_512 = tr.Compose([
#    tr.Lift(T.ToTensor()),
#    tr.RandomCropTransform((512, 512)),
#])

# no resizing or cropping, just conversion to tensors
UNCUT = tr.Compose([
    #    tr.Lift(T.Pad(88)),
    tr.Lift(T.ToTensor())
])
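The `transforms` module imported as `tr` is project-local, so its API is not shown on this page. As a rough orientation only, the examples are consistent with the following minimal sketch of `Lift` and `Compose` (an assumption, not the repository's implementation): `Lift` turns a single-image torchvision transform into one applied to every element of an (img, mask, lbl) tuple, and `Compose` chains such tuple-level transforms.

import torchvision.transforms as T

class Lift:
    # assumed behavior: apply a per-image transform to each element of a tuple
    def __init__(self, transform):
        self.transform = transform

    def __call__(self, *items):
        return tuple(self.transform(item) for item in items)

class Compose:
    # assumed behavior: chain tuple-level transforms left to right
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, *items):
        for transform in self.transforms:
            items = transform(*items)
        return items

# usage mirroring UNCUT above: converts each element of a sample to a tensor,
# and a single-argument call returns a 1-tuple, matching `tentrans(...)[0]` in Example #6
UNCUT_SKETCH = Compose([Lift(T.ToTensor())])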
Example #2
        # re-weight pixels by inverse class frequency, scaled by the
        # configured background / foreground weights
        weights[background] = n * self.bg_weight / background.sum().item()
        weights[foreground] = n * self.fg_weight / foreground.sum().item()

        mask = mask * weights

        # append self.mean along the channel dimension
        img = torch.cat([img, self.mean[None]], dim=0)

        if self.flip:
            img, mask, lbl = random_flip(img, mask, lbl)

        return img, mask, lbl


# resize preserving the aspect ratio, pad by 88 px, then randomly rotate
ROTATE_TRANS_1024 = tr.Compose([
    tr.AspectPreservingResizeTransform((1024, 768)),
    tr.Lift(T.Pad(88)),
    tr.RandomRotate(),
])

# same resize-and-pad pipeline, without the random rotation
PAD_TRANS_1024 = tr.Compose([
    tr.AspectPreservingResizeTransform((1024, 768)),
    tr.Lift(T.Pad(88)),
])

RotatedISICDataset = rotated_dataset(ISICDataset)

if __name__ == '__main__':
    target_size = 1024, 768

    img_transform = T.Compose([T.ColorJitter(0.3, 0.3, 0.3, 0.), T.ToTensor()])
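Example #2 calls a `random_flip` helper that is not shown here. A minimal sketch of what such a helper presumably does (an assumption, not the project's implementation): flip image, mask and label with the same random decisions so the three stay aligned.

import random
import torch

def random_flip(img, mask, lbl):
    # assumes CxHxW tensors; all three are flipped with the same coin tosses
    if random.random() < 0.5:  # horizontal flip
        img, mask, lbl = [torch.flip(t, dims=[2]) for t in (img, mask, lbl)]
    if random.random() < 0.5:  # vertical flip
        img, mask, lbl = [torch.flip(t, dims=[1]) for t in (img, mask, lbl)]
    return img, mask, lbl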
Example #3

import sys
sys.path.append('..')
from utils import rotated_dataset

RotatedDeepglobeDataset = rotated_dataset(DeepglobeDataset)

if __name__ == '__main__':
    import sys
    import matplotlib.pyplot as plt
    import torchvision.transforms as T

    sys.path.append('..')
    import transforms as tr

    d = RotatedDeepglobeDataset(
        '/home/jatentaki/Storage/jatentaki/Datasets/roads/deepglobe/test/',
        global_transform=tr.Lift(T.ToTensor()))

    for i in range(25, 50):
        img, mask, lbl = d[i]

        fig, (a1, a2, a3) = plt.subplots(1, 3)
        img = img.numpy().transpose(1, 2, 0)  # CHW -> HWC for matplotlib
        a1.imshow(img)
        a2.imshow(lbl.numpy()[0])
        a3.imshow(mask.numpy()[0])

        plt.show()
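`rotated_dataset` comes from the project's `utils` module and is only used as a black box in these examples. A hedged sketch of the pattern it appears to follow (names and details are assumptions, not the actual implementation): a class factory that subclasses the given dataset and rotates every returned (img, mask, lbl) triple by the same random angle.

import random
import torchvision.transforms.functional as TF

def rotated_dataset(dataset_cls):
    class RotatedDataset(dataset_cls):
        def __getitem__(self, idx):
            img, mask, lbl = super().__getitem__(idx)
            angle = random.uniform(0., 360.)
            # rotate all three by the same angle so they stay aligned;
            # assumes inputs (tensor or PIL) that TF.rotate accepts
            return tuple(TF.rotate(t, angle) for t in (img, mask, lbl))
    return RotatedDataset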
Example #4
    if args.action == 'inspect' and args.batch_size != 1:
        args.batch_size = 1
        print("Setting --batch-size to 1 for inspection")

    if args.action != 'train' and args.epochs is not None:
        print("Ignoring --epochs outside of training mode")

    if args.no_jit and args.optimize:
        print("Ignoring --optimize in --no-jit setting")

    writer.add_text('general', str(vars(args)))

    transform = T.Compose([T.CenterCrop(644), T.ToTensor()])

    test_global_transform = tr.Lift(T.Pad(40))

    tr_global_transform = [
        #        tr.RandomRotate(),
        #        tr.RandomFlip(),
        tr.Lift(T.Pad(40))
    ]
    tr_global_transform = tr.Compose(tr_global_transform)

    train_data = loader.DriveDataset(args.data_path,
                                     training=True,
                                     bloat=args.bloat,
                                     from_=args.cut,
                                     img_transform=transform,
                                     mask_transform=transform,
                                     label_transform=transform,
Example #5
    if args.action == 'inspect' and args.batch_size != 1:
        args.batch_size = 1
        print("Setting --batch-size to 1 for inspection")

    if args.action != 'train' and args.epochs is not None:
        print("Ignoring --epochs outside of training mode")

    if args.no_jit and args.optimize:
        print("Ignoring --optimize in --no-jit setting")

    writer.add_text('general', str(vars(args)))

    transform = T.Compose([T.CenterCrop(644), T.ToTensor()])

    # if we are not padding the convolutions, we have to pad the input
    aug_pad = None if args.padding else tr.Lift(T.Pad(40))

    test_global_transform = aug_pad

    tr_global_transform = tr.Compose(
        [tr.RandomRotate(), tr.RandomFlip(), aug_pad])

    train_data = loader.DriveDataset(args.data_path,
                                     training=True,
                                     bloat=args.bloat,
                                     from_=args.cut,
                                     img_transform=transform,
                                     mask_transform=transform,
                                     label_transform=transform,
                                     global_transform=tr_global_transform)
    train_loader = DataLoader(train_data,
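The comment in Example #5 ("if we are not padding the convolutions, we have to pad the input") is worth unpacking: with unpadded ("valid") convolutions, every 3x3 layer trims one pixel from each border, so the input has to be grown by the total margin the network consumes. The sketch below only illustrates the arithmetic; the forty-layer stack is a stand-in, not the actual architecture, and Pad(40) simply matches a 40-pixel margin on top of the 644 px center crop.

import torch
import torch.nn as nn

# forty unpadded 3x3 convolutions consume a 40 px border on every side
convs = nn.Sequential(*[nn.Conv2d(1, 1, kernel_size=3) for _ in range(40)])
x = torch.zeros(1, 1, 644 + 2 * 40, 644 + 2 * 40)   # the padded input
print(convs(x).shape)   # torch.Size([1, 1, 644, 644]) -- back to the crop size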
Example #6
cuda = torch.cuda.is_available()

# count the learnable parameters
n_params = sum(param.numel() for param in network.parameters())
print(n_params, 'learnable parameters')

if cuda:
    network = network.cuda()

checkpoint = framework.load_checkpoint(args.load)
_, _, model_dict, _ = checkpoint

network.load_state_dict(model_dict)

tentrans = tr.Compose([tr.Lift(T.Pad(88)), tr.Lift(T.ToTensor())])
#threshold = 0.6#0.6364

with torch.no_grad():
    for i, ((path, img), ) in enumerate(tqdm(loader)):
        if i == args.early_stop:
            break

        scaler = APRTrans((1024, 768))
        downsized = scaler.forward(img)      # downscale the full-resolution image
        input = tentrans(downsized)[0]       # pad by 88 px and convert to a tensor
        input = data.add_mean(input)[None]   # [None] adds the batch dimension
        if cuda:
            input = input.cuda()

        prediction = network(input)
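Example #6 stops right after the forward pass, and the commented-out `threshold = 0.6` hints at a later binarization step. A hedged sketch of such a step, not the repository's code: a helper that squashes logits with a sigmoid, thresholds them, and writes the mask next to the input image. The single-channel NCHW output shape, the sigmoid, the `_mask.png` naming, and the `save_prediction` helper itself are all assumptions.

import os
import imageio
import torch

def save_prediction(prediction, path, threshold=0.6):
    # assumes prediction has shape (1, 1, H, W) and contains logits,
    # and that `path` is a plain string pointing at the input image
    mask = (torch.sigmoid(prediction)[0, 0] > threshold).cpu().numpy()
    out_path = os.path.splitext(path)[0] + '_mask.png'
    imageio.imwrite(out_path, (mask * 255).astype('uint8'))
    return out_path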