def transform(self, sample):
    """Apply the training or evaluation preprocessing pipeline to *sample*.

    Training adds random flips and a random scale-crop before tensor
    conversion; evaluation only converts to tensors.
    """
    if self.train:
        ops = [
            tr.RandomHorizontalFlip(),
            tr.RandomVerticalFlip(),
            tr.RandomScaleCrop(),
            tr.ToTensor(),
        ]
    else:
        ops = [tr.ToTensor()]
    return transforms.Compose(ops)(sample)
 def transforms_val(self, sample):
     """Validation pipeline: fixed resize, per-channel normalize, tensorize."""
     pipeline = [
         tr.FixedResize(size=self.input_size),
         tr.Normalize(mean=[v / 255.0 for v in (125.3, 123.0, 113.9)],
                      std=[v / 255.0 for v in (63.0, 62.1, 66.7)]),
         tr.ToTensor(),
     ]
     return transforms.Compose(pipeline)(sample)
Beispiel #3
0
    def transform_val(self, sample):
        """Validation transform: fixed-scale crop, then tensor conversion.

        Normalization is intentionally left out here (it was disabled in
        the original pipeline).
        """
        ops = [
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.ToTensor(),
        ]
        return transforms.Compose(ops)(sample)
    def transform_val(self, sample):
        """Validation transform: resize to the configured crop size,
        normalize with the dataset's stats, and convert to tensors."""
        steps = [
            tr.FixedResize(size=self.args['crop_size']),
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor(),
        ]
        pipeline = transforms.Compose(steps)
        return pipeline(sample)
Beispiel #5
0
    def transform_ts(self, sample):
        """Test-time transform: resize to 400, normalize with the source
        distribution statistics, and tensorize."""
        steps = [
            tr.FixedResize(size=400),
            tr.Normalize(mean=self.source_dist['mean'],
                         std=self.source_dist['std']),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Beispiel #6
0
    def transform_val(self, sample):
        """Validation transform: fixed-scale crop to 400, normalize with the
        source distribution statistics, and tensorize."""
        steps = [
            tr.FixScaleCrop(400),
            tr.Normalize(mean=self.source_dist['mean'],
                         std=self.source_dist['std']),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Beispiel #7
0
def get_train_transforms(normalize):
    """Build the training augmentation pipeline.

    *normalize* is applied last, after tensor conversion.
    """
    return transforms.Compose([
        transforms.Scale(160),
        transforms.RandomHorizontalFlip(),
        transforms.RandomColor(0.15),
        transforms.RandomRotate(15),
        transforms.RandomSizedCrop(128),
        transforms.ToTensor(),
        normalize,
    ])
Beispiel #8
0
 def transform_tr(self, sample):
     """Training transform. Random flip/scale-crop/blur augmentations were
     deliberately disabled in the original, leaving a deterministic
     fixed-scale crop + normalize + tensor pipeline."""
     steps = [
         tr.FixScaleCrop(crop_size=self.crop_size),
         tr.Normalize(mean=(0.485, 0.456, 0.406),
                      std=(0.229, 0.224, 0.225)),
         tr.ToTensor(),
     ]
     return transforms.Compose(steps)(sample)
Beispiel #9
0
    def transform_tr(self, sample):
        """Apply the training-time augmentation pipeline to *sample*."""
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),  # random horizontal flip
            tr.RandomScaleCrop(base_size=self.args.base_size,
                               crop_size=self.args.crop_size),  # random scale + crop
            tr.RandomGaussianBlur(),  # random Gaussian blur
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),  # per-channel normalization
            tr.ToTensor()
        ])

        return composed_transforms(sample)
 def transforms_train_esp(self, sample):
     """Heavy training augmentation: flips, random affine, color jitter,
     fixed resize, normalization, and tensor conversion."""
     pipeline = [
         tr.RandomVerticalFlip(),
         tr.RandomHorizontalFlip(),
         tr.RandomAffine(degrees=40, scale=(.9, 1.1), shear=30),
         tr.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
         tr.FixedResize(size=self.input_size),
         tr.Normalize(mean=[v / 255.0 for v in (125.3, 123.0, 113.9)],
                      std=[v / 255.0 for v in (63.0, 62.1, 66.7)]),
         tr.ToTensor(),
     ]
     return transforms.Compose(pipeline)(sample)
Beispiel #11
0
 def transform_pair_train(self, sample):
     """Paired training transform; Normalize/ToTensor run in pair mode
     (``if_pair=True``) so both elements of the sample are processed."""
     steps = [
         tr.RandomHorizontalFlip(),
         tr.RandomScaleCrop(base_size=400, crop_size=400, fill=0),
         tr.HorizontalFlip(),
         tr.GaussianBlur(),
         tr.Normalize(mean=self.source_dist['mean'],
                      std=self.source_dist['std'],
                      if_pair=True),
         tr.ToTensor(if_pair=True),
     ]
     return transforms.Compose(steps)(sample)
Beispiel #12
0
    def transform_pair_val(self, sample):
        """Paired validation transform: fixed-scale crop, flip, blur, then
        pair-mode normalization and tensor conversion."""
        steps = [
            tr.FixScaleCrop(400),
            tr.HorizontalFlip(),
            tr.GaussianBlur(),
            tr.Normalize(mean=self.source_dist['mean'],
                         std=self.source_dist['std'],
                         if_pair=True),
            tr.ToTensor(if_pair=True),
        ]
        return transforms.Compose(steps)(sample)
Beispiel #13
0
    def transform_tr(self, sample):
        """Training transform. Only a fixed-scale crop (so every image in a
        batch shares the same shape) followed by tensor conversion; the
        random augmentations and normalization were deliberately disabled
        in the original."""
        ops = [
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.ToTensor(),
        ]
        return transforms.Compose(ops)(sample)
Beispiel #14
0
 def transform_tr(self, sample):
     """Training transform; when ``self.random_match`` is set, histogram
     matching is prepended to the common augmentation chain. The rest of
     the pipeline is identical in both modes."""
     steps = []
     if self.random_match:
         steps.append(tr.HistogramMatching())
     steps += [
         tr.RandomHorizontalFlip(),
         tr.RandomScaleCrop(base_size=400, crop_size=400, fill=0),
         tr.RandomGaussianBlur(),
         tr.Normalize(mean=self.source_dist['mean'],
                      std=self.source_dist['std']),
         tr.ToTensor(),
     ]
     return transforms.Compose(steps)(sample)
Beispiel #15
0
def make_augmentation_transforms(augmentation, mode):
    """Assemble the audio transform chain for *mode*.

    Training gets random padding, probabilistic noise injection, and a
    probabilistic shift; other modes get deterministic padding. The chosen
    *augmentation* (a key into ``augmentations``) and the final
    pad/expand/tensor steps are appended in both cases.
    """
    if mode == 'train':
        chain = [
            t.RandomPadToLength(length=config.AUDIO_LENGTH),
            t.Noise(
                length=config.AUDIO_LENGTH,
                noise_waves=load_noise_waves(),
                noise_limit=0.2,
            ).with_prob(0.5),
            t.RandomShift(shift_limit=0.2).with_prob(0.5),
        ]
    else:
        chain = [t.PadToLength(length=config.AUDIO_LENGTH)]
    chain.append(augmentations[augmentation])
    chain.extend([
        t.Pad(((0, 0), (0, 1)), 'constant'),
        t.ExpandDims(),
        t.ToTensor(),
    ])
    return t.Compose(chain)
Beispiel #16
0
    def get_mean_std(self, ratio=0.1):
        """Estimate per-channel mean/std from a random sample of the dataset.

        Draws one shuffled batch containing roughly ``ratio`` of the dataset
        and returns ``(mean, std)`` computed over the batch, height, and
        width axes (shape: one value per channel).

        Fixes two defects in the original: the loop recomputed statistics
        for every batch and silently discarded all but the last batch's
        result, and an empty loader raised UnboundLocalError at the return.

        Raises:
            RuntimeError: if the dataloader yields no batches.
        """
        trs = tf.Compose(
            [tr.FixedResize(512),
             tr.Normalize(mean=0, std=1),
             tr.ToTensor()])
        dataset = LungDataset(root_dir=r'D:\code\U-net',
                              transforms=trs,
                              train=True)
        print(dataset)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=int(
                                                     len(dataset) * ratio),
                                                 shuffle=True,
                                                 num_workers=4)

        mean = std = None
        for item in dataloader:
            train = item['image']
            print(train.shape)
            print('sample {} images to calculate'.format(train.shape[0]))
            mean = np.mean(train.numpy(), axis=(0, 2, 3))
            std = np.std(train.numpy(), axis=(0, 2, 3))
            # One random batch is a sufficient sample; stop instead of
            # recomputing (and discarding) statistics for every batch.
            break
        if mean is None:
            raise RuntimeError('dataloader yielded no batches; '
                               'cannot compute mean/std')
        return mean, std
Beispiel #17
0
    def get_img_size(self):
        """Return ``[height, width]`` of the dataset's first image."""
        first_path = os.path.join(self.db_root_dir, self.img_list[0])
        height, width = cv2.imread(first_path).shape[:2]
        return [height, width]


if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt

    transforms = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.Resize(scales=[0.5, 0.8, 1]),
        tr.ToTensor()
    ])

    dataset = DAVIS2016(
        db_root_dir='/media/eec/external/Databases/Segmentation/DAVIS-2016',
        train=True,
        transform=transforms)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=1)

    for i, data in enumerate(dataloader):
        plt.figure()
        plt.imshow(
            overlay_mask(im_normalize(tens2image(data['image'])),
Beispiel #18
0
from torchvision import datasets, transforms
import torch.utils.data as data
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from PIL import Image
import os
import custom_transforms as trans

# Training-time augmentation pipeline used by the dataset below: random
# flip and Gaussian blur, random scale-crop (base 700, crop 512), then
# normalization and tensor conversion (all from custom_transforms).
img_transform = transforms.Compose([
    trans.RandomHorizontalFlip(),
    trans.RandomGaussianBlur(),
    trans.RandomScaleCrop(700, 512),
    trans.Normalize(),
    trans.ToTensor()
])


class TrainImageFolder(data.Dataset):
    def __init__(self, data_dir):
        """Read the training id list from ``<data_dir>/train_id.txt``."""
        # NOTE(review): the file handle stays open on self.f for the
        # object's lifetime; if nothing else reads self.f, a `with` block
        # closing it after readlines() would avoid the leak — confirm.
        self.f = open(os.path.join(data_dir, 'train_id.txt'))
        self.file_list = self.f.readlines()
        self.data_dir = data_dir

    def __getitem__(self, index):
        img = Image.open(
            os.path.join(self.data_dir, 'train_images',
                         self.file_list[index][:-1] + '.jpg')).convert('RGB')
        parse = Image.open(
            os.path.join(self.data_dir, 'train_segmentations',
Beispiel #19
0
        # seq_name = self.video_list[idx].split('/')[2]
        return img, gt

    def get_img_size(self):
        """Return ``[height, width]`` of the dataset's first image."""
        path = os.path.join(self.db_root_dir, self.img_list[0])
        h, w = cv2.imread(path).shape[:2]
        return [h, w]


# Smoke-test driver: iterate the DAVIS2016 loader and print tensor sizes.
if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt
    from dataloader.helpers import overlay_mask, im_normalize, tens2image

    # NOTE(review): this composed pipeline is built but never used — the
    # dataset below is constructed with transform=None. It also shadows
    # the imported `transforms` module. Confirm whether it should be
    # passed to DAVIS2016.
    transforms = transforms.Compose([tr.RandomHorizontalFlip(), tr.Resize(scales=[0.5, 0.8, 1]), tr.ToTensor()])

    dataset = DAVIS2016(db_root_dir='/home/ty/data/davis',
                        train=True, transform=None)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=5, shuffle=True, num_workers=1)

    for i, data in enumerate(dataloader):
        # plt.figure()
        # plt.imshow(overlay_mask(im_normalize(tens2image(data['image'])), tens2image(data['gt'])))
        # if i == 10:
        #     break
        print(data['img'].size())
        print(data['img_gt'].size())

    # plt.show(block=True)
    def get_img_size(self):
        """Return ``[height, width]`` of the dataset's first image."""
        sample_path = os.path.join(self.db_root_dir, self.img_list[0])
        shape = cv2.imread(sample_path).shape
        return [shape[0], shape[1]]


# Smoke-test driver for OnlineDataset: iterate and print image shapes.
if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt

    # NOTE(review): this composed pipeline is built but never used — the
    # dataset below gets transform=tr.ToTensor() instead. It also shadows
    # the imported `transforms` module. Confirm the intended transform.
    transforms = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.Resize(scales=[0.5, 0.8, 1]),
        tr.ToTensor()
    ])

    dataset = OnlineDataset(db_root_dir='../flow_mask',
                            train=True,
                            transform=tr.ToTensor())
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=1)

    for i, data in enumerate(dataloader):
        img = data['image']
        # print img
        print(img.shape)
        # NOTE(review): `bb` has no definition in view — this raises
        # NameError on the first iteration (possibly a deliberate
        # debugging stop). Confirm and remove if unintended.
        bb
Beispiel #21
0
def main(args):
  """Entry point: build model and data, then train, evaluate, or attack.

  All behavior is selected through ``args`` (gpu index, model variant,
  resume checkpoint, evaluate/attack flags, warmup and epoch counts).
  The evaluate and attack paths require ``args.resume`` and return early;
  otherwise the function runs the full training loop and checkpoints the
  best validation accuracy.
  """
  # parse args
  best_acc1 = 0.0

  if args.gpu >= 0:
    torch.cuda.set_device(args.gpu)
    print("Use GPU: {}".format(args.gpu))
  else:
    print('You are using CPU for computing!',
          'Yet we assume you are using a GPU.',
          'You will NOT be able to switch between CPU and GPU training!')

  # fix the random seeds (the best we can)
  fixed_random_seed = 2019
  torch.manual_seed(fixed_random_seed)
  np.random.seed(fixed_random_seed)
  random.seed(fixed_random_seed)

  # set up the model + loss
  if args.use_custom_conv:
    print("Using custom convolutions in the network")
    model = default_model(conv_op=CustomConv2d, num_classes=100)
  elif args.use_resnet18:
    model = torchvision.models.resnet18(pretrained=True)
    model.fc = nn.Linear(512, 100)
  elif args.use_adv_training:
    model = AdvSimpleNet(num_classes=100)
  else:
    model = default_model(num_classes=100)
  # NOTE(review): model_arch stays "simplenet" even when resnet18 or the
  # adversarial model is selected — confirm whether checkpoints should
  # record the actual architecture.
  model_arch = "simplenet"
  criterion = nn.CrossEntropyLoss()
  # put everything onto the GPU
  if args.gpu >= 0:
    model = model.cuda(args.gpu)
    criterion = criterion.cuda(args.gpu)

  # setup the optimizer
  optimizer = torch.optim.SGD(model.parameters(), args.lr,
                momentum=args.momentum,
                weight_decay=args.weight_decay)

  # resume from a checkpoint?
  if args.resume:
    if os.path.isfile(args.resume):
      print("=> loading checkpoint '{}'".format(args.resume))
      checkpoint = torch.load(args.resume)
      args.start_epoch = checkpoint['epoch']
      best_acc1 = checkpoint['best_acc1']
      model.load_state_dict(checkpoint['state_dict'])
      if args.gpu < 0:
        model = model.cpu()
      else:
        model = model.cuda(args.gpu)
      # only load the optimizer if necessary (i.e. training will resume)
      if (not args.evaluate) and (not args.attack):
        optimizer.load_state_dict(checkpoint['optimizer'])
      print("=> loaded checkpoint '{}' (epoch {}, acc1 {})"
          .format(args.resume, checkpoint['epoch'], best_acc1))
    else:
      print("=> no checkpoint found at '{}'".format(args.resume))

  # set up transforms for data augmentation
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                       std=[0.229, 0.224, 0.225])
  train_transforms = get_train_transforms(normalize)
  # validation transforms
  # NOTE(review): the kwarg is spelled 'interpolations'; torchvision's
  # Scale takes 'interpolation' — verify this (likely project-local)
  # transforms module accepts the plural form.
  val_transforms=[]
  val_transforms.append(transforms.Scale(160, interpolations=None))
  val_transforms.append(transforms.ToTensor())
  val_transforms.append(normalize)
  val_transforms = transforms.Compose(val_transforms)
  if (not args.evaluate) and (not args.attack):
    print("Training time data augmentations:")
    print(train_transforms)

  # setup dataset and dataloader
  train_dataset = MiniPlacesLoader(args.data_folder,
                  split='train', transforms=train_transforms)
  val_dataset = MiniPlacesLoader(args.data_folder,
                  split='val', transforms=val_transforms)

  train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, shuffle=True,
    num_workers=args.workers, pin_memory=True, sampler=None, drop_last=True)
  val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=100, shuffle=False,
    num_workers=args.workers, pin_memory=True, sampler=None, drop_last=False)

  # reject evaluate+attack both set (checked after data setup; could be
  # validated earlier to fail fast)
  if (args.evaluate==args.attack) and args.evaluate:
    print("Cann't set evaluate and attack to True at the same time!")
    return

  # set up visualizer
  if args.vis:
    visualizer = default_attention(criterion)
  else:
    visualizer = None

  # evaluation
  if args.resume and args.evaluate:
    print("Testing the model ...")
    cudnn.deterministic = True
    validate(val_loader, model, -1, args, visualizer=visualizer)
    return

  # attack
  if args.resume and args.attack:
    print("Generating adversarial samples for the model ..")
    cudnn.deterministic = True
    validate(val_loader, model, -1, args,
             attacker=default_attack(criterion),
             visualizer=visualizer)
    return

  # enable cudnn benchmark
  cudnn.enabled = True
  cudnn.benchmark = True

  # warmup the training
  if (args.start_epoch == 0) and (args.warmup_epochs > 0):
    print("Warmup the training ...")
    for epoch in range(0, args.warmup_epochs):
      train(train_loader, model, criterion, optimizer, epoch, "warmup", args)

  # start the training
  print("Training the model ...")
  for epoch in range(args.start_epoch, args.epochs):
    # train for one epoch
    train(train_loader, model, criterion, optimizer, epoch, "train", args)

    # evaluate on validation set
    acc1 = validate(val_loader, model, epoch, args)

    # remember best acc@1 and save checkpoint
    is_best = acc1 > best_acc1
    best_acc1 = max(acc1, best_acc1)
    save_checkpoint({
      'epoch': epoch + 1,
      'model_arch': model_arch,
      'state_dict': model.state_dict(),
      'best_acc1': best_acc1,
      'optimizer' : optimizer.state_dict(),
    }, is_best)
Beispiel #22
0
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training worker for the UNet model.

    Selects one of four execution modes from ``args``: horovod, DDP,
    plain single-GPU, or DataParallel. Builds the dataset according to
    ``args.method`` ('A'/'B'/'C'), then trains with AMP mixed precision,
    stepping a ReduceLROnPlateau scheduler on the average epoch loss.
    """
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu

        # os.environ.setdefault("NCCL_SOCKET_IFNAME", "^lo,docker")

        print(args.dist_backend, args.dist_url, args.world_size, args.rank)
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    model = UNet(1, 1, 4, args.up_sample)

    optimizer = torch.optim.Adam(
        model.parameters(),
        args.lr)  #, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=20,
                                                           verbose=True,
                                                           factor=0.5,
                                                           min_lr=1e-8)

    if args.use_horovod:
        print('use horovod')
        hvd.init()
        torch.cuda.set_device(hvd.local_rank())
        model = model.cuda(args.gpu)
        # split the global batch size across workers
        args.batch_size = int(args.batch_size / ngpus_per_node)

        compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none

        optimizer = hvd.DistributedOptimizer(
            optimizer,
            named_parameters=model.named_parameters(),
            compression=compression)
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)

    elif args.distributed:
        if args.gpu is not None:
            print('use DDP')
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            args.batch_size = int(args.batch_size / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        print('use DP')
        model = model.cuda()
        model = torch.nn.DataParallel(model).cuda()

    criterion = dice_loss
    cudnn.benchmark = True

    mixed_precision = args.mixed_precision
    scaler = torch.cuda.amp.GradScaler(enabled=mixed_precision)

    # NOTE(review): if args.method is not one of 'A'/'B'/'C', train_dataset
    # is never bound and the sampler/DataLoader code below raises NameError.
    if args.method == 'A':
        # A
        print('not preprocess')
        train_dataset = UnetFolder(
            args.train_data,
            224,
            112,
            transform=custom_transforms.Compose([
                custom_transforms.ToTensor(),
                custom_transforms.Resize(112),
                custom_transforms.RandomHorizontalFlip()
            ]))
        # train_dataset = UnetFolder('./source/p2/', 224, 112, transform=Compose([ToTensor(), Resize(112), RandomHorizontalFlip()]))
        # test_dataset = UnetFolder('./source/p7/', 224, 112, transform=Compose([ToTensor(), Resize(112)]))
    elif args.method == 'B':
        # B
        # dataset is from pt, read each time
        print('already resized and croped')
        train_dataset = TensorDataset(
            args.train_data,
            transform=custom_transforms.Compose([
                custom_transforms.ToTensor(),
                custom_transforms.Resize(112),
                custom_transforms.RandomHorizontalFlip()
            ]))
        # train_dataset = TensorDataset('./before_resized/train', transform=Compose([Resize(112), RandomHorizontalFlip()]))
        # test_dataset = TensorDataset('./before_resized/train', transform=Compose([Resize(112)]))
    elif args.method == 'C':
        # C
        # dataset is from pt, already size 112, read each time
        print('already resized and croped')
        train_dataset = TensorDataset(
            args.train_data,
            transform=custom_transforms.Compose([
                custom_transforms.ToTensor(),
                custom_transforms.RandomHorizontalFlip()
            ]))
        # train_dataset = TensorDataset('./after_resized/train', transform=custom_transforms.Compose([
        #                                                                   custom_transforms.ToTensor(),
        #                                                                   custom_transforms.RandomHorizontalFlip()]))
        # test_dataset = TensorDataset('./after_resized/train')

    if args.use_horovod:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    elif args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              num_workers=args.n_workers,
                              pin_memory=True,
                              sampler=train_sampler)
    # test_loader = DataLoader(
    #     test_dataset, batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.n_workers, pin_memory=True
    # )

    for epoch in range(args.epochs):

        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')

        progress = ProgressMeter(len(train_loader),
                                 batch_time,
                                 data_time,
                                 losses,
                                 prefix="Epoch: [{}]".format(epoch + 1))

        model.train()

        end = time.time()
        start = time.time()  # NOTE(review): assigned but never read
        for i, data in enumerate(train_loader):
            images, targets = data

            # measure data loading time
            data_time.update(time.time() - end)
            optimizer.zero_grad()

            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                targets = targets.cuda(args.gpu, non_blocking=True)
            else:
                images, targets = images.cuda(), targets.cuda()

            batch_size = images.size(0)

            # forward under autocast; loss is scaled for AMP below
            with torch.cuda.amp.autocast(enabled=mixed_precision):
                output = model(images)
                loss = criterion(output, targets)

            losses.update(loss.item(), batch_size)

            scaler.scale(loss).backward()

            if args.use_horovod:
                optimizer.synchronize()
                with optimizer.skip_synchronize():
                    # optimizer.step()
                    scaler.step(optimizer)
            else:
                scaler.step(optimizer)
            scaler.update()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:
                progress.display(i)

        # # check loss and decay
        if scheduler:
            scheduler.step(losses.avg)
Beispiel #23
0
    def get_img_size(self):
        """Return ``[height, width]`` of the dataset's first image."""
        image = cv2.imread(os.path.join(self.db_root_dir, self.img_list[0]))
        return list(image.shape)[:2]


if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt

    transforms = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.Resize(scales=[0.5, 0.8, 1]),
        tr.ToTensor()
    ])

    dataset = OnlineDataset(train=True, transform=tr.ToTensor())
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=1)

    for i, data in enumerate(dataloader):
        img = data['image']
        # print img
        print(img.shape)
        bb
        plt.figure()
        plt.imshow(
Beispiel #24
0
 def source_transform():
     """Return the source-domain transform (tensor conversion only)."""
     return transforms.Compose([custom_transforms.ToTensor()])
Beispiel #25
0
        return list(img.shape[:2])

    def get_img_filename(self, idx):
        """Print the leading path component of the idx-th image entry."""
        leading = self.img_list[idx].split('/')[0]
        print(leading)


if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt

    # transforms = transforms.Compose([tr.RandomHorizontalFlip(), tr.Resize(scales=[0.5, 0.8, 1]), tr.ToTensor()])

    dataset = OfflineDataset(train=False, transform=tr.ToTensor())
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=1)

    for i, data in enumerate(dataloader):
        img = data['image']
        # print img
        print(img.shape)
        bb
        plt.figure()
        plt.imshow(
            overlay_mask(im_normalize(tens2image(data['image'])),
                         tens2image(data['gt'])))
        if i == 10:
Beispiel #26
0
 def target_transform():
     """Return the target-domain transform (tensor conversion only)."""
     return transforms.Compose([custom_transforms.ToTensor()])
Beispiel #27
0
        #     thresh = (sample['crop_gt'].max() + sample['crop_gt'].min()) / 2
        #     sample['crop_gt'] = (torch.ge(sample['crop_gt'], thresh)).type(torch.FloatTensor)
        # print(sample)
        return sample

    def __len__(self):
        # Dataset length == number of image entries.
        return len(self.images)


# Driver: build the validation-split ChaosSegmentation dataset with the
# crop/resize/normalize/confidence-map pipeline and wrap it in a loader.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import helpers as helpers
    import torch
    import custom_transforms as tr
    from torchvision import transforms
    # NOTE(review): `transform` below is built but not used in view —
    # only composed_transforms_tr is passed to the dataset.
    transform = transforms.Compose([tr.ToTensor()])
    composed_transforms_tr = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-30, 30), scales=(.9, 1.1), semseg=True),
        tr.CropFromMask(crop_elems=('image', 'gt'), zero_pad=True),
        tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
        # tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
        # tr.ToImage(norm_elem='extreme_points'),
        tr.SelectRange(elem = 'crop_image', _min = 20, _max = 250),
        tr.Normalize(elems = ['crop_image']),
        # tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.AddConfidenceMap(elem = 'crop_image', hm_type = 'l1l2', tau = 7),
        tr.ToTensor()])

    dataset = ChaosSegmentation(split=['val'], transform=composed_transforms_tr)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
Beispiel #28
0
    parser.add_argument("--frn",
                        action='store_true',
                        help="Use Filter Response Normalization and TLU")

    args = parser.parse_args()

    running_losses = RunningLossesContainer()
    global_step = 0
    print(time.ctime())

    with torch.cuda.device(args.gpu_device):
        transform = transforms.Compose([
            #custom_transforms.Resize(320, 180),
            custom_transforms.Resize(640, 360),
            custom_transforms.RandomHorizontalFlip(),
            custom_transforms.ToTensor()
        ])
        monkaa = MonkaaDataset(os.path.join(args.data_dir, "monkaa"),
                               transform)
        flyingthings3d = FlyingThings3DDataset(
            os.path.join(args.data_dir, "flyingthings3d"), transform)
        dataset = monkaa + flyingthings3d
        batch_size = 1
        traindata = torch.utils.data.DataLoader(dataset,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=3,
                                                pin_memory=True,
                                                drop_last=True)
        if pretrain:
            state_dict_path = "model_min_20000.pth"
        gt_1 = np.array(label_1, dtype=np.float32)
        gt_1 = gt_1 / np.max([gt_1.max(), 1e-8])
        gt_t = np.array(label_t, dtype=np.float32)
        gt_t = gt_t / np.max([gt_t.max(), 1e-8])
        # name = self.img_list[idx].split('/')[-1].split('.')[0]
        return img_1, img_t, gt_1, gt_t

# Overfit-test driver: iterate DAVIS_OVER_FIT_TEST1 and unpack each batch.
if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt
    from models import net
    from torch import nn

    # NOTE: this rebinding shadows the imported `transforms` module.
    transforms = transforms.Compose([tr.RandomHorizontalFlip(), tr.ToTensor()])

    dataset = DAVIS_OVER_FIT_TEST1(db_root_dir='../../../DAVIS-2016',
                        train=True, transform=transforms)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=1)

    for i, data in enumerate(dataloader):
        # opens a new (empty) figure per batch; plotting calls are disabled
        plt.figure()
        # b = tens2image(data['image_1'])
        # plt.imshow(b)
        # print(data['image_1'][:, :, 3:15, 3:15])
        x = data['image_1']
        xt = data['image_t']
        lab = data['gt']
        # x[:, 0, :, :] = x[:, 0, :, :]*lab
        # x[:, 1, :, :] = x[:, 1, :, :]*lab
Beispiel #30
0
 def transform_data(self, sample):
     """Convert *sample* to tensors (no augmentation applied)."""
     return transforms.Compose([tr.ToTensor()])(sample)