Example #1
0
def make_test_augmentation_transforms(
    crop_params=None,
    flip_lr=False,
    flip_ud=False,
):
    """Assemble the test-time augmentation pipeline.

    Args:
        crop_params: optional ``(crop_size, crop_location, resize_after_crop)``
            tuple. When given, a crop of the original-size image is inserted,
            optionally followed by a resize back to the original size.
        flip_lr: append a left-right flip when True.
        flip_ud: append an up-down flip when True.

    Returns:
        A ``Compose`` wrapping BGR→RGB conversion, the optional crop/flip
        steps, contiguity fix-up, tensor conversion, and normalization.
    """
    pipeline = [t.BGR2RGB()]

    if crop_params is not None:
        crop_size, crop_location, resize_after_crop = crop_params
        pipeline.append(
            t.Crop(config.ORIGINAL_IMG_SIZE, crop_size, crop_location))
        if resize_after_crop:
            pipeline.append(t.Resize(config.ORIGINAL_IMG_SIZE))

    if flip_lr:
        pipeline.append(t.FlipLr())
    if flip_ud:
        pipeline.append(t.FlipUd())

    pipeline.extend([
        t.AsContiguousArray(),
        ToTensor(),
        Normalize(mean=config.IMG_MEAN, std=config.IMG_STD),
    ])

    return Compose(pipeline)
Example #2
0
    def get_img_size(self):
        """Return ``[height, width]`` of the dataset's first image, read from disk."""
        first_path = os.path.join(self.db_root_dir, self.img_list[0])
        height, width = cv2.imread(first_path).shape[:2]
        return [height, width]


if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt

    transforms = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.Resize(scales=[0.5, 0.8, 1]),
        tr.ToTensor()
    ])

    dataset = DAVIS2016(
        db_root_dir='/media/eec/external/Databases/Segmentation/DAVIS-2016',
        train=True,
        transform=transforms)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=1)

    for i, data in enumerate(dataloader):
        plt.figure()
        plt.imshow(
Example #3
0
        # seq_name = self.video_list[idx].split('/')[2]
        return img, gt

    def get_img_size(self):
        """Shape ``[H, W]`` of the first image in the image list, loaded via OpenCV."""
        sample = cv2.imread(os.path.join(self.db_root_dir, self.img_list[0]))
        h, w = sample.shape[0], sample.shape[1]
        return [h, w]


if __name__ == '__main__':
    import custom_transforms as tr
    import torch
    from torchvision import transforms
    from matplotlib import pyplot as plt
    from dataloader.helpers import overlay_mask, im_normalize, tens2image

    # Augmentation pipeline (built here but not passed to the dataset below).
    transforms = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.Resize(scales=[0.5, 0.8, 1]),
        tr.ToTensor(),
    ])

    dataset = DAVIS2016(db_root_dir='/home/ty/data/davis',
                        train=True, transform=None)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=5, shuffle=True, num_workers=1)

    # Smoke-test the loader: print tensor sizes for every batch.
    for batch_idx, sample in enumerate(dataloader):
        # plt.figure()
        # plt.imshow(overlay_mask(im_normalize(tens2image(sample['image'])), tens2image(sample['gt'])))
        # if batch_idx == 10:
        #     break
        print(sample['img'].size())
        print(sample['img_gt'].size())

    # plt.show(block=True)
Example #4
0
                        default="./model_min_udnie.pth",
                        help="Output model file path")
    parser.add_argument("--frn",
                        action='store_true',
                        help="Use Filter Response Normalization and TLU")

    args = parser.parse_args()

    running_losses = RunningLossesContainer()
    global_step = 0
    print(time.ctime())

    with torch.cuda.device(args.gpu_device):
        transform = transforms.Compose([
            #custom_transforms.Resize(320, 180),
            custom_transforms.Resize(640, 360),
            custom_transforms.RandomHorizontalFlip(),
            custom_transforms.ToTensor()
        ])
        monkaa = MonkaaDataset(os.path.join(args.data_dir, "monkaa"),
                               transform)
        flyingthings3d = FlyingThings3DDataset(
            os.path.join(args.data_dir, "flyingthings3d"), transform)
        dataset = monkaa + flyingthings3d
        batch_size = 1
        traindata = torch.utils.data.DataLoader(dataset,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=3,
                                                pin_memory=True,
                                                drop_last=True)
Example #5
0
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point for the UNet model.

    Selects one of four device strategies (Horovod, DistributedDataParallel,
    single GPU, or DataParallel), builds the training dataset chosen by
    ``args.method``, then runs a mixed-precision training loop for
    ``args.epochs`` epochs, stepping a plateau LR scheduler on the
    epoch-average loss.

    Args:
        gpu: local GPU index for this worker (may be None).
        ngpus_per_node: GPUs on this node; used to derive the global rank
            and to split the per-node batch size across workers.
        args: parsed command-line namespace; this function reads gpu,
            distributed, dist_url, rank, multiprocessing_distributed,
            dist_backend, world_size, up_sample, lr, use_horovod,
            fp16_allreduce, batch_size, mixed_precision, method,
            train_data, n_workers, and epochs (mutated in place: gpu,
            rank, batch_size).
    """
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        # Resolve the global rank: from the launcher's environment when
        # using env:// rendezvous, otherwise node_rank * gpus + local gpu.
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu

        # os.environ.setdefault("NCCL_SOCKET_IFNAME", "^lo,docker")

        print(args.dist_backend, args.dist_url, args.world_size, args.rank)
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    # UNet(1, 1, 4, ...) — presumably 1 input / 1 output channel and 4
    # levels; TODO confirm against the UNet constructor signature.
    model = UNet(1, 1, 4, args.up_sample)

    optimizer = torch.optim.Adam(
        model.parameters(),
        args.lr)  #, momentum=args.momentum, weight_decay=args.weight_decay)
    # Halve the LR after 20 epochs without improvement in the mean loss.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=20,
                                                           verbose=True,
                                                           factor=0.5,
                                                           min_lr=1e-8)

    if args.use_horovod:
        # Horovod path: wrap the optimizer for allreduce and broadcast the
        # initial state from rank 0 so all workers start identically.
        print('use horovod')
        hvd.init()
        torch.cuda.set_device(hvd.local_rank())
        model = model.cuda(args.gpu)
        args.batch_size = int(args.batch_size / ngpus_per_node)

        compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none

        optimizer = hvd.DistributedOptimizer(
            optimizer,
            named_parameters=model.named_parameters(),
            compression=compression)
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)

    elif args.distributed:
        if args.gpu is not None:
            # One process per GPU: pin the device and shrink the batch.
            print('use DDP')
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            args.batch_size = int(args.batch_size / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        # Plain single-GPU training.
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # Fallback: single-process multi-GPU DataParallel.
        print('use DP')
        model = model.cuda()
        model = torch.nn.DataParallel(model).cuda()

    criterion = dice_loss
    cudnn.benchmark = True

    # AMP: GradScaler is a no-op when mixed precision is disabled.
    mixed_precision = args.mixed_precision
    scaler = torch.cuda.amp.GradScaler(enabled=mixed_precision)

    # NOTE(review): if args.method is not 'A', 'B', or 'C', train_dataset is
    # never bound and the sampler/loader code below raises NameError —
    # consider validating args.method at argparse time.
    if args.method == 'A':
        # A
        print('not preprocess')
        train_dataset = UnetFolder(
            args.train_data,
            224,
            112,
            transform=custom_transforms.Compose([
                custom_transforms.ToTensor(),
                custom_transforms.Resize(112),
                custom_transforms.RandomHorizontalFlip()
            ]))
        # train_dataset = UnetFolder('./source/p2/', 224, 112, transform=Compose([ToTensor(), Resize(112), RandomHorizontalFlip()]))
        # test_dataset = UnetFolder('./source/p7/', 224, 112, transform=Compose([ToTensor(), Resize(112)]))
    elif args.method == 'B':
        # B
        # dataset is from pt, read each time
        print('already resized and croped')
        train_dataset = TensorDataset(
            args.train_data,
            transform=custom_transforms.Compose([
                custom_transforms.ToTensor(),
                custom_transforms.Resize(112),
                custom_transforms.RandomHorizontalFlip()
            ]))
        # train_dataset = TensorDataset('./before_resized/train', transform=Compose([Resize(112), RandomHorizontalFlip()]))
        # test_dataset = TensorDataset('./before_resized/train', transform=Compose([Resize(112)]))
    elif args.method == 'C':
        # C
        # dataset is from pt, already size 112, read each time
        print('already resized and croped')
        train_dataset = TensorDataset(
            args.train_data,
            transform=custom_transforms.Compose([
                custom_transforms.ToTensor(),
                custom_transforms.RandomHorizontalFlip()
            ]))
        # train_dataset = TensorDataset('./after_resized/train', transform=custom_transforms.Compose([
        #                                                                   custom_transforms.ToTensor(),
        #                                                                   custom_transforms.RandomHorizontalFlip()]))
        # test_dataset = TensorDataset('./after_resized/train')

    # Distributed runs shard the dataset via a sampler; shuffle must then be
    # left to the sampler rather than the DataLoader.
    if args.use_horovod:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    elif args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              num_workers=args.n_workers,
                              pin_memory=True,
                              sampler=train_sampler)
    # test_loader = DataLoader(
    #     test_dataset, batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.n_workers, pin_memory=True
    # )

    for epoch in range(args.epochs):

        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')

        progress = ProgressMeter(len(train_loader),
                                 batch_time,
                                 data_time,
                                 losses,
                                 prefix="Epoch: [{}]".format(epoch + 1))

        model.train()

        end = time.time()
        start = time.time()
        for i, data in enumerate(train_loader):
            images, targets = data

            # measure data loading time
            data_time.update(time.time() - end)
            optimizer.zero_grad()

            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                targets = targets.cuda(args.gpu, non_blocking=True)
            else:
                images, targets = images.cuda(), targets.cuda()

            batch_size = images.size(0)

            # Forward pass under autocast; loss is scaled for the backward
            # pass when mixed precision is enabled.
            with torch.cuda.amp.autocast(enabled=mixed_precision):
                output = model(images)
                loss = criterion(output, targets)

            losses.update(loss.item(), batch_size)

            scaler.scale(loss).backward()

            if args.use_horovod:
                # NOTE(review): combining GradScaler.step with Horovod's
                # synchronize/skip_synchronize is delicate — scaler.step may
                # skip the optimizer step on inf/nan after synchronize has
                # already run; verify against Horovod's AMP example.
                optimizer.synchronize()
                with optimizer.skip_synchronize():
                    # optimizer.step()
                    scaler.step(optimizer)
            else:
                scaler.step(optimizer)
            scaler.update()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:
                progress.display(i)

        # # check loss and decay
        if scheduler:
            scheduler.step(losses.avg)