Example #1
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
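
A minimal usage sketch for the factory above, assuming argparse-style args with the fields these examples read; the kwargs dict mirrors the one Example #8 below builds before constructing its loaders:

import argparse

# Hypothetical args namespace; field names follow what make_data_loader reads.
args = argparse.Namespace(dataset='pascal', batch_size=8, use_sbd=False, workers=4)

# num_workers / pin_memory are forwarded into every DataLoader via **kwargs.
kwargs = {'num_workers': args.workers, 'pin_memory': True}
train_loader, val_loader, test_loader, num_class = make_data_loader(args, **kwargs)

for batch in train_loader:
    break  # batch layout depends on the dataset's __getitem__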
Example #2
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
        test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'ycb':
        train_set = SegDataset('/home/huipengly/data/Pictures/YCB_Video_Dataset/data',
                               '/home/huipengly/data/Pictures/YCB_Video_Dataset/image_sets/train.txt', True)
        val_set = SegDataset('/home/huipengly/data/Pictures/YCB_Video_Dataset/data',
                             '/home/huipengly/data/Pictures/YCB_Video_Dataset/image_sets/val.txt', False)
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(val_set, batch_size=1, shuffle=True, **kwargs)

        test_loader = None
        return train_loader, val_loader, test_loader, 22  # hard-coded class count: 21 YCB objects + background

    else:
        raise NotImplementedError
Example #3
    def get_dataloader(self):
        if self.args.dataset == 'pascal':
            assert self.args.datamode == 'val'
            dataset = pascal.VOCSegmentation(self.args, split='val')
            self.n_classes = dataset.NUM_CLASSES
            self.name_classes = dataset.class_names
            self.dataloader = DataLoader(dataset,
                                         batch_size=self.args.test_batch_size,
                                         shuffle=False)
        elif self.args.dataset == 'cityscapes':
            dataset = cityscapes.CityscapesSegmentation(
                self.args, split=self.args.datamode)
            self.n_classes = dataset.NUM_CLASSES
            self.name_classes = dataset.class_names
            self.dataloader = DataLoader(dataset,
                                         batch_size=self.args.test_batch_size,
                                         shuffle=False)
        elif self.args.dataset == 'coco':
            dataset = coco.COCOSegmentation(self.args,
                                            split=self.args.datamode)
            self.n_classes = dataset.NUM_CLASSES
            self.name_classes = dataset.class_names
            self.dataloader = DataLoader(dataset,
                                         batch_size=self.args.test_batch_size,
                                         shuffle=False)
        else:
            raise NotImplementedError('not applicable to other datasets')
Example #4
def make_data_loader(args, **kwargs):
    if args.dataset == "pascal":
        train_set = pascal.VOCSegmentation(args, split="train")
        val_set = pascal.VOCSegmentation(args, split="val")

        num_class = train_set.NUM_CLASSES
        train_loader = data.DataLoader(train_set,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
        val_loader = data.DataLoader(val_set,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     **kwargs)
        test_loader = None
    else:
        raise NotImplementedError

    return train_loader, val_loader, test_loader, num_class
Example #5
def make_data_loader(basedir, args, **kwargs):
    if args.dataset == 'pascal':
        if args.pattern == 'train':
            train_set = pascal.VOCSegmentation(args, base_dir=basedir, split='train')
            val_set = pascal.VOCSegmentation(args, base_dir=basedir, split='val')

            num_class = train_set.NUM_CLASSES

            train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
            val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
            return train_loader, val_loader, num_class
        elif args.pattern == 'test':
            test_set = pascal.VOCSegmentation(args, base_dir=basedir, split='test')

            num_class = test_set.NUM_CLASSES

            test_set_loader = DataLoader(test_set, batch_size=args.test_batch_size, shuffle=True, **kwargs)
            return test_set_loader, num_class
    else:
        raise NotImplementedError
Example #6
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
Example #7
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        class_names = train_set.class_names
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(val_set, batch_size=args.test_batch_size, shuffle=False, **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class, class_names

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        class_names = train_set.class_names
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(val_set, batch_size=args.test_batch_size, shuffle=False, **kwargs)
        test_loader = DataLoader(test_set, batch_size=args.test_batch_size, shuffle=False, **kwargs)

        return train_loader, val_loader, test_loader, num_class, class_names

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        class_names = train_set.class_names
        train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = DataLoader(val_set, batch_size=args.test_batch_size, shuffle=False, **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class, class_names

    else:
        raise NotImplementedError
Example #8
    def __init__(self, args):
        self.args = args

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        val_set = pascal.VOCSegmentation(args, split='val')
        self.nclass = val_set.NUM_CLASSES
        self.val_loader = DataLoader(val_set,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     **kwargs)

        # Define network
        self.model = DeepLab(num_classes=self.nclass,
                             backbone=args.backbone,
                             output_stride=args.out_stride,
                             sync_bn=args.sync_bn,
                             freeze_bn=args.freeze_bn)
        self.criterion = SegmentationLosses(
            weight=None, cuda=args.cuda).build_loss(mode=args.loss_type)

        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)

        # Using cuda
        if args.cuda:
            print('device_ids', self.args.gpu_ids)
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
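
The resume block above expects a checkpoint dict with 'epoch', 'state_dict', and 'best_pred' keys. A sketch of the matching save side (the function name and default path are illustrative, not from the source):

import torch

def save_checkpoint(model, epoch, best_pred, path='checkpoint.pth.tar'):
    # Unwrap DataParallel so the saved keys match what the resume block
    # loads via self.model.module.load_state_dict(...).
    state_dict = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
    torch.save({
        'epoch': epoch,
        'state_dict': state_dict,
        'best_pred': best_pred,
    }, path)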
Example #9
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation('../../../Pascal/VOCdevkit',
                                           train=True)
        val_set = pascal.VOCSegmentation('../../../Pascal/VOCdevkit',
                                         train=False)

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.test_batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif 'cityscapes' in args.dataset:
        if args.dataset == 'cityscapes_edm':
            train_set = cityscapes.CityscapesSegmentation(args,
                                                          split='train',
                                                          full=True)
            num_class = train_set.NUM_CLASSES
            train_loader = DataLoader(train_set,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      **kwargs)
        else:
            if 'supernet' in args.network:
                train_set1, train_set2 = cityscapes.twoTrainSeg(args)
                num_class = train_set1.NUM_CLASSES
                train_loader1 = DataLoader(train_set1,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           **kwargs)
                train_loader2 = DataLoader(train_set2,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           **kwargs)
            else:
                train_set = cityscapes.CityscapesSegmentation(args,
                                                              split='train')
                num_class = train_set.NUM_CLASSES
                if args.dist:
                    train_loader = DataLoader(
                        train_set,
                        batch_size=args.batch_size,
                        shuffle=False,
                        sampler=DistributedSampler(train_set),
                        **kwargs)
                else:
                    train_loader = DataLoader(train_set,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              **kwargs)

        if args.network is not None:
            val_set = cityscapes.CityscapesSegmentation(args, split='val')
            test_set = cityscapes.CityscapesSegmentation(args, split='test')
        else:
            raise Exception('autodeeplab param not set properly')

        val_loader = DataLoader(val_set,
                                batch_size=args.test_batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.test_batch_size,
                                 shuffle=False,
                                 **kwargs)

        if 'supernet' in args.network:
            return train_loader1, train_loader2, val_loader, test_loader, num_class
        else:
            return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
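
When args.dist is set, the example above pairs DistributedSampler with shuffle=False, since the sampler itself shards and shuffles indices across ranks. One step it leaves to the training loop: calling set_epoch every epoch so the shuffle order changes between epochs. A sketch, assuming train_loader was built with the sampler as above and num_epochs comes from the caller:

for epoch in range(num_epochs):
    # Reseed the sampler so each epoch sees a different shard order.
    train_loader.sampler.set_epoch(epoch)
    for batch in train_loader:
        ...  # training step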
Example #10
def make_data_loader(args, **kwargs):
    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class
    elif args.dataset == 'flood':
        workpath = Path.db_root_dir('flood')
        train_data = flood.load_flood_train_data(workpath)
        train_dataset = flood.InMemoryDataset(train_data,
                                              flood.processAndAugment)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=16,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   pin_memory=True)
        valid_data = flood.load_flood_valid_data(workpath)
        valid_dataset = flood.InMemoryDataset(valid_data, flood.processTestIm)
        valid_loader = torch.utils.data.DataLoader(
            valid_dataset,
            batch_size=4,
            shuffle=True,
            num_workers=0,
            collate_fn=lambda x:
            (torch.cat([a[0] for a in x], 0), torch.cat([a[1] for a in x], 0)),
            pin_memory=True)
        # The validation split doubles as the test split here.
        test_data = flood.load_flood_valid_data(workpath)
        test_dataset = flood.InMemoryDataset(test_data, flood.processTestIm)
        test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=4,
            shuffle=True,
            num_workers=0,
            collate_fn=lambda x:
            (torch.cat([a[0] for a in x], 0), torch.cat([a[1] for a in x], 0)),
            pin_memory=True)
        num_class = 2
        return train_loader, valid_loader, test_loader, num_class
    else:
        raise NotImplementedError
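
The 'flood' branch above hands its validation and test loaders a custom collate_fn: each dataset item is an (images, masks) pair whose tensors already carry a leading patch dimension (flood.processTestIm appears to emit a stack of crops per image), so the lambda concatenates along dim 0 instead of letting the default collate stack a new axis. The same behavior as a named function, for readability:

import torch

def concat_collate(batch):
    # Each item is an (images, masks) pair with a leading patch/batch
    # dimension; concatenate along dim 0 instead of stacking.
    images = torch.cat([item[0] for item in batch], 0)
    masks = torch.cat([item[1] for item in batch], 0)
    return images, masks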
Example #11
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        test_set = None

        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])
            make_dataset_val(train_set, val_set)

            if args.use_divide_data:
                train_id_file = 'train_id.txt'
                test_id_file = 'test_id.txt'
                if os.path.isfile(train_id_file) and os.path.isfile(
                        test_id_file):
                    print('Found existing split files, loading...\n')
                    train_set, test_set = load_combined_dataset(train_set)
                else:
                    train_set, test_set = divide_combineddataset(train_set)
                # make_dataset(train_set,test_set)

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        # test_set stays None unless the SBD divide-data path produced a split.
        test_loader = (DataLoader(test_set,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  **kwargs)
                       if test_set is not None else None)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
Example #12
def make_data_loader(args, **kwargs):

    if args.dataset == "pascal":
        if args.task == "segmentation":
            train_set = pascal.VOCSegmentation(args, split="train")
            val_set = pascal.VOCSegmentation(args, split="val")
            if args.use_sbd:
                sbd_train = sbd.SBDSegmentation(args, split=["train", "val"])
                train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                                   excluded=[val_set])
        elif args.task == "panoptic":
            train_set = pascal.VOCPanoptic(args, split="train")
            val_set = pascal.VOCPanoptic(args, split="val")
        else:
            raise NotImplementedError("unknown task: {}".format(args.task))

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)

        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == "cityscapes":
        if args.task == "segmentation":
            train_set = cityscapes.CityscapesSegmentation(args, split="train")
            val_set = cityscapes.CityscapesSegmentation(args, split="val")
            test_set = cityscapes.CityscapesSegmentation(args, split="test")
        elif args.task == "panoptic":
            train_set = cityscapes.CityscapesPanoptic(args, split="train")
            val_set = cityscapes.CityscapesPanoptic(args, split="val")
            test_set = cityscapes.CityscapesPanoptic(args, split="test")
        else:
            raise NotImplementedError("unknown task: {}".format(args.task))
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == "coco":
        train_set = coco.COCOSegmentation(args, split="train")
        val_set = coco.COCOSegmentation(args, split="val")
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
Example #13
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'drive':
        num_class = 2
        train_path = '../data/DRIVE/training/images'
        train_mask = '../data/DRIVE/training/1st_manual'
        test_path = '../data/DRIVE/test/'  #images and 1st_manual
        simple_transform = tfs.Compose([tfs.ToTensor()])
        pw = args.pw
        ph = args.ph
        npatches = args.npatches
        train_ipatches, train_lpatches, valid_ipatches, valid_lpatches = retina.getdata(
            train_path, train_mask, ph, pw, npatches)
        #test_patches,test_mask_patches = retina.getdata(test_path+'images' ,test_path+'1st_manual',False,0,0,0)
        train_set = retina.Retinal(train_ipatches,
                                   train_lpatches,
                                   transform=simple_transform)
        valid_set = retina.Retinal(valid_ipatches,
                                   valid_lpatches,
                                   transform=simple_transform)
        #test_set = retina.Retinal(test_patches,test_mask_patches,simple_transform)
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True)
        valid_loader = DataLoader(valid_set,
                                  batch_size=args.batch_size,
                                  shuffle=False)
        #test_loader = DataLoader(test_set,batch_size=args.batch_size,shuffle=False)
        test_loader = None
        return train_loader, valid_loader, test_loader, num_class

    elif args.dataset == 'brain':
        num_class = 4
        train_set, val_set = brats.get_Brain_data()
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True)
        valid_loader = DataLoader(val_set,
                                  batch_size=args.batch_size,
                                  shuffle=False)
        test_loader = None
        return train_loader, valid_loader, test_loader, num_class

    else:
        raise NotImplementedError
Example #14
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'whales':
        train_path = "../../data/HumpbackWhales/segmentacion/train/"
        val_path = "../../data/HumpbackWhales/segmentacion/val/"
        test_path = "../../data/HumpbackWhales/segmentacion/test/"

        train_set = whales.WhalesSegmentation(
            args,
            images_path=train_path + 'mask_images_train/',
            label_path=train_path + 'mask_train/',
            image=os.listdir(train_path + 'mask_images_train/'),
            split='train',
            drop_last=True)

        val_set = whales.WhalesSegmentation(
            args,
            images_path=val_path + 'mask_images_val/',
            label_path=val_path + 'mask_val/',
            image=os.listdir(val_path + 'mask_images_val/'),
            split='val',
            drop_last=True)
        test_set = whales.WhalesSegmentation(
            args,
            images_path=test_path,
            label_path=0,
            image=os.listdir(test_path),
            split='test',
            drop_last=True)  #,label=os.listdir(test_path)

        num_class = train_set.NUM_CLASSES

        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
Example #15
def make_data_loader(args, **kwargs):
    root = args.data_path
    if args.dist:
        print("=> Using Distribued Sampler")
        if args.dataset == 'cityscapes':
            if args.autodeeplab == 'train':
                train_set = cityscapes.CityscapesSegmentation(args,
                                                              root,
                                                              split='retrain')
                num_class = train_set.NUM_CLASSES
                train_sampler = torch.utils.data.distributed.DistributedSampler(
                    train_set)
                train_loader = DataLoader(train_set,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          sampler=train_sampler,
                                          **kwargs)

                val_set = cityscapes.CityscapesSegmentation(args,
                                                            root,
                                                            split='val')
                test_set = cityscapes.CityscapesSegmentation(args,
                                                             root,
                                                             split='test')
                val_sampler = torch.utils.data.distributed.DistributedSampler(
                    val_set)
                test_sampler = torch.utils.data.distributed.DistributedSampler(
                    test_set)
                val_loader = DataLoader(val_set,
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        sampler=val_sampler,
                                        **kwargs)
                test_loader = DataLoader(test_set,
                                         batch_size=args.batch_size,
                                         shuffle=False,
                                         sampler=test_sampler,
                                         **kwargs)

            elif args.autodeeplab == 'train_seg':
                dataset_cfg = {
                    'cityscapes':
                    dict(root=args.data_path,
                         split='train',
                         is_train=True,
                         crop_size=(args.image_height, args.image_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225))
                }
                train_set = Cityscapes(**dataset_cfg['cityscapes'])
                num_class = train_set.num_classes
                train_sampler = torch.utils.data.distributed.DistributedSampler(
                    train_set)
                train_loader = DataLoader(train_set,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          sampler=train_sampler,
                                          **kwargs)

                dataset_val_cfg = {
                    'cityscapes':
                    dict(root=args.data_path,
                         split='val',
                         is_train=False,
                         crop_size=(args.eval_height, args.eval_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225))
                }
                val_set = Cityscapes(**dataset_val_cfg['cityscapes'])
                val_sampler = torch.utils.data.distributed.DistributedSampler(
                    val_set)
                val_loader = DataLoader(val_set,
                                        batch_size=max(1,
                                                       args.batch_size // 4),
                                        shuffle=False,
                                        sampler=val_sampler,
                                        num_workers=args.workers,
                                        pin_memory=True,
                                        drop_last=False)

            elif args.autodeeplab == 'train_seg_panoptic':
                dataset_cfg = {
                    'cityscapes_panoptic':
                    dict(root=args.data_path,
                         split='train',
                         is_train=True,
                         crop_size=(args.image_height, args.image_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         semantic_only=False,
                         ignore_stuff_in_offset=True,
                         small_instance_area=4096,
                         small_instance_weight=3)
                }
                train_set = CityscapesPanoptic(
                    **dataset_cfg['cityscapes_panoptic'])
                num_class = train_set.num_classes
                train_sampler = torch.utils.data.distributed.DistributedSampler(
                    train_set)
                train_loader = DataLoader(train_set,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          sampler=train_sampler,
                                          **kwargs)

                dataset_val_cfg = {
                    'cityscapes_panoptic':
                    dict(root=args.data_path,
                         split='val',
                         is_train=False,
                         crop_size=(args.eval_height, args.eval_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         semantic_only=False,
                         ignore_stuff_in_offset=True,
                         small_instance_area=4096,
                         small_instance_weight=3)
                }
                # The panoptic cfg keys (semantic_only, etc.) need the panoptic class.
                val_set = CityscapesPanoptic(
                    **dataset_val_cfg['cityscapes_panoptic'])
                val_sampler = torch.utils.data.distributed.DistributedSampler(
                    val_set)
                val_loader = DataLoader(val_set,
                                        batch_size=max(1,
                                                       args.batch_size // 4),
                                        shuffle=False,
                                        sampler=val_sampler,
                                        num_workers=args.workers,
                                        pin_memory=True,
                                        drop_last=False)
            else:
                raise Exception('autodeeplab param not set properly')

            return train_loader, train_sampler, val_loader, val_sampler, num_class

        elif args.dataset == 'coco':
            if args.autodeeplab == 'train_seg_panoptic':
                dataset_cfg = {
                    'coco_panoptic':
                    dict(root=args.data_path,
                         split='train2017',
                         is_train=True,
                         min_resize_value=args.image_height,
                         max_resize_value=args.image_height,
                         resize_factor=32,
                         crop_size=(args.image_height, args.image_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=1.5,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         semantic_only=False,
                         ignore_stuff_in_offset=True,
                         small_instance_area=4096,
                         small_instance_weight=3)
                }
                train_set = COCOPanoptic(**dataset_cfg['coco_panoptic'])
                num_class = train_set.num_classes
                train_sampler = torch.utils.data.distributed.DistributedSampler(
                    train_set)
                train_loader = DataLoader(train_set,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          sampler=train_sampler,
                                          **kwargs)

                # train_set = coco.COCOSegmentation(args, root, split='train')
                # root=args.data_path
                # val_set = coco.COCOSegmentation(args, root, split='val')
                dataset_val_cfg = {
                    'coco_panoptic':
                    dict(root=args.data_path,
                         split='val2017',
                         is_train=True,
                         min_resize_value=args.image_height,
                         max_resize_value=args.image_height,
                         resize_factor=32,
                         crop_size=(args.eval_height, args.eval_width),
                         mirror=False,
                         min_scale=1,
                         max_scale=1,
                         scale_step_size=0,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         semantic_only=False,
                         ignore_stuff_in_offset=True,
                         small_instance_area=4096,
                         small_instance_weight=3)
                }
                val_set = COCOPanoptic(**dataset_val_cfg['coco_panoptic'])
                val_sampler = torch.utils.data.distributed.DistributedSampler(
                    val_set)
                val_loader = DataLoader(val_set,
                                        batch_size=args.batch_size * 4,
                                        shuffle=False,
                                        sampler=val_sampler,
                                        num_workers=args.workers,
                                        pin_memory=True,
                                        drop_last=False)
            else:
                raise Exception('autodeeplab param not set properly')

            return train_loader, train_sampler, val_loader, val_sampler, num_class
        else:
            raise NotImplementedError

    else:
        if args.dataset == 'pascal':
            train_set = pascal.VOCSegmentation(args, root, split='train')
            val_set = pascal.VOCSegmentation(args, root, split='val')
            if args.use_sbd:
                sbd_train = sbd.SBDSegmentation(args,
                                                root,
                                                split=['train', 'val'])
                train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                                   excluded=[val_set])

            num_class = train_set.NUM_CLASSES
            train_loader = DataLoader(train_set,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      **kwargs)
            val_loader = DataLoader(val_set,
                                    batch_size=args.batch_size,
                                    shuffle=False,
                                    **kwargs)
            test_loader = None

            return train_loader, train_loader, val_loader, test_loader, num_class

        elif args.dataset == 'cityscapes':
            if args.autodeeplab == 'train_seg':
                dataset_cfg = {
                    'cityscapes':
                    dict(root=args.data_path,
                         split='train',
                         is_train=True,
                         crop_size=(args.image_height, args.image_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225))
                }
                train_set = Cityscapes(**dataset_cfg['cityscapes'])
                num_class = train_set.num_classes
                train_loader = DataLoader(train_set,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          **kwargs)

                dataset_val_cfg = {
                    'cityscapes':
                    dict(root=args.data_path,
                         split='val',
                         is_train=False,
                         crop_size=(args.eval_height, args.eval_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225))
                }
                val_set = Cityscapes(**dataset_val_cfg['cityscapes'])
                val_loader = DataLoader(val_set,
                                        batch_size=max(1,
                                                       args.batch_size // 4),
                                        shuffle=False,
                                        num_workers=args.workers,
                                        pin_memory=True,
                                        drop_last=False)

            elif args.autodeeplab == 'train_seg_panoptic':
                dataset_cfg = {
                    'cityscapes_panoptic':
                    dict(root=args.data_path,
                         split='train',
                         is_train=True,
                         crop_size=(args.image_height, args.image_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         semantic_only=False,
                         ignore_stuff_in_offset=True,
                         small_instance_area=4096,
                         small_instance_weight=3)
                }
                train_set = CityscapesPanoptic(
                    **dataset_cfg['cityscapes_panoptic'])
                num_class = train_set.num_classes
                train_loader = DataLoader(train_set,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          **kwargs)

                dataset_val_cfg = {
                    'cityscapes_panoptic':
                    dict(root=args.data_path,
                         split='val',
                         is_train=False,
                         crop_size=(args.eval_height, args.eval_width),
                         mirror=True,
                         min_scale=0.5,
                         max_scale=2.0,
                         scale_step_size=0.1,
                         mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         semantic_only=False,
                         ignore_stuff_in_offset=True,
                         small_instance_area=4096,
                         small_instance_weight=3)
                }
                # The panoptic cfg keys (semantic_only, etc.) need the panoptic class.
                val_set = CityscapesPanoptic(
                    **dataset_val_cfg['cityscapes_panoptic'])
                val_loader = DataLoader(val_set,
                                        batch_size=max(1,
                                                       args.batch_size // 4),
                                        shuffle=False,
                                        num_workers=args.workers,
                                        pin_memory=True,
                                        drop_last=False)
            else:
                raise Exception('autodeeplab param not set properly')

            return train_loader, val_loader, num_class

        elif args.dataset == 'coco':
            train_set = coco.COCOSegmentation(args, root, split='train')
            val_set = coco.COCOSegmentation(args, root, split='val')
            num_class = train_set.NUM_CLASSES
            train_loader = DataLoader(train_set,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      **kwargs)
            val_loader = DataLoader(val_set,
                                    batch_size=args.batch_size,
                                    shuffle=False,
                                    **kwargs)
            test_loader = None
            return train_loader, train_loader, val_loader, test_loader, num_class

        elif args.dataset == 'kd':
            train_set = kd.CityscapesSegmentation(args, root, split='train')
            val_set = kd.CityscapesSegmentation(args, root, split='val')
            test_set = kd.CityscapesSegmentation(args, root, split='test')
            num_class = train_set.NUM_CLASSES
            train_loader1 = DataLoader(train_set,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
            train_loader2 = DataLoader(train_set,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
            val_loader = DataLoader(val_set,
                                    batch_size=args.batch_size,
                                    shuffle=False,
                                    **kwargs)
            test_loader = DataLoader(test_set,
                                     batch_size=args.batch_size,
                                     shuffle=False,
                                     **kwargs)

            return train_loader1, train_loader2, val_loader, test_loader, num_class
        else:
            raise NotImplementedError
Example No. 16
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'mydataset':
        train_transform = transforms.Compose([
            transforms.ColorJitter(brightness=0.5,
                                   contrast=0.25,
                                   saturation=0.25),
            transforms.ToTensor(),
            transforms.Normalize(
                # Channel means (R, G, B) followed by the values the original
                # comment labels as variances (R_var 0.061113, G_var 0.048637,
                # B_var 0.041166); note that Normalize expects standard
                # deviations, so these are worth double-checking.
                [0.519401, 0.359217, 0.310136],
                [0.061113, 0.048637, 0.041166]),
        ])

        valid_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                [0.517446, 0.360147, 0.310427],
                # R_var 0.061526, G_var 0.049087, B_var 0.041330 (see note above)
                [0.061526, 0.049087, 0.041330])
        ])

        train_set = mydataset.SegmentDataset(args,
                                             split='train',
                                             transform=train_transform)
        valid_set = mydataset.SegmentDataset(args,
                                             split='valid',
                                             transform=valid_transform)
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        valid_loader = DataLoader(valid_set,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  **kwargs)
        test_loader = None
        return train_loader, valid_loader, test_loader, num_class

    else:
        raise NotImplementedError
Example No. 17
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'rsc':
        train_set = rsc.RSCDataset(
            r"/home/ma-user/work/RSC/data/train/images/",
            r"/home/ma-user/work/RSC/data/train/labels/")
        val_set = rsc.RSCDataset(r"/home/ma-user/work/RSC/data/val/images/",
                                 r"/home/ma-user/work/RSC/data/val/labels/")

        #train_set=rsc.RSCDataset(r"E:\huawei\data\train\images\\",r"E:\huawei\data\train\labels\\")
        #val_set=rsc.RSCDataset(r"E:\huawei\data\val\images\\",r"E:\huawei\data\val\labels\\")

        num_class = 2
        test_loader = None
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
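Per-channel statistics like the means and spreads hard-coded in Example No. 16's Normalize calls are typically measured once over the training images. A sketch of how they can be computed, assuming a dataset that yields (image, label) pairs of ToTensor-scaled CHW float images:

import torch
from torch.utils.data import DataLoader

def channel_stats(dataset, batch_size=32):
    """Per-channel mean and std over all pixels of a CHW float-image dataset."""
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    n_pixels = 0
    channel_sum = torch.zeros(3)
    channel_sq_sum = torch.zeros(3)
    for images, _ in loader:
        b, _, h, w = images.shape
        n_pixels += b * h * w
        channel_sum += images.sum(dim=(0, 2, 3))
        channel_sq_sum += (images ** 2).sum(dim=(0, 2, 3))
    mean = channel_sum / n_pixels
    std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
    return mean, std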
Example No. 18
def make_data_loader(args, **kwargs):
    normalize = transform.Normalize(mean=[0.5, 0.5, 0.5],
                                    std=[0.225, 0.225, 0.225])
    transforms = transform.Compose([
        transform.ToTensor(),
        normalize,
    ])

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'mpgw':
        train_set = SegmentDataset(cfg,
                                   cfg["train_set"],
                                   transforms=transforms)
        val_set = SegmentDataset(cfg,
                                 cfg["valid_set"],
                                 is_train=False,
                                 transforms=transforms)
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class
    else:
        raise NotImplementedError
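The 'mpgw' branch above relies on module-level names cfg and SegmentDataset that are not shown in the snippet. A purely hypothetical shape for cfg, just to make the branch's reads concrete (the real keys depend on that repository's SegmentDataset):

# Hypothetical configuration; keys mirror the lookups in the 'mpgw' branch.
cfg = {
    'train_set': 'data/mpgw/train.txt',   # split consumed by SegmentDataset
    'valid_set': 'data/mpgw/valid.txt',
}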
Example No. 19
if __name__ == "__main__":
    import argparse

    import matplotlib.pyplot as plt
    import numpy as np
    import torch
    from dataloaders.datasets import pascal, sbd
    # CombineDBs is used below; its import path is assumed from the other
    # examples in this document.
    from dataloaders.combine_dbs import CombineDBs
    from dataloaders.utils import decode_segmap

    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    args.base_size = 513
    args.crop_size = 513

    pascal_voc_val = pascal.VOCSegmentation(args, split="val")
    sbd_train = sbd.SBDSegmentation(args, split=["train", "val"])  # renamed so the sbd module is not shadowed
    pascal_voc_train = pascal.VOCSegmentation(args, split="train")

    dataset = CombineDBs([pascal_voc_train, sbd_train], excluded=[pascal_voc_val])
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=2,
                                             shuffle=True,
                                             num_workers=0)

    for ii, sample in enumerate(dataloader):
        for jj in range(sample["image"].size()[0]):
            img = sample["image"].numpy()
            gt = sample["label"].numpy()
            tmp = np.array(gt[jj]).astype(np.uint8)
            segmap = decode_segmap(tmp, dataset="pascal")
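The snippet is cut off mid-loop; a plausible continuation, mirroring the de-normalization pattern visible in Example No. 23, would be:

            # Hypothetical continuation of the inner loop above:
            img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
            img_tmp *= (0.229, 0.224, 0.225)  # undo the Normalize std
            img_tmp += (0.485, 0.456, 0.406)  # undo the Normalize mean
            plt.figure()
            plt.subplot(211)
            plt.imshow(img_tmp)
            plt.subplot(212)
            plt.imshow(segmap)
            plt.show()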
Example No. 20
def make_data_loader(args, **kwargs):
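    # NOTE: the distributed-sampler branch below has been disabled by turning
    # it into a string literal; only the non-distributed path executes.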
    """
    if args.dist:
        print("=> Using Distribued Sampler")
        if args.dataset == 'cityscapes':
            if args.autodeeplab == 'search':
                train_set1, train_set2 = cityscapes.twoTrainSeg(args)
                num_class = train_set1.NUM_CLASSES
                sampler1 = torch.utils.data.distributed.DistributedSampler(train_set1)
                sampler2 = torch.utils.data.distributed.DistributedSampler(train_set2)
                train_loader1 = DataLoader(train_set1, batch_size=args.batch_size, shuffle=False, sampler=sampler1, **kwargs)
                train_loader2 = DataLoader(train_set2, batch_size=args.batch_size, shuffle=False, sampler=sampler2, **kwargs)

            elif args.autodeeplab == 'train':
                train_set = cityscapes.CityscapesSegmentation(args, split='retrain')
                num_class = train_set.NUM_CLASSES
                sampler1 = torch.utils.data.distributed.DistributedSampler(train_set)
                train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, sampler=sampler1, **kwargs)

            else:
                raise Exception('autodeeplab param not set properly')

            val_set = cityscapes.CityscapesSegmentation(args, split='val')
            test_set = cityscapes.CityscapesSegmentation(args, split='test')
            sampler3 = torch.utils.data.distributed.DistributedSampler(val_set)
            sampler4 = torch.utils.data.distributed.DistributedSampler(test_set)
            val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, sampler=sampler3, **kwargs)
            test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, sampler=sampler4, **kwargs)

            if args.autodeeplab == 'search':
                return train_loader1, train_loader2, val_loader, test_loader, num_class
            elif args.autodeeplab == 'train':
                return train_loader, num_class, sampler1
        else:
            raise NotImplementedError

    
    else:
        """

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        if args.autodeeplab == 'search':
            train_set1, train_set2 = cityscapes.twoTrainSeg(args)
            num_class = train_set1.NUM_CLASSES
            train_loader1 = DataLoader(train_set1,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
            train_loader2 = DataLoader(train_set2,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
        elif args.autodeeplab == 'train':
            train_set = cityscapes.CityscapesSegmentation(args,
                                                          split='retrain')
            num_class = train_set.NUM_CLASSES
            train_loader = DataLoader(train_set,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      **kwargs)
        else:
            raise Exception('autodeeplab param not set properly')

        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        if args.autodeeplab == 'search':
            return train_loader1, train_loader2, val_loader, test_loader, num_class
        elif args.autodeeplab == 'train':
            return train_loader, num_class

    elif args.dataset == 'custom':
        if args.autodeeplab == 'search':
            train_set1, train_set2 = custom.twoTrainSeg(args)
            num_class = train_set1.NUM_CLASSES
            train_loader1 = DataLoader(train_set1,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
            train_loader2 = DataLoader(train_set2,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
        elif args.autodeeplab == 'train':
            train_set = custom.CustomSegmentation(args, split='retrain')
            num_class = train_set.NUM_CLASSES
            train_loader = DataLoader(train_set,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      **kwargs)
        else:
            raise Exception('autodeeplab param not set properly')

        val_set = custom.CustomSegmentation(args, split='val')
        test_set = custom.CustomSegmentation(args, split='test')
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        if args.autodeeplab == 'search':
            return train_loader1, train_loader2, val_loader, test_loader, num_class
        elif args.autodeeplab == 'train':
            return train_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'kd':
        train_set = kd.CityscapesSegmentation(args, split='train')
        val_set = kd.CityscapesSegmentation(args, split='val')
        test_set = kd.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader1 = DataLoader(train_set,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   **kwargs)
        train_loader2 = DataLoader(train_set,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader1, train_loader2, val_loader, test_loader, num_class
    else:
        raise NotImplementedError
Example No. 21
def make_data_loader(args, **kwargs):
    crop_size = args.crop_size
    gt_size = args.gt_size
    if args.dataset == 'pascal' or args.dataset == 'click':
        composed_transforms_tr = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)),
            tr.CropFromMask(crop_elems=('image', 'gt'),
                            relax=20,
                            zero_pad=True,
                            jitters_bound=(40, 70)),
            tr.FixedResize(
                resolutions={
                    'crop_image': (crop_size, crop_size),
                    'crop_gt': (gt_size, gt_size)
                }),
            tr.Normalize(elems='crop_image'),
            tr.ToTensor()
        ])
        composed_transforms_val = transforms.Compose([
            tr.CropFromMask(crop_elems=('image', 'gt'),
                            relax=20,
                            zero_pad=True,
                            jitters_bound=(50, 51)),
            tr.FixedResize(
                resolutions={
                    'crop_image': (crop_size, crop_size),
                    'crop_gt': (gt_size, gt_size)
                }),
            tr.Normalize(elems='crop_image'),
            tr.ToTensor()
        ])
        train_set = pascal.VOCSegmentation(split='train',
                                           transform=composed_transforms_tr)
        if args.dataset == 'click':
            train_set.reset_target_list(args)
        val_set = pascal.VOCSegmentation(split='val',
                                         transform=composed_transforms_val)
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        NUM_CLASSES = 2
        return train_loader, val_loader, test_loader, NUM_CLASSES

    elif args.dataset == 'grabcut':
        composed_transforms_val = transforms.Compose([
            tr.CropFromMask(crop_elems=('image', 'gt'),
                            relax=20,
                            zero_pad=True,
                            jitters_bound=(50, 51)),
            tr.FixedResize(
                resolutions={
                    'crop_image': (crop_size, crop_size),
                    'crop_gt': (gt_size, gt_size)
                }),
            tr.Normalize(elems='crop_image'),
            tr.ToTensor()
        ])
        val_set = grab_berkeley_eval.GrabBerkely(
            which='grabcut', transform=composed_transforms_val)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        train_loader = None
        NUM_CLASSES = 2
        return train_loader, val_loader, test_loader, NUM_CLASSES

    elif args.dataset == 'bekeley':
        composed_transforms_val = transforms.Compose([
            tr.CropFromMask(crop_elems=('image', 'gt'),
                            relax=20,
                            zero_pad=True,
                            jitters_bound=(50, 51)),
            tr.FixedResize(
                resolutions={
                    'crop_image': (crop_size, crop_size),
                    'crop_gt': (gt_size, gt_size)
                }),
            tr.Normalize(elems='crop_image'),
            tr.ToTensor()
        ])
        val_set = grab_berkeley_eval.GrabBerkely(
            which='bekeley', transform=composed_transforms_val)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        train_loader = None
        NUM_CLASSES = 2
        return train_loader, val_loader, test_loader, NUM_CLASSES

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        val_set = coco_eval.COCOSegmentation(split='val', cat=args.coco_part)
        num_class = 2
        train_loader = None
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    # elif args.dataset == 'click':
    #     train_set = click_dataset.ClickDataset(split='train')
    #     val_set = click_dataset.ClickDataset(split='val')
    #     num_class = 2
    #     train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
    #     val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
    #     test_loader = None
    #     return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
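For the evaluation-only branches above ('grabcut', 'bekeley', and the COCO evaluation case), only a validation loader comes back. A usage sketch with hypothetical attribute values; the attribute names mirror what the function actually reads (crop_size and gt_size at the top, batch_size in every DataLoader):

import argparse

# Hypothetical args for the evaluation-only 'grabcut' branch.
args = argparse.Namespace(dataset='grabcut', crop_size=512, gt_size=128,
                          batch_size=1)
train_loader, val_loader, test_loader, num_classes = make_data_loader(
    args, num_workers=2, pin_memory=True)
assert train_loader is None and test_loader is None and num_classes == 2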
Example No. 22
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'carvana':
        dataset_params = {
            'folds': args.folds,
            'fold_num': args.fold,
        }
        transform_train = make_augmentation_transform(
            'crop_fliplr_affine_color')
        transform_valid = make_augmentation_transform('crop_fliplr')
        train_set = dataset.CarvanaTrainDataset(**dataset_params,
                                                mode='train',
                                                transform=transform_train)
        val_set = dataset.CarvanaTrainDataset(**dataset_params,
                                              mode='valid',
                                              transform=transform_valid)
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set, batch_size=1, shuffle=False, **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, 2

    else:
        raise NotImplementedError
Example No. 23
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from dataloaders.datasets import pascal, sbd
    # CombineDBs is used below; import path assumed as in Example No. 19.
    from dataloaders.combine_dbs import CombineDBs
    import torch
    import numpy as np
    from dataloaders.utils import decode_segmap
    import argparse

    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    args.base_size = 513
    args.crop_size = 513

    pascal_voc_val = pascal.VOCSegmentation(args, split='val')
    sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])  # renamed so the sbd module is not shadowed
    pascal_voc_train = pascal.VOCSegmentation(args, split='train')

    dataset = CombineDBs([pascal_voc_train, sbd_train], excluded=[pascal_voc_val])
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)

    for ii, sample in enumerate(dataloader):
        for jj in range(sample["image"].size()[0]):
            img = sample['image'].numpy()
            gt = sample['label'].numpy()
            tmp = np.array(gt[jj]).astype(np.uint8)
            segmap = decode_segmap(tmp, dataset='pascal')
            img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
            img_tmp *= (0.229, 0.224, 0.225)
            img_tmp += (0.485, 0.456, 0.406)
Example No. 24
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'rip':
        classes = {'full': 7, 'level1': 2, 'level2': 3, 'level3': 5}
        import os
        from mypath import Path
        data_root = Path.db_root_dir(args.dataset)
        root = os.path.join(data_root, 'RipTrainingAllData')

        patches, level = args.rip_mode.split('-')
        if patches == 'patches':
            patches = 'COCOJSONPatches'
        elif patches == 'patches_v1':
            patches = 'COCOJSONPatches_v1'
        else:
            patches = 'COCOJSONs'
        # patches = 'COCOJSONPatches' if patches == 'patches' else 'COCOJSONs'
        train_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold',
                                      'train_1.json')
        val_ann_file = os.path.join(data_root, patches, level, 'cv_5_fold',
                                    'val_1.json')

        train_set = rip.RIPSegmentation(args,
                                        split='train',
                                        root=root,
                                        ann_file=train_ann_file)
        val_set = rip.RIPSegmentation(args,
                                      split='val',
                                      root=root,
                                      ann_file=val_ann_file)
        num_classes = classes[level]
        # NOTE: drop_last=True here to avoid situation when batch_size=1 which causes BatchNorm2d errors
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_classes

    else:
        raise NotImplementedError
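The drop_last=True note in the 'rip' branch deserves a concrete illustration: without it, a dataset whose length is not a multiple of the batch size yields a trailing short batch (possibly of size 1), which BatchNorm layers in training mode may reject. A self-contained demonstration of what the flag changes:

import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.randn(10, 3, 4, 4))
print(len(DataLoader(ds, batch_size=4, drop_last=False)))  # 3 batches: 4, 4, 2
print(len(DataLoader(ds, batch_size=4, drop_last=True)))   # 2 batches: 4, 4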
Example No. 25
def make_data_loader(args, **kwargs):

    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        if args.autodeeplab == 'search':
            train_set1, train_set2 = cityscapes.twoTrainSeg(args)
            num_class = train_set1.NUM_CLASSES
            train_loader1 = DataLoader(train_set1,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
            train_loader2 = DataLoader(train_set2,
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       **kwargs)
        elif args.autodeeplab == 'train':
            train_set = cityscapes.CityscapesSegmentation(args, split='train')
            num_class = train_set.NUM_CLASSES
            train_loader = DataLoader(train_set,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      **kwargs)
        else:
            raise Exception('autodeeplab param not set properly')

        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        if args.autodeeplab == 'search':
            return train_loader1, train_loader2, val_loader, test_loader, num_class
        elif args.autodeeplab == 'train':
            return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'kd':
        train_set = kd.CityscapesSegmentation(args, split='train')
        val_set = kd.CityscapesSegmentation(args, split='val')
        test_set = kd.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader1 = DataLoader(train_set,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   **kwargs)
        train_loader2 = DataLoader(train_set,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader1, train_loader2, val_loader, test_loader, num_class
    else:
        raise NotImplementedError
Example No. 26
def make_data_loader(args, **kwargs):
    if args.dataset == 'pascal':
        train_set = pascal.VOCSegmentation(args, split='train')
        val_set = pascal.VOCSegmentation(args, split='val')
        if args.use_sbd:
            sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
            train_set = combine_dbs.CombineDBs([train_set, sbd_train],
                                               excluded=[val_set])

        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'cityscapes':
        train_set = cityscapes.CityscapesSegmentation(args, split='train')
        val_set = cityscapes.CityscapesSegmentation(args, split='val')
        test_set = cityscapes.CityscapesSegmentation(args, split='test')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'apollo' or args.dataset == 'apollo_seg':
        num_class = args.num_class
        min_depth = args.min_depth
        max_depth = args.max_depth
        train_n_val = apollo.ApolloDepthSegmentation(args,
                                                     split='train',
                                                     num_class=num_class,
                                                     min_depth=min_depth,
                                                     max_depth=max_depth)
        n_train = int(len(train_n_val) * 0.8)
        n_val = len(train_n_val) - n_train
        train_set, val_set = random_split(train_n_val, [n_train, n_val])
        # val_set = apollo.ApolloDepthSegmentation(args, split='val', num_class=num_class,
        #                                          min_depth=min_depth, max_depth=max_depth)
        test_set = apollo.ApolloDepthSegmentation(args,
                                                  split='test',
                                                  num_class=num_class,
                                                  min_depth=min_depth,
                                                  max_depth=max_depth)
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'farsight' or args.dataset == 'farsight_seg':

        num_class = args.num_class
        min_depth = args.min_depth
        max_depth = args.max_depth
        train_n_val = farsight.FarsightDepthSegmentation(args,
                                                         split='train',
                                                         num_class=num_class,
                                                         min_depth=min_depth,
                                                         max_depth=max_depth)
        n_train = int(len(train_n_val) * 0.8)
        n_val = len(train_n_val) - n_train
        train_set, val_set = random_split(train_n_val, [n_train, n_val])
        # val_set = farsight.FarsightDepthSegmentation(args, split='val', num_class=num_class,
        #                                          min_depth=min_depth, max_depth=max_depth)
        test_set = farsight.FarsightDepthSegmentation(args,
                                                      split='test',
                                                      num_class=num_class,
                                                      min_depth=min_depth,
                                                      max_depth=max_depth)
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = DataLoader(test_set,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 **kwargs)

        return train_loader, val_loader, test_loader, num_class

    elif args.dataset == 'coco':
        train_set = coco.COCOSegmentation(args, split='train')
        val_set = coco.COCOSegmentation(args, split='val')
        num_class = train_set.NUM_CLASSES
        train_loader = DataLoader(train_set,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  **kwargs)
        val_loader = DataLoader(val_set,
                                batch_size=args.batch_size,
                                shuffle=False,
                                **kwargs)
        test_loader = None
        return train_loader, val_loader, test_loader, num_class

    else:
        raise NotImplementedError
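The apollo and farsight branches above carve a validation set out of the training split with random_split, which draws a fresh permutation on every run. When a fixed split is needed across runs (e.g., to compare checkpoints), a seeded generator can be passed; a minimal sketch:

import torch
from torch.utils.data import TensorDataset, random_split

full = TensorDataset(torch.arange(100))
n_train = int(len(full) * 0.8)
train_set, val_set = random_split(
    full, [n_train, len(full) - n_train],
    generator=torch.Generator().manual_seed(42))
assert len(train_set) == 80 and len(val_set) == 20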