Example #1
def gen_train_val_loader(labeled_scene_index, **kwargs):
    # Shuffle a copy so the caller's index array is not mutated in place.
    labeled_scene_index_shuf = labeled_scene_index.copy()
    random.shuffle(labeled_scene_index_shuf)
    # Hard-coded split: of the 28 labeled scenes, the last 10 go to validation.
    train_labeled_scene_index = labeled_scene_index_shuf[:-10]
    val_labeled_scene_index = labeled_scene_index_shuf[-10:]

    print(len(train_labeled_scene_index), len(val_labeled_scene_index))
    print(train_labeled_scene_index[0], val_labeled_scene_index[0])

    loadkwargs = {
        'batch_size': 2,
        'shuffle': True,
        'collate_fn': collate_fn,
        'num_workers': 2,
    }

    labeled_trainset = LabeledDataset(scene_index=train_labeled_scene_index,
                                      **kwargs)
    print(len(labeled_trainset))
    trainloader = torch.utils.data.DataLoader(labeled_trainset, **loadkwargs)

    labeled_valset = LabeledDataset(scene_index=val_labeled_scene_index,
                                    **kwargs)
    print(len(labeled_valset))
    valloader = torch.utils.data.DataLoader(labeled_valset, **loadkwargs)

    result = {"train": trainloader, "val": valloader}
    return result
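A hypothetical call, assuming the image_folder/annotation_file keyword arguments that LabeledDataset takes in the other examples on this page (paths are placeholders):

labeled_scene_index = np.arange(106, 134)  # the 28 labeled scenes
loaders = gen_train_val_loader(labeled_scene_index,
                               image_folder='./data',
                               annotation_file='./data/annotation.csv',
                               transform=torchvision.transforms.ToTensor(),
                               extra_info=True)
trainloader, valloader = loaders['train'], loaders['val']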
Example #2
def LoadData(depth_folder, image_folder, annotation_csv, args):
    train_labeled_scene_index = np.arange(106, 131)  # scenes 106-130 for training
    val_labeled_scene_index = np.arange(131, 134)  # scenes 131-133 for validation
    labeled_trainset = LabeledDataset(depth_folder=depth_folder,
                                      image_folder=image_folder,
                                      annotation_file=annotation_csv,
                                      scene_index=train_labeled_scene_index,
                                      transform=(transform, transform_depth),
                                      extra_info=True)

    labeled_valset = LabeledDataset(depth_folder=depth_folder,
                                    image_folder=image_folder,
                                    annotation_file=annotation_csv,
                                    scene_index=val_labeled_scene_index,
                                    transform=(transform, transform_depth),
                                    extra_info=True)

    trainloader = torch.utils.data.DataLoader(
        labeled_trainset,
        batch_size=args.per_gpu_batch_size,
        shuffle=True,
        num_workers=4,
        collate_fn=collate_fn,
        pin_memory=True)
    valloader = torch.utils.data.DataLoader(labeled_valset,
                                            batch_size=args.per_gpu_batch_size,
                                            shuffle=True,
                                            num_workers=4,
                                            collate_fn=collate_fn,
                                            pin_memory=True)

    return trainloader, valloader
Example #3
def LoadData(image_folder, annotation_csv):
    train_labeled_scene_index = np.arange(106, 128)
    val_labeled_scene_index = np.arange(128, 134)
    labeled_trainset = LabeledDataset(image_folder=image_folder,
                                      annotation_file=annotation_csv,
                                      scene_index=train_labeled_scene_index,
                                      transform=transform,
                                      extra_info=True)

    labeled_valset = LabeledDataset(image_folder=image_folder,
                                    annotation_file=annotation_csv,
                                    scene_index=val_labeled_scene_index,
                                    transform=transform,
                                    extra_info=True)

    trainloader = torch.utils.data.DataLoader(labeled_trainset, batch_size=8,
                                              shuffle=True, num_workers=2,
                                              collate_fn=collate_fn)
    valloader = torch.utils.data.DataLoader(labeled_valset, batch_size=8,
                                            shuffle=True, num_workers=2,
                                            collate_fn=collate_fn)

    return trainloader, valloader
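Every loader on this page passes a custom collate_fn that is never shown. A minimal sketch consistent with how batches are unpacked elsewhere (e.g. sample, target, road_image, extra in Example #9) simply transposes the batch into per-field tuples, since bounding boxes vary in size and cannot be stacked:

def collate_fn(batch):
    # Turn a list of per-sample tuples into a tuple of per-field tuples.
    return tuple(zip(*batch))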
Example #4
def get_dataloader(batch_size, indices, data_dir):
    labeled_scene_index = np.arange(106, 134)

    val_sampler = SubsetRandomSampler(indices)

    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.5563, 0.6024, 0.6325),
                                         (0.3195, 0.3271, 0.3282))
    ])

    image_folder = data_dir
    annotation_csv = f'{data_dir}/annotation.csv'
    labeled_trainset = LabeledDataset(image_folder=image_folder,
                                      annotation_file=annotation_csv,
                                      scene_index=labeled_scene_index,
                                      transform=transform,
                                      extra_info=True)

    val_loader = torch.utils.data.DataLoader(labeled_trainset,
                                             batch_size=batch_size,
                                             num_workers=8,
                                             pin_memory=True,
                                             collate_fn=collate_fn,
                                             sampler=val_sampler)
    return val_loader
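A hypothetical call, assuming the caller has already picked which sample indices form the validation subset (the function loads every labeled scene and lets the SubsetRandomSampler restrict iteration):

val_indices = list(range(3000, 3528))  # placeholder hold-out; derive from len(dataset) in practice
val_loader = get_dataloader(batch_size=4, indices=val_indices, data_dir='./data')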
Example #5
def LoadData(image_folder, annotation_csv, args):
    train_labeled_scene_index = np.arange(106, 131)
    val_labeled_scene_index = np.arange(131, 134)

    extra_transforms = [data_transforms, data_jitter_brightness, data_jitter_hue,
                        data_jitter_contrast, data_jitter_saturation]

    # One LabeledDataset per augmentation; concatenating them multiplies the
    # effective training set size by the number of transforms.
    extra_datasets = []
    for t in extra_transforms:
        extra_datasets.append(LabeledDataset(image_folder=image_folder,
                                             annotation_file=annotation_csv,
                                             scene_index=train_labeled_scene_index,
                                             transform=t,
                                             extra_info=True))
    trainloader = torch.utils.data.DataLoader(
        torch.utils.data.ConcatDataset(extra_datasets),
        batch_size=args.per_gpu_batch_size, shuffle=True, num_workers=4,
        collate_fn=collate_fn, pin_memory=True)

    labeled_valset = LabeledDataset(image_folder=image_folder,
                                    annotation_file=annotation_csv,
                                    scene_index=val_labeled_scene_index,
                                    transform=transform,
                                    extra_info=True)

    valloader = torch.utils.data.DataLoader(
        labeled_valset, batch_size=args.per_gpu_batch_size, shuffle=True,
        num_workers=4, collate_fn=collate_fn, pin_memory=True)

    return trainloader, valloader
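The data_jitter_* transforms are defined elsewhere in that project; a plausible sketch of two of them (the jitter strengths here are assumptions, not the original values):

data_jitter_brightness = torchvision.transforms.Compose([
    torchvision.transforms.ColorJitter(brightness=0.5),  # assumed strength
    torchvision.transforms.ToTensor(),
])
data_jitter_hue = torchvision.transforms.Compose([
    torchvision.transforms.ColorJitter(hue=0.3),  # assumed strength
    torchvision.transforms.ToTensor(),
])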
Example #6
    def prepare_data(self):
        # The scenes from 106 - 133 are labeled
        labeled_scene_index = np.arange(106, 134)

        # Original split, used during training and validation: keep the last
        # 8 scenes aside for validation.
        # self._train_labeled_scene_index = labeled_scene_index[:-8]
        # self._valid_labeled_scene_index = labeled_scene_index[-8:]

        # Modification for submission: train on the entire labeled set. The
        # validation split is kept only so the rest of the code still runs;
        # its metrics are not meaningful because it overlaps the training
        # scenes.
        self._train_labeled_scene_index = labeled_scene_index[:]
        self._valid_labeled_scene_index = labeled_scene_index[-14:]

        self._static_transform = torchvision.transforms.Compose([
            torchvision.transforms.Resize((224, 224)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.54, 0.60, 0.63),
                                             (0.34, 0.34, 0.34)),
        ])

        self.labeled_trainset = LabeledDataset(
            image_folder=IMAGE_FOLDER,
            annotation_file=ANNOTATION_CSV,
            scene_index=self._train_labeled_scene_index,
            transform=self._static_transform,
            extra_info=False,
        )

        self.labeled_validset = LabeledDataset(
            image_folder=IMAGE_FOLDER,
            annotation_file=ANNOTATION_CSV,
            scene_index=self._valid_labeled_scene_index,
            transform=self._static_transform,
            extra_info=False,
        )
Example #7
    def read_data(self, labeled=True):  # labeled: whether to read labeled data
        """
        Load data and split to train/validation
        --------------------
        Return:
        train - training dataset
        val   - validation dataset
        """

        train = None
        val = None

        if labeled:
            subset = 'labeled'
            indices = self.labeled_scene_index
        else:
            subset = 'unlabeled'
            indices = self.unlabeled_scene_index

        cache_file = os.path.join(self.cached, subset+'.pt')

        if os.path.exists(cache_file):
            log.info('Loading from cached file: {}'.format(cache_file))
            loaded = torch.load(cache_file)
            train, val = loaded['train'], loaded['val']
        else:
            if labeled:
                dataset = LabeledDataset(image_folder=self.data_dir,
                                         annotation_file=self.annotation_csv,
                                         scene_index=indices,
                                         transform=self.transform,
                                         extra_info=False)
            else:
                dataset = UnlabeledDataset(image_folder=self.data_dir,
                                           scene_index=indices,
                                           first_dim='sample',
                                           transform=self.transform)

            val_size = int(len(dataset) * self.split)
            train_size = len(dataset) - val_size

            train, val = torch.utils.data.random_split(dataset,
                                                       [train_size, val_size])

        return train, val
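The cached branch above expects a dict with 'train' and 'val' entries, but nothing in this snippet ever writes it. A minimal sketch of the missing save step (its exact placement in the original project is an assumption):

torch.save({'train': train, 'val': val}, cache_file)  # hypothetical: persist the split so later runs hit the cache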
Example #8
    def __getitem__(self, index):
        sample, target, road_image = LabeledDataset.__getitem__(self, index)
        road_image = road_image.int()

        for i, bb in enumerate(target['bounding_box']):
            # Re-order the four corners into a closed polygon.
            point_sequence = torch.stack(
                [bb[:, 0], bb[:, 1], bb[:, 3], bb[:, 2], bb[:, 0]])
            # Ego-frame meters to pixels: 10 px per meter, ego vehicle at
            # pixel (400, 400), y flipped for image coordinates.
            x = (point_sequence.T[0] * 10 + 400).int()
            y = (-point_sequence.T[1] * 10 + 400).int()

            bottom = y.min()
            top = y.max()
            left = x.min()
            right = x.max()

            # Paint the box's axis-aligned extent with its category label.
            road_image[bottom:top, left:right] = target['category'][i]

        return sample, road_image.long()
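As a sanity check of the meters-to-pixels mapping above: a corner at (12.3, -4.5) meters in the ego frame lands at pixel (523, 445), with the ego vehicle at pixel (400, 400):

corner = torch.tensor([12.3, -4.5])     # (x, y) in meters, ego frame
px = int(corner[0] * 10 + 400)          # 523
py = int(-corner[1] * 10 + 400)         # 445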
Example #9
def test():
    image_folder = '../dl/data'
    annotation_csv = '../dl/data/annotation.csv'
    labeled_scene_index = np.arange(106, 128)
    labeled_scene_index_test = np.arange(130, 131)
    transform = get_transform()
    labeled_trainset = LabeledDataset(image_folder=image_folder,
                                      annotation_file=annotation_csv,
                                      scene_index=labeled_scene_index,
                                      transform=transform,
                                      extra_info=True)

    trainloader = torch.utils.data.DataLoader(labeled_trainset,
                                              batch_size=2,
                                              shuffle=True,
                                              num_workers=0,
                                              collate_fn=collate_fn)

    sample, target, road_image, extra = next(iter(trainloader))
    loader = ModelLoader()
Example #10
parser.add_argument('--testset', action='store_true')
parser.add_argument('--verbose', action='store_true')
opt = parser.parse_args()

image_folder = opt.data_dir
annotation_csv = f'{opt.data_dir}/annotation.csv'

if opt.testset:
    labeled_scene_index = np.arange(134, 148)
else:
    labeled_scene_index = np.arange(120, 134)

# For bounding boxes task
labeled_trainset_task1 = LabeledDataset(image_folder=image_folder,
                                        annotation_file=annotation_csv,
                                        scene_index=labeled_scene_index,
                                        transform=get_transform_task1(),
                                        extra_info=False)
dataloader_task1 = torch.utils.data.DataLoader(labeled_trainset_task1,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=0)
# For road map task
labeled_trainset_task2 = LabeledDataset(image_folder=image_folder,
                                        annotation_file=annotation_csv,
                                        scene_index=labeled_scene_index,
                                        transform=get_transform_task2(),
                                        extra_info=False)
dataloader_task2 = torch.utils.data.DataLoader(labeled_trainset_task2,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=0)
Example #11
    device = 'cuda'

    image_folder = 'data'
    annotation_csv = 'data/annotation.csv'

    unlabeled_scene_index = np.arange(106)
    labeled_scene_index = np.arange(106, 126)
    labeled_scene_index_val = np.arange(126, 134)

    epoch = 50
    batchsize = 1
    lr = 0.00001
    transform = torchvision.transforms.ToTensor()
    labeled_trainset = LabeledDataset(image_folder=image_folder,
                                      annotation_file=annotation_csv,
                                      scene_index=labeled_scene_index,
                                      transform=transform,
                                      extra_info=True
                                      )
    trainloader = torch.utils.data.DataLoader(labeled_trainset, batch_size=batchsize, shuffle=True, num_workers=1,
                                              collate_fn=collate_fn)

    labeled_valset = LabeledDataset(image_folder=image_folder,
                                    annotation_file=annotation_csv,
                                    scene_index=labeled_scene_index_val,
                                    transform=transform,
                                    extra_info=True
                                    )
    valloader = torch.utils.data.DataLoader(labeled_valset, batch_size=batchsize, shuffle=True, num_workers=2,
                                            collate_fn=collate_fn)
    model = YOLOv1_resnet().to(device)
    criterion = Loss_yolov1()
Example #12
    # The scenes from 106 - 133 are labeled
    # Divide the labeled_scene_index into two subsets (training and validation)
    labeled_scene_index_tr = np.arange(106, 129)
    labeled_scene_index_ts = np.arange(129, 134)

    def get_transform():
        return torchvision.transforms.Compose([
            torchvision.transforms.Resize((224, 224)),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465),
                                             (0.2023, 0.1994, 0.2010)),
        ])

    labeled_trainset = LabeledDataset(image_folder=image_folder,
                                      annotation_file=annotation_csv,
                                      scene_index=labeled_scene_index_tr,
                                      transform=get_transform(),
                                      extra_info=True)
    train_loader = torch.utils.data.DataLoader(labeled_trainset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=4,
                                               collate_fn=collate_fn)

    labeled_testset = LabeledDataset(image_folder=image_folder,
                                     annotation_file=annotation_csv,
                                     scene_index=labeled_scene_index_ts,
                                     transform=get_transform(),
                                     extra_info=True)
    test_loader = torch.utils.data.DataLoader(labeled_testset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4,
                                              collate_fn=collate_fn)
Example #13
def get_loaders(
        data_type,
        image_folder='/content/drive/My Drive/DL Project/data',
        annotation_file='/content/drive/My Drive/DL Project/data/annotation.csv',
        split_folder='data_utils',
        batch_size=4,
        extra_info=False,
        visual=False):
    """
    Args:
        type (string): 'labeled' or 'unlabeled'
        image_folder (string, optional): the location of the image folders
        annotation_file (string, optional): the location of the annotations
        split_folder (string, optional): the location of the split folder
        batch_size (int, optional): how many samples to load per batch
        extra_info (Boolean, optional): whether you want the extra information
    """

    assert data_type in ['labeled', 'unlabeled'], "Set correct data_type"

    if data_type == 'labeled':
        with open(os.path.join(split_folder,
                               'labeled_scene_index_train.p'), 'rb') as f:
            train_labeled_scene_index = pickle.load(f)
        with open(os.path.join(split_folder,
                               'labeled_scene_index_val.p'), 'rb') as f:
            val_labeled_scene_index = pickle.load(f)
        if visual:
            transform = transforms.ToTensor()
        else:
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.698, 0.718, 0.730),
                                     (0.322, 0.313, 0.308))
            ])

        trainset = LabeledDataset(image_folder=image_folder,
                                  annotation_file=annotation_file,
                                  scene_index=train_labeled_scene_index,
                                  transform=transform,
                                  extra_info=extra_info)
        valset = LabeledDataset(image_folder=image_folder,
                                annotation_file=annotation_file,
                                scene_index=val_labeled_scene_index,
                                transform=transform,
                                extra_info=extra_info)

        trainloader = torch.utils.data.DataLoader(trainset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=2,
                                                  collate_fn=collate_fn)

        valloader = torch.utils.data.DataLoader(valset,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=2,
                                                collate_fn=collate_fn)
    else:
        with open(os.path.join(split_folder,
                               'unlabeled_scene_index_train.p'), 'rb') as f:
            train_unlabeled_scene_index = pickle.load(f)
        with open(os.path.join(split_folder,
                               'unlabeled_scene_index_val.p'), 'rb') as f:
            val_unlabeled_scene_index = pickle.load(f)
        if visual:
            transform = transforms.ToTensor()
        else:
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.548, 0.597, 0.630),
                                     (0.339, 0.340, 0.342))
            ])

        trainset = UnlabeledDataset(
            image_folder=image_folder,
            first_dim='sample',
            scene_index=train_unlabeled_scene_index,
            transform=transform,
        )
        valset = UnlabeledDataset(image_folder=image_folder,
                                  first_dim='sample',
                                  scene_index=val_unlabeled_scene_index,
                                  transform=transform)

        trainloader = torch.utils.data.DataLoader(trainset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=2)

        valloader = torch.utils.data.DataLoader(valset,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=2)

    return trainloader, valloader
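A hypothetical call (the pickled scene-index files under split_folder must already exist; paths are placeholders):

trainloader, valloader = get_loaders('labeled',
                                     image_folder='./data',
                                     annotation_file='./data/annotation.csv',
                                     split_folder='data_utils',
                                     batch_size=4)
sample, target, road_image = next(iter(trainloader))  # assumes extra_info=False yields three fields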
Example #14
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
                                           normalize
                                           ])


kwargs = {
    #'first_dim': 'sample',
    'transform': transform,
    'image_folder': image_folder,
    'annotation_file': annotation_csv,
    'extra_info': True}

#dataset_train = LabeledDataset_RCNN (scene_index=train_labeled_scene_index, **kwargs)
#dataset_val = LabeledDataset_RCNN (scene_index=val_labeled_scene_index, **kwargs)

dataset_train = LabeledDataset(scene_index=train_labeled_scene_index, **kwargs)
dataset_val = LabeledDataset(scene_index=val_labeled_scene_index, **kwargs)

train_data_loader = torch.utils.data.DataLoader(
    dataset_train, batch_size=30, shuffle=False, num_workers=4,
    collate_fn=collate_fn)

val_data_loader = torch.utils.data.DataLoader(
    dataset_val, batch_size=30, shuffle=False, num_workers=4,
    collate_fn=collate_fn)
Example #15
unlabeled_scene_index = np.arange(106)
labeled_scene_index_train = np.arange(106, 130)
labeled_scene_index_test = np.arange(130, 134)
"""
Evaluate trained generator model on test data (Qualitative)
"""

labeled_testset = LabeledDataset(image_folder=data_dir,
                                 annotation_file=annotation_csv,
                                 scene_index=labeled_scene_index_test,
                                 img_transform=transforms.Compose([
                                     transforms.Resize((256, 256)),
                                     transforms.ToTensor(),
                                     transforms.Normalize(mean=(0.5, ),
                                                          std=(0.5, ))
                                 ]),
                                 map_transform=transforms.Compose([
                                     transforms.ToPILImage(),
                                     transforms.Resize(256),
                                     transforms.ToTensor()
                                 ]),
                                 extra_info=True)

testloader = torch.utils.data.DataLoader(labeled_testset,
                                         batch_size=1,
                                         shuffle=True,
                                         num_workers=2,
                                         collate_fn=collate_fn,
                                         pin_memory=True)
Example #16
    print("Train scenes: {} \nVal scenes: {}".format(len(labeled_scene_index_train), len(labeled_scene_index_val)))


    if opt.depth and not opt.precomputed:
        # transform = MonoDepthEstimator("/scratch/dy1078/monodepth2/models/mono_model/models/weights_13/")
        transform = MonoDepthEstimator("./models/weights_13/")
    else:
        transform = torchvision.transforms.ToTensor()


    # The labeled dataset can only be retrieved by sample.
    # And all the returned data are tuple of tensors, since bounding boxes may have different size
    labeled_trainset = LabeledDataset(image_folder=image_folder,
                                      annotation_file=annotation_csv,
                                      scene_index=labeled_scene_index_train,
                                      transform=transform,
                                      extra_info=False,
                                      precomputed=opt.precomputed)
    trainloader = torch.utils.data.DataLoader(labeled_trainset, batch_size=2,
                                              shuffle=True, num_workers=16,
                                              collate_fn=collate_fn)
    labeled_valset = LabeledDataset(image_folder=image_folder,
                                    annotation_file=annotation_csv,
                                    scene_index=labeled_scene_index_val,
                                    transform=transform,
                                    extra_info=False,
                                    precomputed=opt.precomputed)
    valloader = torch.utils.data.DataLoader(labeled_valset, batch_size=2,
                                            shuffle=True, num_workers=16,
                                            collate_fn=collate_fn)