def build_clevr_mv_dataset(
    args, configs, image_root, depth_root, scenes_json, questions_json
):
    """Build a multi-view CLEVR dataset with a minimal image transform.

    The transform only converts to a tensor and recenters pixels to
    [-0.5, 0.5]; the bbox-aware resize/normalize steps used elsewhere in
    this file are deliberately not applied here.
    """
    import jactorch.transforms.bbox as T

    image_transform = T.Compose([
        T.ToTensor(),
        T.Lambda(lambda img, bbox: (img - 0.5, bbox)),
    ])

    from nscl.datasets.datasets import NSCLMultiviewDataset

    return NSCLMultiviewDataset(
        scenes_json,
        questions_json,
        image_root=image_root,
        image_transform=image_transform,
        depth_root=depth_root,
        vocab_json=args.data_vocab_json,
    )
# Example #2
def gen_bbox_transform(img_size):
    """Return a function mapping (img, bbox) to a bbox tensor resized to img_size.

    Uses the module-level transform namespace ``T``; the image itself is
    discarded, only the transformed bbox is returned (as a torch tensor).
    """
    pipeline = T.Compose([
        T.NormalizeBbox(),
        T.Resize(img_size),
        T.DenormalizeBbox(),
    ])

    def _apply(img, bbox):
        bbox = pipeline(img, bbox)[1]
        return torch.from_numpy(bbox)

    return _apply
# Example #3
def build_concept_quantization_clevr_dataset(args, configs, image_root, scenes_json):
    """Build the CLEVR dataset used for concept quantization.

    Applies the standard bbox-aware resize pipeline followed by ImageNet
    mean/std normalization before handing images to the dataset.
    """
    import jactorch.transforms.bbox as T

    steps = [
        T.NormalizeBbox(),
        T.Resize(configs.data.image_size),
        T.DenormalizeBbox(),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    image_transform = T.Compose(steps)

    from nscl.datasets.datasets import ConceptQuantizationDataset

    return ConceptQuantizationDataset(
        scenes_json, image_root=image_root, image_transform=image_transform
    )
# Example #4
def build_clevr_dataset(args, configs, image_root, scenes_json, questions_json):
    """Build the standard (single-view) NSCL CLEVR dataset.

    Images go through the bbox-aware resize pipeline plus ImageNet
    normalization; the vocabulary comes from ``args.data_vocab_json``.
    """
    import jactorch.transforms.bbox as T

    pipeline = [
        T.NormalizeBbox(),
        T.Resize(configs.data.image_size),
        T.DenormalizeBbox(),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    image_transform = T.Compose(pipeline)

    from nscl.datasets.datasets import NSCLDataset

    dataset = NSCLDataset(
        scenes_json,
        questions_json,
        image_root=image_root,
        image_transform=image_transform,
        vocab_json=args.data_vocab_json,
    )
    return dataset
    def __init__(self, args, phase):
        """Set up one split ('train' or 'valid') of the dataset.

        Args:
            args: parsed options object; fields read here include data_dir,
                label_dir, prp_dir, ann_dir, tube_dir, train_valid_ratio,
                n_rollout, gen_valid_idx, visualize_flag, version, img_size.
            phase: split name; must be 'train' or 'valid'.

        Raises:
            AssertionError: if ``phase`` is neither 'train' nor 'valid'.
        """
        self.args = args
        self.phase = phase
        self.loader = default_loader
        self.data_dir = args.data_dir
        self.label_dir = args.label_dir
        self.prp_dir = args.prp_dir
        self.ann_dir = args.ann_dir
        self.tube_dir = args.tube_dir

        # Per-phase file listing the rollouts considered valid.
        self.valid_idx_lst = 'valid_idx_' + self.phase + '.txt'
        # Provisional frame size used while the gen_valid_idx_* /
        # gen_predict_input calls below run; overwritten to 480x320 at the
        # end of __init__. NOTE(review): looks intentional but confirm the
        # index-generation code really expects 150x100.
        self.H = 100
        self.W = 150
        self.bbox_size = 24

        # Split rollouts into train/valid by ratio; 'train' takes the first
        # n_train rollouts, 'valid' the remainder.
        ratio = self.args.train_valid_ratio
        n_train = round(self.args.n_rollout * ratio)
        if phase == 'train':
            self.st_idx = 0
            self.n_rollout = n_train
        elif phase == 'valid':
            self.st_idx = n_train
            self.n_rollout = self.args.n_rollout - n_train
        else:
            raise AssertionError("Unknown phase")

        # Either (re)generate the valid-index file — with variants for
        # visualization mode and data version 'v3' — or load a previously
        # generated one.
        if self.args.gen_valid_idx:
            if self.args.visualize_flag == 1:
                self.gen_predict_input()
            elif self.args.version == 'v3':
                self.gen_valid_idx_from_tube_info_v3()
            else:
                self.gen_valid_idx_from_tube_info()
        else:
            self.read_valid_idx()

        # Standard resize + ImageNet normalization for loaded frames.
        self.img_transform = T.Compose([
            T.Resize(self.args.img_size),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        # Final frame size used after initialization (replaces the 150x100
        # values set above).
        self.W = 480
        self.H = 320
# Example #6
def build_concept_retrieval_clevrer_dataset(args, configs, program, image_root,
                                            scenes_json):
    """Build a concept-retrieval dataset over CLEVRER scenes for ``program``.

    Uses the same bbox-aware resize + ImageNet normalization pipeline as
    the other CLEVR dataset builders in this file.
    """
    import jactorch.transforms.bbox as T

    transform_steps = [
        T.NormalizeBbox(),
        T.Resize(configs.data.image_size),
        T.DenormalizeBbox(),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    image_transform = T.Compose(transform_steps)

    from nscl.datasets.datasets import ConceptRetrievalDataset

    return ConceptRetrievalDataset(
        program,
        scenes_json,
        image_root=image_root,
        image_transform=image_transform,
    )
# Example #7
    def __init__(self, scenes_json, image_root, depth_root, incl_scene=True):
        """Load scene annotations and prepare the image transform.

        Args:
            scenes_json: path to the scenes JSON file; its "scenes" entry
                is loaded into ``self.scenes``.
            image_root: directory containing the RGB images.
            depth_root: directory containing the depth images.
            incl_scene: whether scene annotations are included downstream.
        """
        super().__init__()

        self.scenes_json = scenes_json
        self.image_root = image_root
        self.depth_root = depth_root
        self.incl_scene = incl_scene

        logger.info('Loading scenes from: "{}".'.format(self.scenes_json))
        self.scenes = io.load_json(self.scenes_json)["scenes"]

        import jactorch.transforms.bbox as T

        # Minimal transform: tensor conversion plus recentering pixels to
        # [-0.5, 0.5]; bbox-aware resize/normalize steps are intentionally
        # not applied here.
        self.image_transform = T.Compose([
            T.ToTensor(),
            T.Lambda(lambda img, bbox: (img - 0.5, bbox)),
        ])