Example #1
    def __init__(self, transform=None, **config):
        from utils.photometric import ImgAugTransform
        self.ImgAugTransform = ImgAugTransform

        self.config = self.default_config
        self.config = dict_update(self.config, config)
        self.files = self._init_dataset(**self.config)
        sequence_set = []
        for (img, img_warped, mat_hom) in zip(self.files['image_paths'],
                                              self.files['warped_image_paths'],
                                              self.files['homography']):
            sample = {
                'image': img,
                'warped_image': img_warped,
                'homography': mat_hom
            }
            sequence_set.append(sample)
        self.samples = sequence_set
        self.transform = transform
        self.enable_photo = self.config['augmentation']['photometric'][
            'enable']

        if config['preprocessing']['resize']:
            self.sizer = np.array(config['preprocessing']['resize'])
        pass
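These constructors all merge caller-supplied options into a default configuration via dict_update. For orientation, here is a minimal sketch of such a recursive merge; it is an assumption about the helper's behaviour, not the repository's actual implementation.

import collections.abc

def dict_update(d, u):
    # Recursively merge mapping u into mapping d, returning a new dict (sketch).
    d = dict(d)  # copy so the defaults are not mutated in place
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
            d[k] = dict_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d

# usage: config = dict_update(default_config, {'preprocessing': {'resize': [240, 320]}})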
Example #2
    def __init__(self, transform=None, task='train', **config):
        self.task = task
        self.config = self.default_config
        self.config = dict_update(self.config, config)

        root = Path(DATA_PATH, 'COCO/' + task + '2014/')
        images = list(root.iterdir())
        self.images = [str(p) for p in images]
        # root = os.path.join(config['root'], COCO_TRAIN)
        # images = os.listdir(root)
        # self.images = [os.path.join(root, image) for image in images if image.endswith('.jpg')]
        self.transforms = transform
Example #3
    def __init__(self, transform=None, split='train', **config):
        self.split = split
        # Update config
        self.config = self.default_config
        self.config = dict_update(self.config, config)

        self.transforms = transform
        self.action = 'train' if split == 'train' else 'val'

        # get files
        '''
        base_path = Path(DATA_PATH, 'COCO/' + task + '2014/')
        # base_path = Path(DATA_PATH, 'COCO_small/' + task + '2014/')
        image_paths = list(base_path.iterdir())
        # if config['truncate']:
        #     image_paths = image_paths[:config['truncate']]
        names = [p.stem for p in image_paths]
        image_paths = [str(p) for p in image_paths]
        files = {'image_paths': image_paths, 'names': names}
        '''

        if self.split not in ["train", "val"]:
            raise ValueError("Check split. It should be [train, val]")
        # get files
        self.data_path = "/ssd/data/phototourism/"
        self.image_path = osp.join(self.data_path, "orig")
        self.imgs = [line.rstrip("\n") for line in open(osp.join(self.data_path,
                                                                 self.split + "_phototourism_ms.txt"))]

        sequence_set = []
        # labels
        self.labels = False
        if self.config['labels']:
            self.labels = True
            # from models.model_wrap import labels2Dto3D
            # self.labels2Dto3D = labels2Dto3D
            print("load labels from: ", osp.join(self.config['labels'], self.split))
            count = 0
            for img_fname in self.imgs:
                data_fname = osp.join(self.config['labels'],
                                      self.split,
                                      "detection",
                                      img_fname.replace('/', '_') + ".npz")
                if osp.isfile(data_fname):
                    sample = {"image": osp.join(self.image_path, img_fname),
                              "name": img_fname,
                              "points": data_fname}

                    sequence_set.append(sample)
                    count += 1
            self.samples = sequence_set
        self.init_var()
Example #4
    def __init__(self, data={}, n_gpus=1, data_shape=None, **config):
        self.datasets = data
        self.data_shape = data_shape
        self.n_gpus = n_gpus
        self.graph = tf.get_default_graph()
        self.name = self.__class__.__name__.lower()  # get child name
        self.trainable = getattr(self, 'trainable', True)

        # Update config
        self.config = dict_update(self._default_config,
                                  getattr(self, 'default_config', {}))
        self.config = dict_update(self.config, config)

        required = self.required_baseconfig + getattr(self, 'required_config_keys', [])
        for r in required:
            assert r in self.config, 'Required configuration entry: \'{}\''.format(r)
        assert set(self.datasets) <= self.dataset_names, \
            'Unknown dataset name: {}'.format(set(self.datasets)-self.dataset_names)
        assert n_gpus > 0, 'TODO: CPU-only training is currently not supported.'

        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            self._build_graph()
Example #5
    def __init__(self, export=False, transform=None, task='train', **config):

        # Update config
        self.config = self.default_config
        self.config = dict_update(self.config, config)

        self.transforms = transform
        self.action = 'train' if task == 'train' else 'val'

        # get files
        base_path = Path(
            DATA_PATH,
            'Duck/' + task + 'Duck/' + self.config["type_data"] + "/")
        # base_path = Path(DATA_PATH, 'COCO_small/' + task + '2014/')
        image_paths = list(base_path.iterdir())
        # if config['truncate']:
        #     image_paths = image_paths[:config['truncate']]
        names = [p.stem for p in image_paths]
        image_paths = [str(p) for p in image_paths]
        files = {'image_paths': image_paths, 'names': names}

        sequence_set = []
        # labels
        self.labels = False
        if self.config['labels']:
            self.labels = True
            # from models.model_wrap import labels2Dto3D
            # self.labels2Dto3D = labels2Dto3D
            print("load labels from: ", self.config['labels'] + '/' + task)
            count = 0
            for (img, name) in zip(files['image_paths'], files['names']):
                p = Path(self.config['labels'], task, '{}.npz'.format(name))
                if p.exists():
                    sample = {'image': img, 'name': name, 'points': str(p)}
                    sequence_set.append(sample)
                    count += 1
                # if count > 100:
                #     print ("only load %d image!!!", count)
                #     print ("only load one image!!!")
                #     print ("only load one image!!!")
                #     break
            pass
        else:
            for (img, name) in zip(files['image_paths'], files['names']):
                sample = {'image': img, 'name': name}
                sequence_set.append(sample)
        self.samples = sequence_set

        self.init_var()

        pass
Example #6
    def __init__(self, **config):
        # Update config
        self.config = dict_update(getattr(self, 'default_config', {}), config)

        self.dataset = self._init_dataset(**self.config)

        self.tf_splits = {}
        self.tf_next = {}
        with tf.device('/cpu:0'):
            for n in self.split_names:
                self.tf_splits[n] = self._get_data(self.dataset, n, **self.config)
                self.tf_next[n] = self.tf_splits[n].make_one_shot_iterator().get_next()
        self.end_set = tf.errors.OutOfRangeError
        self.sess = tf.Session()
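This class builds one one-shot iterator per split and keeps tf.errors.OutOfRangeError as its end-of-data marker. A typical TF1 consumption loop would look like the sketch below, where `dataset` and the 'training' split name are assumptions for illustration.

# assuming `dataset` is an instance of the class above and 'training' is one of its split_names
samples = []
while True:
    try:
        samples.append(dataset.sess.run(dataset.tf_next['training']))
    except dataset.end_set:  # i.e. tf.errors.OutOfRangeError
        break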
Example #7
    def __init__(self, transform=None, **config):
        self.config = self.default_config
        self.config = dict_update(self.config, config)
        self.files = self._init_dataset(**self.config)
        sequence_set = []
        for (img1, img2, rel_pose) in zip(self.files['image1_paths'],
                                          self.files['image2_paths'],
                                          self.files['relative_poses']):
            sample = {'image1': img1, 'image2': img2, 'rel_pose': rel_pose}
            sequence_set.append(sample)
        self.samples = sequence_set
        self.transform = transform
        if config['preprocessing']['resize']:
            self.sizer = np.array(config['preprocessing']['resize'])
        pass
Example #8
    def __init__(self,
                 config,
                 save_path=Path('.'),
                 device='cpu',
                 verbose=False):
        print("using: Train_model_subpixel")
        self.config = self.default_config
        self.config = dict_update(self.config, config)
        self.device = device
        self.save_path = save_path
        self.cell_size = 8
        self.max_iter = config['train_iter']
        self._train = True
        self._eval = True

        pass
Example #9
    def __init__(
        self,
        export=False,
        transform=None,
        task="train",
        seed=0,
        sequence_length=1,
        **config,
    ):
        # Update config
        self.config = self.default_config
        self.config = dict_update(self.config, config)

        self.transforms = transform
        self.action = "train" if task == "train" else "val"

        # get files
        self.root = Path(self.config["root"])  # Path(KITTI_DATA_PATH)

        root_split_txt = self.config.get("root_split_txt", None)
        self.root_split_txt = Path(
            self.root if root_split_txt is None else root_split_txt)
        scene_list_path = (self.root_split_txt / "train.txt" if task == "train"
                           else self.root_split_txt / "val.txt")
        self.scenes = [
            # (label folder, raw image path)
            (Path(self.root / folder[:-1]), Path(self.root / folder[:-4] / 'image_02' / 'data') ) \
                for folder in open(scene_list_path)
        ]
        # self.scenes_imgs = [
        #     Path(self.root / folder[:-4] / 'image_02' / 'data') for folder in open(scene_list_path)
        # ]

        ## only for export??

        if self.config["labels"]:
            self.labels = True
            self.labels_path = Path(self.config["labels"], task)
            print("load labels from: ", self.config["labels"] + "/" + task)
        else:
            self.labels = False

        self.crawl_folders(sequence_length)

        # other variables
        self.init_var()
Example #10
    def __init__(self,
                 config,
                 save_path=Path("."),
                 device="cpu",
                 verbose=False):
        # config
        # Update config
        print("Load Train_model_heatmap!!")

        self.config = self.default_config
        self.config = dict_update(self.config, config)
        print("check config!!", self.config)

        # init parameters
        self.device = device
        self.save_path = save_path
        self._train = True
        self._eval = True
        self.cell_size = 8
        self.subpixel = False

        self.max_iter = config["train_iter"]

        self.gaussian = False
        if self.config["data"]["gaussian_label"]["enable"]:
            self.gaussian = True

        if self.config["model"]["dense_loss"]["enable"]:
            print("use dense_loss!")
            from utils.utils import descriptor_loss
            self.desc_params = self.config["model"]["dense_loss"]["params"]
            self.descriptor_loss = descriptor_loss
            self.desc_loss_type = "dense"
        elif self.config["model"]["sparse_loss"]["enable"]:
            print("use sparse_loss!")
            self.desc_params = self.config["model"]["sparse_loss"]["params"]
            from utils.loss_functions.sparse_loss import batch_descriptor_loss_sparse

            self.descriptor_loss = batch_descriptor_loss_sparse
            self.desc_loss_type = "sparse"

        # load model
        # self.net = self.loadModel(*config['model'])
        self.printImportantConfig()
        pass
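The constructor above dereferences only a few configuration keys (train_iter, data.gaussian_label.enable, model.dense_loss, model.sparse_loss). A minimal matching config, sketched with placeholder values and only the key names taken from the code, could look like this:

heatmap_trainer_config = {
    'train_iter': 170000,  # placeholder iteration budget
    'data': {'gaussian_label': {'enable': False}},
    'model': {
        'dense_loss': {'enable': False, 'params': {}},
        'sparse_loss': {'enable': True, 'params': {}},  # params are forwarded to the descriptor loss
    },
}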
Example #11
    def __init__(self, transform=None, **config):
        self.config = self.default_config
        self.config = dict_update(self.config, config)
        self.files = self._init_dataset(**self.config)
        sequence_set = []
        for (img, img_warped, mat_hom) in zip(self.files['image_paths'],
                                              self.files['warped_image_paths'],
                                              self.files['homography']):
            sample = {
                'image': img,
                'warped_image': img_warped,
                'homography': mat_hom
            }
            sequence_set.append(sample)
        self.samples = sequence_set
        self.transform = transform
        if config['preprocessing']['resize']:
            self.sizer = np.array(config['preprocessing']['resize'])
        pass
Example #12
    def __init__(
        self,
        export=False,
        transform=None,
        task="train",
        seed=0,
        sequence_length=1,
        **config
    ):
        # Update config
        self.config = self.default_config
        self.config = dict_update(self.config, config)

        self.transforms = transform
        self.action = "train" if task == "train" else "val"

        # get files
        self.root = Path(self.config['root'])
        self.task = task
        # get dataset split files
        root_split_txt = self.config.get('root_split_txt', None)
        self.root_split_txt = Path(self.root if root_split_txt is None else root_split_txt)
        # scene_list_path = (
        #     self.root_split_txt / "train.txt" if task == "train" else self.root_split_txt / "val.txt"
        # )
        self.scenes = [
            Path(self.root / "train")
        ]
        if self.config["labels"]:
            self.labels = True
            self.labels_path = Path(self.config["labels"], task)
            print("load labels from: ", self.config["labels"] + "/" + task)
        else:
            self.labels = False

        self.crawl_folders(sequence_length)

        # other variables
        self.init_var()
Example #13
    def __init__(
        self,
        seed=None,
        task="train",
        sequence_length=3,
        transform=None,
        target_transform=None,
        getPts=False,
        warp_input=False,
        **config,
    ):
        from utils.homographies import sample_homography_np as sample_homography
        from utils.photometric import ImgAugTransform, customizedTransform
        from utils.utils import compute_valid_mask
        from utils.utils import inv_warp_image, warp_points

        torch.set_default_tensor_type(torch.FloatTensor)
        np.random.seed(seed)
        random.seed(seed)

        # Update config
        self.config = self.default_config
        self.config = dict_update(self.config, dict(config))

        self.transform = transform
        self.sample_homography = sample_homography
        self.compute_valid_mask = compute_valid_mask
        self.inv_warp_image = inv_warp_image
        self.warp_points = warp_points
        self.ImgAugTransform = ImgAugTransform
        self.customizedTransform = customizedTransform

        ######
        self.enable_photo_train = self.config["augmentation"]["photometric"]["enable"]
        self.enable_homo_train = self.config["augmentation"]["homographic"]["enable"]
        self.enable_homo_val = False
        self.enable_photo_val = False
        ######


        if task == "train":
            self.action = "training"
            self.num_data = self.config["generation"]["split_sizes"]["training"]
        else:
            self.action = "validation"
            self.num_data = self.config["generation"]["split_sizes"]["validation"]
        

        self.cell_size = 8
        self.getPts = getPts

        self.gaussian_label = False
        if self.config["gaussian_label"]["enable"]:
            # self.params_transform = {'crop_size_y': 120, 'crop_size_x': 160, 'stride': 1, 'sigma': self.config['gaussian_label']['sigma']}
            self.gaussian_label = True

        self.pool = multiprocessing.Pool(6)

        # Parse drawing primitives
        primitives = self.parse_primitives(
            config["primitives"], self.drawing_primitives
        )

        basepath = Path(
            DATA_PATH,
            "synthetic_shapes"
            + ("_{}".format(config["suffix"]) if config["suffix"] is not None else ""),
        )
        basepath.mkdir(parents=True, exist_ok=True)

        splits = {s: {"images": [], "points": []} for s in [self.action]}
Example #14
    def __init__(self,
                 config,
                 save_path=Path("."),
                 device="cpu",
                 verbose=False):
        """
        ## default dimension:
            heatmap: torch (batch_size, H, W, 1)
            dense_desc: torch (batch_size, H, W, 256)
            pts: [batch_size, np (N, 3)]
            desc: [batch_size, np(256, N)]
        
        :param config:
            dense_loss, sparse_loss (default)
            
        :param save_path:
        :param device:
        :param verbose:
        """
        # config
        print("Load Train_model_frontend!!")
        self.config = self.default_config
        self.config = dict_update(self.config, config)
        print("check config!!", self.config)

        # init parameters
        self.device = device
        self.save_path = save_path
        self._train = True
        self._eval = True
        self.cell_size = 8
        self.subpixel = False
        self.loss = 0

        self.max_iter = config["train_iter"]

        if self.config["model"]["dense_loss"]["enable"]:
            ## original superpoint paper uses dense loss
            print("use dense_loss!")
            from utils.utils import descriptor_loss

            self.desc_params = self.config["model"]["dense_loss"]["params"]
            self.descriptor_loss = descriptor_loss
            self.desc_loss_type = "dense"
        elif self.config["model"]["sparse_loss"]["enable"]:
            ## our sparse loss has similar performance and is more efficient
            print("use sparse_loss!")
            self.desc_params = self.config["model"]["sparse_loss"]["params"]
            from utils.loss_functions.sparse_loss import batch_descriptor_loss_sparse

            self.descriptor_loss = batch_descriptor_loss_sparse
            self.desc_loss_type = "sparse"

        if self.config["model"]["subpixel"]["enable"]:
            ## deprecated: only for testing subpixel prediction
            self.subpixel = True

            def get_func(path, name):
                logging.info("=> from %s import %s", path, name)
                mod = __import__("{}".format(path), fromlist=[""])
                return getattr(mod, name)

            self.subpixel_loss_func = get_func(
                "utils.losses", self.config["model"]["subpixel"]["loss_func"])

        # load model
        # self.net = self.loadModel(*config['model'])
        self.printImportantConfig()

        pass
Example #15
def homography_adaptation(image, net, config):
    """Perfoms homography adaptation.
    Inference using multiple random warped patches of the same input image for robust
    predictions.
    Arguments:
        image: A `Tensor` with shape `[N, H, W, 1]`.
        net: A function that takes an image as input, performs inference, and outputs the
            prediction dictionary.
        config: A configuration dictionary containing optional entries such as the number
            of sampled homographies `'num'`, the aggregation method `'aggregation'`.
    Returns:
        A dictionary which contains the aggregated detection probabilities.
    """

    probs = net(image)['prob']
    counts = tf.ones_like(probs)
    images = image

    probs = tf.expand_dims(probs, axis=-1)
    counts = tf.expand_dims(counts, axis=-1)
    images = tf.expand_dims(images, axis=-1)

    shape = tf.shape(image)[1:3]
    config = dict_update(homography_adaptation_default_config, config)

    def step(i, probs, counts, images):
        # Sample image patch
        H = sample_homography(shape, **config['homographies'])
        H_inv = invert_homography(H)
        warped = H_transform(image, H, interpolation='BILINEAR')
        count = H_transform(tf.expand_dims(tf.ones(tf.shape(image)[:3]), -1),
                            H_inv, interpolation='NEAREST')[..., 0]

        # Predict detection probabilities
        warped_shape = tf.to_int32(
                tf.to_float(shape)*config['homographies']['patch_ratio'])
        input_warped = tf.image.resize_images(warped, warped_shape)
        prob = net(input_warped)['prob']
        prob = tf.image.resize_images(tf.expand_dims(prob, axis=-1), shape)[..., 0]
        prob_proj = H_transform(tf.expand_dims(prob, -1), H_inv,
                                interpolation='BILINEAR')[..., 0]

        probs = tf.concat([probs, tf.expand_dims(prob_proj, -1)], axis=-1)
        counts = tf.concat([counts, tf.expand_dims(count, -1)], axis=-1)
        images = tf.concat([images, tf.expand_dims(warped, -1)], axis=-1)
        return i + 1, probs, counts, images

    _, probs, counts, images = tf.while_loop(
            lambda i, p, c, im: tf.less(i, config['num'] - 1),
            step,
            [0, probs, counts, images],
            parallel_iterations=1,
            back_prop=False,
            shape_invariants=[
                    tf.TensorShape([]),
                    tf.TensorShape([None, None, None, None]),
                    tf.TensorShape([None, None, None, None]),
                    tf.TensorShape([None, None, None, 1, None])])

    counts = tf.reduce_sum(counts, axis=-1)
    max_prob = tf.reduce_max(probs, axis=-1)
    mean_prob = tf.reduce_sum(probs, axis=-1) / counts

    if config['aggregation'] == 'max':
        prob = max_prob
    elif config['aggregation'] == 'sum':
        prob = mean_prob
    else:
        raise ValueError('Unknown aggregation method: {}'.format(config['aggregation']))

    if config['filter_counts']:
        prob = tf.where(tf.greater_equal(counts, config['filter_counts']),
                        prob, tf.zeros_like(prob))

    return {'prob': prob, 'counts': counts,
            'mean_prob': mean_prob, 'input_images': images, 'H_probs': probs}  # debug
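Below is a hedged sketch of how homography_adaptation might be called from a TF1 export script; the placeholder, the `net` callable and the numeric values are assumptions for illustration, and any keys left out fall back to homography_adaptation_default_config via the dict_update call above.

ha_config = {
    'num': 100,            # number of sampled homographies
    'aggregation': 'sum',  # 'max' or 'sum' (mean over valid counts)
    'filter_counts': 0,    # 0 disables count-based filtering
}

image_ph = tf.placeholder(tf.float32, shape=[None, None, None, 1])
outputs = homography_adaptation(image_ph, net, ha_config)  # `net` maps an image batch to {'prob': ...}
# prob = sess.run(outputs['prob'], feed_dict={image_ph: batch})  # inside a tf.Session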
Example #16
    def __init__(
        self,
        seed=None,
        task="train",
        sequence_length=3,
        transform=None,
        target_transform=None,
        getPts=False,
        warp_input=False,
        **config,
    ):
        from utils.homographies import sample_homography_np as sample_homography
        from utils.photometric import ImgAugTransform, customizedTransform
        from utils.utils import compute_valid_mask
        from utils.utils import inv_warp_image, warp_points

        torch.set_default_tensor_type(torch.FloatTensor)
        np.random.seed(seed)
        random.seed(seed)

        # Update config
        self.config = self.default_config
        self.config = dict_update(self.config, dict(config))

        self.transform = transform
        self.sample_homography = sample_homography
        self.compute_valid_mask = compute_valid_mask
        self.inv_warp_image = inv_warp_image
        self.warp_points = warp_points
        self.ImgAugTransform = ImgAugTransform
        self.customizedTransform = customizedTransform

        ######
        self.enable_photo_train = self.config["augmentation"]["photometric"][
            "enable"]
        self.enable_homo_train = self.config["augmentation"]["homographic"][
            "enable"]
        self.enable_homo_val = False
        self.enable_photo_val = False
        ######

        self.action = "training" if task == "train" else "validation"
        # self.warp_input = warp_input

        self.cell_size = 8
        self.getPts = getPts

        self.gaussian_label = False
        if self.config["gaussian_label"]["enable"]:
            # self.params_transform = {'crop_size_y': 120, 'crop_size_x': 160, 'stride': 1, 'sigma': self.config['gaussian_label']['sigma']}
            self.gaussian_label = True

        self.pool = multiprocessing.Pool(6)

        # Parse drawing primitives
        primitives = self.parse_primitives(config["primitives"],
                                           self.drawing_primitives)

        basepath = Path(
            DATA_PATH,
            "synthetic_shapes" + ("_{}".format(config["suffix"])
                                  if config["suffix"] is not None else ""),
        )
        basepath.mkdir(parents=True, exist_ok=True)

        splits = {s: {"images": [], "points": []} for s in [self.action]}
        for primitive in primitives:
            tar_path = Path(basepath, "{}.tar.gz".format(primitive))
            if not tar_path.exists():
                self.dump_primitive_data(primitive, tar_path, self.config)

            # Untar locally
            logging.info(
                "Extracting archive for primitive {}.".format(primitive))
            logging.info(f"tar_path: {tar_path}")
            tar = tarfile.open(tar_path)
            # temp_dir = Path(os.environ['TMPDIR'])
            temp_dir = Path(TMPDIR)
            tar.extractall(path=temp_dir)
            tar.close()

            # Gather filenames in all splits, optionally truncate
            truncate = self.config["truncate"].get(primitive, 1)
            path = Path(temp_dir, primitive)
            for s in splits:
                e = [str(p) for p in Path(path, "images", s).iterdir()]
                f = [p.replace("images", "points") for p in e]
                f = [p.replace(".png", ".npy") for p in f]
                splits[s]["images"].extend(e[:int(truncate * len(e))])
                splits[s]["points"].extend(f[:int(truncate * len(f))])

        # Shuffle
        for s in splits:
            perm = np.random.RandomState(0).permutation(
                len(splits[s]["images"]))
            for obj in ["images", "points"]:
                splits[s][obj] = np.array(splits[s][obj])[perm].tolist()

        self.crawl_folders(splits)
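For reference, the constructor above reads only the configuration keys sketched below; the values are placeholders and only the key names are taken from the code (whether 'all' is a valid primitives value depends on parse_primitives, which is not shown).

synthetic_shapes_config = {
    'primitives': 'all',       # or an explicit list of drawing primitive names
    'suffix': None,
    'truncate': {},            # per-primitive truncation ratio, defaults to 1
    'generation': {'split_sizes': {'training': 10000, 'validation': 200}},
    'gaussian_label': {'enable': False},
    'augmentation': {
        'photometric': {'enable': True},
        'homographic': {'enable': False},
    },
}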