Example no. 1
def init_queries(self, query_file, query_config, prefix=''):
    # Read the list of query images and build a dataset restricted to them
    queries = read_query_list(
        Path(self.base_path, query_file), prefix=prefix)
    Dataset = get_dataset(query_config.get('name', self.dataset_name))
    query_config = {
        **query_config, 'image_names': [q.name for q in queries]}
    query_dataset = Dataset(**query_config)
    return queries, query_dataset
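A self-contained sketch of the dict-unpacking pattern used above to derive query_config: {**base, 'key': value} copies the base configuration and overrides a single entry without mutating the original. The concrete keys and values below are illustrative only, not taken from the source.

# Illustrative values only; the copy-and-override pattern is what matters here.
base_config = {'name': 'aachen', 'grayscale': True}
image_names = ['query/day/img_0001.jpg', 'query/day/img_0002.jpg']
query_config = {**base_config, 'image_names': image_names}
assert base_config == {'name': 'aachen', 'grayscale': True}  # base is unchanged
print(query_config)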
Example no. 2
 def init_queries(self, query_file, query_config, prefix=""):
     queries = read_query_list(Path(self.base_path, query_file),
                               prefix=prefix)
     Dataset = get_dataset(query_config.get("name", self.dataset_name))
     query_config = {
         **query_config, "image_names": [q.name for q in queries]
     }
     query_dataset = Dataset(**query_config)
     # load GPS data for queries
     query_gps = Traverse(self.dataset_name, self.config["queries"],
                          self.gps.experiment_name)
     return queries, query_dataset, query_gps
Example no. 3
def _init_graph(config, with_dataset=False):
    set_seed(config.get('seed', int.from_bytes(os.urandom(4), byteorder='big')))
    n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
    logging.info('Number of GPUs detected: {}'.format(n_gpus))

    dataset = get_dataset(config['data']['name'])(**config['data'])
    model = get_model(config['model']['name'])(
            data=dataset.get_tf_datasets(), n_gpus=n_gpus, **config['model'])
    model.__enter__()
    if with_dataset:
        yield model, dataset
    else:
        yield model
    model.__exit__()
    tf.reset_default_graph()
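_init_graph above is a generator: it builds the dataset and model, yields them, and tears the model down once the caller is done. A minimal, self-contained sketch of that pattern, assuming the function is meant to be wrapped with contextlib.contextmanager and used in a with block (the dummy resource below stands in for the model):

from contextlib import contextmanager

@contextmanager
def managed_resource():
    resource = {'open': True}      # stands in for model.__enter__()
    try:
        yield resource             # the body of the with block runs here
    finally:
        resource['open'] = False   # stands in for model.__exit__() / graph reset

with managed_resource() as res:
    print(res['open'])  # True inside the block, cleaned up afterwards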
Example no. 4
def export_for_sfm(data_config, exper_config, export_name):
    export_dir = Path(EXPER_PATH, 'exports', export_name)
    export_dir.mkdir(exist_ok=True)

    dataset = get_dataset(data_config['name'])(**data_config)
    data_iter = dataset.get_test_set()

    for data in tqdm(data_iter):
        predictions = exper_config['predictor'](
            data['image'], data['name'], **exper_config)
        # Scale the keypoints to the original image size
        # and convert to Colmap convention (origin = corner of upper left pix)
        scale = ((np.array(data['original_size'][:2]) - 1)
                 / (np.array(data['image'].shape[:2]) - 1))
        export = {
            'keypoints': scale[::-1] * predictions['keypoints'] + 0.5,
            'scores': predictions['scores'],
            'descriptors': predictions['descriptors'],
            'image_size': data['image'].shape[:2][::-1]
        }
        name = data['name'].decode('utf-8')
        Path(export_dir, Path(name).parent).mkdir(parents=True, exist_ok=True)
        np.savez(Path(export_dir, f'{name}.npz'), **export)
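The loop above writes one .npz file per image with the keys 'keypoints', 'scores', 'descriptors' and 'image_size'. A small round-trip sketch of that format; only the key names come from the snippet, while the array shapes, the descriptor dimension and the temporary path are assumptions.

import numpy as np

# Dummy arrays standing in for real predictions (shapes are assumptions).
export = {
    'keypoints': np.random.rand(100, 2) * 640,
    'scores': np.random.rand(100),
    'descriptors': np.random.rand(100, 256),
    'image_size': np.array([640, 480]),
}
np.savez('/tmp/example_export.npz', **export)

data = np.load('/tmp/example_export.npz')
print(data['keypoints'].shape, data['image_size'])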
Example no. 5
def export_predictions(config, exper_name, export_name, keys='*'):
    # Reconstructed snippet: the top of the original fragment was cut off, so
    # the function name, its signature, the leading `if exper_name:` test and
    # the base_dir lines below are assumptions; the rest follows the fragment.
    if exper_name:
        # Load the model section of the experiment config (not the dataset)
        with open(Path(EXPER_PATH, exper_name, 'config.yaml'), 'r') as f:
            config['model'] = tools.dict_update(
                yaml.load(f)['model'], config.get('model', {}))
        checkpoint_path = Path(EXPER_PATH, exper_name)
        if config.get('weights', None):
            checkpoint_path = Path(checkpoint_path, config['weights'])
    else:
        if config.get('weights', None):
            checkpoint_path = Path(DATA_PATH, 'weights', config['weights'])
        else:
            checkpoint_path = None
            logging.info('No weights provided.')
    logging.info(f'Starting export with configuration:\n{pformat(config)}')

    # Assumed output location, mirroring the export directory of Example no. 4
    base_dir = Path(EXPER_PATH, 'exports', export_name)
    base_dir.mkdir(parents=True, exist_ok=True)

    with get_model(config['model']['name'])(
            data_shape={'image': [None, None, None,
                                  config['model']['image_channels']]},
            **config['model']) as net:
        if checkpoint_path is not None:
            net.load(str(checkpoint_path))
        dataset = get_dataset(config['data']['name'])(**config['data'])
        test_set = dataset.get_test_set()

        for data in tqdm(test_set):
            predictions = net.predict(data, keys=keys)
            predictions['input_shape'] = data['image'].shape
            name = data['name'].decode('utf-8')
            Path(base_dir,
                 Path(name).parent).mkdir(parents=True, exist_ok=True)
            np.savez(Path(base_dir, '{}.npz'.format(name)), **predictions)
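For reference, the configuration the routine above reads has 'data' and 'model' sections, each with a 'name' key, plus 'image_channels' under 'model' and an optional top-level 'weights'. The concrete names and values below are assumptions, and export_predictions is the signature reconstructed in the snippet above.

# Illustrative configuration; dataset/model names and values are assumptions.
config = {
    'data': {'name': 'hpatches', 'resize_max': 640},
    'model': {'name': 'super_point', 'image_channels': 1},
    'weights': None,
}
# export_predictions(config, exper_name='my_experiment',
#                    export_name='my_export', keys='*')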