def __init__(self,
                 iccv_res_dir,
                 image_dir,
                 dataset_list,
                 lmdb_paths=None,
                 downsample_scale=0.25,
                 sampling_num=100,
                 sub_graph_nodes=24,
                 transform_func='default'):
        # sampling_num: number of sub-graphs to sample for each dataset

        self.num_dataset = len(dataset_list)
        self.iccv_res_dir = iccv_res_dir
        self.sampling_num = sampling_num
        self.image_dir = image_dir
        self.sub_graph_nodes = sub_graph_nodes
        self.transform_func = transform_func
        self.downsample_scale = downsample_scale
        if lmdb_paths is not None:
            self.use_lmdb = True
            self.lmdb_db = LMDBModel(lmdb_paths[0])
            self.lmdb_meta = pickle.load(open(lmdb_paths[1], 'rb'))
        else:
            self.use_lmdb = False

        if self.transform_func == 'default':
            self.transform_func = transforms.Compose([
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
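        # note: Normalize expects float tensors already scaled to [0, 1]
        # (e.g. produced by transforms.ToTensor()); the mean/std above are
        # the standard ImageNet statistics.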

        # read image list and calibration
        self.frame_list = {}
        self.K = {}
        self.dataset_names = []
        for ds in dataset_list:
            dataset_name = ds['name']
            self.dataset_names.append(dataset_name)

            frame_list = read_image_list(
                os.path.join(iccv_res_dir, dataset_name, 'ImageList.txt'))
            frame_list = [f[2:].split('.')[0].strip() for f in frame_list]

            self.frame_list[dataset_name] = frame_list

            K, img_dim = read_calibration(
                os.path.join(iccv_res_dir, dataset_name, 'calibration.txt'))
            self.K[dataset_name] = K

        self.Es = {}
        self.Cs = {}

        self.edge_sampler = {}
        self.covis_map = {}
        self.edge_local_feat_cache = {}

        print(
            '[1dsfm dataset Init] load in. and out. edges and sampling sub_graphs'
        )
        for ds in tqdm(dataset_list):
            dataset_name = ds['name']
            # eg_file_path = os.path.join(image_dir, dataset_name, 'EGs.txt')
            # bundle_file_name = os.path.join(image_dir, dataset_name, ds['bundle_file'])

            Es, Cs = read_poses(
                os.path.join(iccv_res_dir, dataset_name, 'bundle.out'))
            self.Es[dataset_name] = Es
            self.Cs[dataset_name] = Cs

            n_Cameras = len(Cs)
            inoutMat = np.load(
                os.path.join(iccv_res_dir, dataset_name, 'inoutMat.npy'))
            covis_map = np.load(
                os.path.join(iccv_res_dir, dataset_name, 'covis_map.npy'))

            with open(
                    os.path.join(iccv_res_dir, dataset_name,
                                 'edge_feat_pos_cache.bin'), 'rb') as f:
                edge_feat_pos_cache = pickle.load(f)

            # randomly generate sub-graphs
            # todo: fix sub_graph_nodes
            gen = SamplingGenerator(n_Cameras, inoutMat)
            gen.setSamplingSize(sub_graph_nodes)
            gen.setSamplingNumber(sampling_num)
            gen.generation()
            self.edge_sampler[dataset_name] = gen
            self.covis_map[dataset_name] = covis_map
            self.edge_local_feat_cache[dataset_name] = edge_feat_pos_cache

        print('[1dsfm Init] Done')
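
# A minimal usage sketch for the constructor above. The enclosing Dataset
# class is not named in this excerpt, so `OneDSFMDataset` is a hypothetical
# placeholder, and the paths and scene names are illustrative only:
#
#   dataset_list = [{'name': 'Alamo'}, {'name': 'Ellis_Island'}]
#   train_set = OneDSFMDataset(iccv_res_dir='/data/iccv_res',
#                              image_dir='/data/images',
#                              dataset_list=dataset_list,
#                              sampling_num=100,
#                              sub_graph_nodes=24)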

def read_lmdb(dataset, lmdb, processed_edge_dict, processed_node_dict):
    train_loader = DataLoader(dataset, num_workers=0, shuffle=True)

    pbar = tqdm(total=len(dataset))
    for sample in train_loader:
        (dataset_name, idx, img_names, imgs, img_ori_dim, cam_Es, cam_Ks, _,
         img_id2sub_id, sub_id2img_id, _, edge_subnode_idx, edge_type,
         edge_local_matches_n1, edge_local_matches_n2, edge_rel_Rt) = sample

        for e_i, e in enumerate(edge_subnode_idx):
            # map sub-graph node indices back to global image ids
            sub_n1, sub_n2 = e[0].item(), e[1].item()
            n1, n2 = sub_id2img_id[sub_n1], sub_id2img_id[sub_n2]

            # note: node features are keyed here by the sub-graph node index,
            # while edge features use the pair of global image ids
            node_key = '%s,%d' % (dataset_name[0], sub_n1)
            edge_key = '%s,%d-%d' % (dataset_name[0], n1, n2)

            node_feat = lmdb.read_ndarray_by_key(node_key)
            edge_feat = lmdb.read_ndarray_by_key(edge_key)

        pbar.update(1)
    pbar.close()


""" Dump to lmdb
"""
# init lmdb
lmdb = LMDBModel(out_node_edge_feat_lmdb_path, read_only=True)
if os.path.exists(out_node_edge_feat_meta_path):
    with open(out_node_edge_feat_meta_path, 'rb') as f:
        o = pickle.load(f)
        processed_edge_dict, processed_node_dict = o
read_lmdb(train_set, lmdb, processed_edge_dict, processed_node_dict)
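
# Hedged sketch of the LMDB key convention implied by read_lmdb above: node
# features live under '<dataset>,<node_id>' and edge features under
# '<dataset>,<n1>-<n2>'. The helper below is hypothetical, for illustration.
def make_feat_keys(dataset_name, node_id, n1, n2):
    node_key = '%s,%d' % (dataset_name, node_id)
    edge_key = '%s,%d-%d' % (dataset_name, n1, n2)
    return node_key, edge_key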
    def __init__(self,
                 iccv_res_dir,
                 image_dir,
                 dataset_list,
                 lmdb_paths=None,
                 node_edge_lmdb=None,
                 img_max_dim=480,
                 sampling_num_range=[100, 500],
                 sub_graph_nodes=24,
                 sample_res_cache=None,
                 sampling_undefined_edge=False,
                 load_img=True,
                 transform_func='default',
                 training=True):
        # sampling_num_range: [min, max] number of sub-graphs to sample per dataset
        assert node_edge_lmdb is not None
        # assert lmdb_paths is not None

        self.load_img = load_img
        self.num_dataset = len(dataset_list)
        self.iccv_res_dir = iccv_res_dir
        self.image_dir = image_dir
        self.sampling_num_range = sampling_num_range
        self.sub_graph_nodes = sub_graph_nodes
        self.transform_func = transform_func
        self.img_max_dim = img_max_dim
        self.sampling_undefined_edge = sampling_undefined_edge

        if lmdb_paths is not None:
            self.use_lmdb = True
            # self.lmdb_db = LMDBModel(lmdb_paths[0])
            self.lmdb_meta = pickle.load(open(lmdb_paths[1], 'rb'))
        else:
            self.use_lmdb = False

        if self.transform_func == 'default':
            self.transform_func = transforms.Compose([
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])

        # edge and node feature lmdb
        self.edge_lmdb = LMDBModel(node_edge_lmdb['edge'],
                                   lock=False,
                                   read_only=True)
        self.node_lmdb = LMDBModel(node_edge_lmdb['node'],
                                   lock=False,
                                   read_only=True)
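        # opened without the lock file and read-only, so multiple reader
        # processes (e.g. DataLoader workers) can share the lmdbs safely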

        # read image list and calibration
        self.frame_list = {}
        self.K = {}
        self.dataset_names = []
        for ds in dataset_list:
            dataset_name = ds['name']
            bundle_prefix = ds['bundle_prefix']

            self.dataset_names.append(dataset_name)

            frame_list = read_image_list(
                os.path.join(iccv_res_dir, dataset_name,
                             bundle_prefix + '.list.txt'))
            frame_list = [f.split('.')[0].strip() for f in frame_list]

            self.frame_list[dataset_name] = frame_list

            K, img_dim = read_calibration(
                os.path.join(iccv_res_dir, dataset_name, 'calibration.txt'))
            self.K[dataset_name] = K

        self.Es = {}
        self.Cs = {}

        self.covis_map = {}
        self.edge_local_feat_cache = {}
        self.inout_mat = {}
        self.total_sample_num = 0

        max_scene_edges = 0  # max edge count across scenes, used for ratio sampling
        min_scene_edges = 1400000  # large sentinel, lowered while scanning scenes
        print('[Captured dataset Init] load in. and out. edges')
        # z_flip = np.diag([1, 1, -1])
        for ds in tqdm(dataset_list):
            dataset_name = ds['name']
            bundle_prefix = ds['bundle_prefix']

            Es, Cs = read_poses(
                os.path.join(iccv_res_dir, dataset_name, bundle_prefix))
            # Es = [np.matmul(z_flip, E) for E in Es]
            self.Es[dataset_name] = Es
            self.Cs[dataset_name] = Cs

            inoutMat = np.load(
                os.path.join(iccv_res_dir, dataset_name, 'inoutMat.npy'))
            covis_map = np.load(
                os.path.join(iccv_res_dir, dataset_name, 'covis.npy'))

            with open(
                    os.path.join(iccv_res_dir, dataset_name,
                                 'edge_feat_pos_cache.bin'), 'rb') as f:
                edge_feat_pos_cache = pickle.load(f)

                # peek at one cached edge entry (the refine_Rt check below is disabled)
                sampled_key = list(edge_feat_pos_cache.keys())[0]
                # if 'refine_Rt' not in edge_feat_pos_cache[sampled_key]:
                #     raise Exception('dataset: %s has no refine_Rt' % dataset_name)

            num_edges = len(edge_feat_pos_cache)
            if num_edges > max_scene_edges:
                max_scene_edges = num_edges
            if num_edges < min_scene_edges:
                min_scene_edges = num_edges

            self.inout_mat[dataset_name] = inoutMat
            self.covis_map[dataset_name] = covis_map
            self.edge_local_feat_cache[dataset_name] = edge_feat_pos_cache

        if min_scene_edges * 40 < max_scene_edges:
            # clamp so the scene with the most edges cannot dominate the sampling ratio
            max_scene_edges = 40 * min_scene_edges
        """ Sampling ---------------------------------------------------------------------------------------------------
        """
        self.edge_sampler = {}
        self.samples = []  # (dataset_id, sub-graph sample_id)

        if sample_res_cache is None or not os.path.exists(sample_res_cache):

            print('[Captured dataset Init] sampling sub_graphs')
            for ds_id, ds in enumerate(dataset_list):
                dataset_name = ds['name']
                edge_feat_pos_cache = self.edge_local_feat_cache[dataset_name]

                n_Cameras = len(self.Cs[dataset_name])
                inoutMat = self.inout_mat[dataset_name]
                # edge_feat_pos_cache = self.edge_local_feat_cache[dataset_name]
                # pass 1: pairs with cached local features that are not inlier
                # edges are marked -1
                for i in range(n_Cameras):
                    for j in range(n_Cameras):
                        if inoutMat[i, j] != 1 and (
                            ("%d-%d" % (i, j)) in edge_feat_pos_cache or
                            ("%d-%d" % (j, i)) in edge_feat_pos_cache):
                            inoutMat[i, j] = -1
                # pass 2: pairs without cached local features are set to 0 (undefined)
                for i in range(n_Cameras):
                    for j in range(n_Cameras):
                        if ("%d-%d" % (i, j) not in edge_feat_pos_cache) and (
                                "%d-%d" % (j, i) not in edge_feat_pos_cache):
                            inoutMat[i, j] = 0
                num_edges = len(self.edge_local_feat_cache[dataset_name])

                # determine the sampling number from this scene's share of edges,
                # clamped to sampling_num_range below
                sample_ratio = float(num_edges) / float(max_scene_edges)
                print('%s: Sampling Ratio: %.2f' %
                      (dataset_name, sample_ratio))
                sample_num = int(sampling_num_range[1] * sample_ratio)
                if sample_num < sampling_num_range[0]:
                    sample_num = sampling_num_range[0]
                if sample_num > sampling_num_range[1]:
                    sample_num = sampling_num_range[1]

                # todo: fix sub_graph_nodes
                gen = SamplingGenerator(n_Cameras, inoutMat)
                gen.setSamplingSize(sub_graph_nodes)
                gen.setSamplingNumber(sample_num)
                gen.generation(use_undefine=self.sampling_undefined_edge,
                               get_max_node=False)

                print("test inoutmat")
                for edges in gen.sampling_edge:
                    flag = False
                    for edge in edges:

                        # if (edge[0] == 29 and edge[1] == 21) or (edge[1] == 29 and edge[0] == 21) and dataset_name=='furniture13':
                        #     lenth = 0
                        #     print(lenth)

                        if ("%d-%d" %
                            (edge[0], edge[1]) in edge_feat_pos_cache) or (
                                "%d-%d" %
                                (edge[1], edge[0]) in edge_feat_pos_cache):
                            continue
                        else:
                            #print("%d-%d" % (edge[0], edge[1]))
                            flag = True
                    if flag:
                        print("bad")
                filtered_sampled_num = len(gen.sampling_node)
                print('[Captured dataset Init] %s: (filtered: %d, all: %d)' %
                      (dataset_name, filtered_sampled_num, num_edges))

                self.samples += [(ds_id, i)
                                 for i in range(filtered_sampled_num)]
                self.edge_sampler[dataset_name] = (gen.sampling_node,
                                                   gen.sampling_edge,
                                                   gen.sampling_edge_label)

            if sample_res_cache is not None:
                with open(sample_res_cache, 'wb') as f:
                    pickle.dump([self.samples, self.edge_sampler], f)
                print('[Captured Init] Save subgraph fast cache to %s.' %
                      sample_res_cache)

        elif os.path.exists(sample_res_cache):
            with open(sample_res_cache, 'rb') as f:
                s = pickle.load(f)
                self.samples, self.edge_sampler = s
            print('[Captured Init] Load subgraph fast cache from %s.' %
                  sample_res_cache)

        print('[Captured Init] Done, %d samples' % len(self.samples))
        print('Rt_rel_12: n2 to n1')
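
# Standalone sketch (toy data) of the inoutMat relabeling performed above,
# assuming the label convention suggested by `sampling_undefined_edge`:
# 1 = inlier edge, -1 = cached but non-inlier edge, 0 = no cached features.
import numpy as np

cache = {'0-1': None, '1-2': None}  # toy stand-in for edge_feat_pos_cache
mat = np.array([[0, 1, 2],
                [1, 0, 2],
                [2, 2, 0]])
n = mat.shape[0]
for i in range(n):
    for j in range(n):
        k, rk = '%d-%d' % (i, j), '%d-%d' % (j, i)
        if mat[i, j] != 1 and (k in cache or rk in cache):
            mat[i, j] = -1  # cached but not an inlier edge
for i in range(n):
    for j in range(n):
        k, rk = '%d-%d' % (i, j), '%d-%d' % (j, i)
        if k not in cache and rk not in cache:
            mat[i, j] = 0   # no cached features: undefined
# mat is now [[0, 1, 0], [1, 0, -1], [0, -1, 0]]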
    def __init__(self,
                 iccv_res_dir,
                 image_dir,
                 dataset_list,
                 lmdb_paths=None,
                 img_max_dim=480,
                 sampling_num_range=[100, 500],
                 sub_graph_nodes=24,
                 sample_res_cache=None,
                 sampling_undefined_edge=False,
                 transform_func='default'):
        # sampling_num_range: [min, max] number of sub-graphs to sample per dataset

        self.num_dataset = len(dataset_list)
        self.iccv_res_dir = iccv_res_dir
        self.image_dir = image_dir
        self.sampling_num_range = sampling_num_range
        self.sub_graph_nodes = sub_graph_nodes
        self.transform_func = transform_func
        self.img_max_dim = img_max_dim
        self.sampling_undefined_edge = sampling_undefined_edge

        if lmdb_paths is not None:
            self.use_lmdb = True
            self.lmdb_db = LMDBModel(lmdb_paths[0])
            self.lmdb_meta = pickle.load(open(lmdb_paths[1], 'rb'))
        else:
            self.use_lmdb = False

        if self.transform_func == 'default':
            self.transform_func = transforms.Compose([
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])

        # read image list and calibration
        self.frame_list = {}
        self.K = {}
        self.dataset_names = []
        for ds in dataset_list:
            dataset_name = ds['name']
            bundle_prefix = ds['bundle_prefix']

            self.dataset_names.append(dataset_name)

            frame_list = read_image_list(
                os.path.join(iccv_res_dir, dataset_name,
                             bundle_prefix + '.list.txt'))
            frame_list = [f.split('.')[0].strip() for f in frame_list]

            self.frame_list[dataset_name] = frame_list

            K, img_dim = read_calibration(
                os.path.join(iccv_res_dir, dataset_name, 'calibration.txt'))
            self.K[dataset_name] = K

        self.Es = {}
        self.Cs = {}

        self.covis_map = {}
        self.edge_local_feat_cache = {}
        self.inout_mat = {}
        self.total_sample_num = 0

        max_scene_edges = 0  # max edge count across scenes, used for ratio sampling
        min_scene_edges = 1400000  # large sentinel, lowered while scanning scenes
        print('[Captured dataset Init] load in. and out. edges')
        for ds in tqdm(dataset_list):
            dataset_name = ds['name']
            bundle_prefix = ds['bundle_prefix']

            Es, Cs = read_poses(
                os.path.join(iccv_res_dir, dataset_name, bundle_prefix))
            self.Es[dataset_name] = Es
            self.Cs[dataset_name] = Cs

            inoutMat = np.load(
                os.path.join(iccv_res_dir, dataset_name, 'inoutMat.npy'))
            covis_map = np.load(
                os.path.join(iccv_res_dir, dataset_name, 'covis.npy'))

            with open(
                    os.path.join(iccv_res_dir, dataset_name,
                                 'edge_feat_pos_cache.bin'), 'rb') as f:
                edge_feat_pos_cache = pickle.load(f)
            num_edges = len(edge_feat_pos_cache)
            if num_edges > max_scene_edges:
                max_scene_edges = num_edges
            if num_edges < min_scene_edges:
                min_scene_edges = num_edges

            self.inout_mat[dataset_name] = inoutMat
            self.covis_map[dataset_name] = covis_map
            self.edge_local_feat_cache[dataset_name] = edge_feat_pos_cache

        if min_scene_edges * 6 < max_scene_edges:
            # clamp so the scene with the most edges cannot dominate the sampling ratio
            max_scene_edges = 6 * min_scene_edges
        """ Sampling ---------------------------------------------------------------------------------------------------
        """
        self.edge_sampler = {}
        self.samples = []  # (dataset_id, sub-graph sample_id)

        if sample_res_cache is None or not os.path.exists(sample_res_cache):

            print('[Captured dataset Init] sampling sub_graphs')
            for ds_id, ds in enumerate(dataset_list):
                dataset_name = ds['name']

                n_Cameras = len(self.Cs[dataset_name])
                inoutMat = self.inout_mat[dataset_name]
                num_edges = len(self.edge_local_feat_cache[dataset_name])

                # determine the sampling number from this scene's share of edges,
                # clamped to sampling_num_range below
                sample_ratio = num_edges / max_scene_edges
                sample_num = int(sampling_num_range[1] * sample_ratio)
                if sample_num < sampling_num_range[0]:
                    sample_num = sampling_num_range[0]
                if sample_num > sampling_num_range[1]:
                    sample_num = sampling_num_range[1]

                # todo: fix sub_graph_nodes
                gen = SamplingGenerator(n_Cameras, inoutMat)
                gen.setSamplingSize(sub_graph_nodes)
                gen.setSamplingNumber(sample_num)
                gen.generation(use_undefine=self.sampling_undefined_edge)

                filtered_sampled_num = len(gen.sampling_node)
                print('[Captured dataset Init] %s: (filtered: %d, all: %d)' %
                      (dataset_name, filtered_sampled_num, num_edges))

                self.samples += [(ds_id, i)
                                 for i in range(filtered_sampled_num)]
                self.edge_sampler[dataset_name] = (gen.sampling_node,
                                                   gen.sampling_edge,
                                                   gen.sampling_edge_label)

            if sample_res_cache is not None:
                with open(sample_res_cache, 'wb') as f:
                    pickle.dump([self.samples, self.edge_sampler], f)
                print('[Captured Init] Save subgraph fast cache to %s.' %
                      sample_res_cache)

        elif os.path.exists(sample_res_cache):
            with open(sample_res_cache, 'rb') as f:
                s = pickle.load(f)
                self.samples, self.edge_sampler = s
            print('[Captured Init] Load subgraph fast cache from %s.' %
                  sample_res_cache)

        print('[Captured Init] Done, %d samples' % len(self.samples))
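
# Sketch of the per-scene sampling rule used above: scale the maximum sample
# count by the scene's share of edges, then clamp to sampling_num_range.
# The helper name and the values are hypothetical.
def clamped_sample_num(num_edges, max_scene_edges, low=100, high=500):
    return max(low, min(high, int(high * num_edges / max_scene_edges)))

assert clamped_sample_num(700, 1400) == 250  # 500 * 0.5
assert clamped_sample_num(10, 1400) == 100   # clamped to the lower bound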
    def __init__(self,
                 iccv_res_dir,
                 image_dir,
                 dataset_list,
                 lmdb_paths=None,
                 downsample_scale=0.25,
                 sampling_num=100,
                 sub_graph_nodes=24,
                 sample_res_cache=None,
                 manual_modification_dict=None,
                 transform_func='default'):

        # sampling_num: number of sub-graphs to sample for each dataset
        self.num_dataset = len(dataset_list)
        self.iccv_res_dir = iccv_res_dir
        self.sampling_num = sampling_num
        self.image_dir = image_dir
        self.sub_graph_nodes = sub_graph_nodes
        self.transform_func = transform_func
        self.downsample_scale = downsample_scale
        self.manual_modification_dict = manual_modification_dict

        if lmdb_paths is not None:
            self.use_lmdb = True
            self.lmdb_db = LMDBModel(lmdb_paths[0])
            self.lmdb_meta = pickle.load(open(lmdb_paths[1], 'rb'))
        else:
            self.use_lmdb = False

        if self.transform_func == 'default':
            self.transform_func = transforms.Compose([
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])

        # read image list and calibration
        self.frame_list = {}
        self.K = {}
        self.dataset_names = []
        for ds in dataset_list:
            dataset_name = ds['name']
            self.dataset_names.append(dataset_name)

            frame_list = read_image_list(
                os.path.join(iccv_res_dir, dataset_name, 'ImageList.txt'))
            frame_list = [f[2:].split('.')[0].strip() for f in frame_list]

            self.frame_list[dataset_name] = frame_list

            K, img_dim = read_calibration(
                os.path.join(iccv_res_dir, dataset_name, 'calibration.txt'))
            self.K[dataset_name] = K

        self.Es = {}
        self.Cs = {}

        self.edge_sampler = {}

        self.e_sampling_node = dict()
        self.e_sampling_edge = dict()
        self.e_sampling_edge_label = dict()

        self.covis_map = {}
        self.edge_local_feat_cache = {}

        if sample_res_cache is None or not os.path.exists(sample_res_cache):

            print(
                '[Ambi dataset Init] load in. and out. edges and sampling sub_graphs'
            )
            for ds in tqdm(dataset_list):
                dataset_name = ds['name']
                bundle_file_path = ds['bundle_file']
                # eg_file_path = os.path.join(image_dir, dataset_name, 'EGs.txt')
                # bundle_file_name = os.path.join(image_dir, dataset_name, ds['bundle_file'])

                Es, Cs = read_poses(
                    os.path.join(iccv_res_dir, dataset_name, bundle_file_path))
                self.Es[dataset_name] = Es
                self.Cs[dataset_name] = Cs

                n_Cameras = len(Cs)
                inoutMat = np.load(
                    os.path.join(iccv_res_dir, dataset_name, 'inoutMat.npy'))
                covis_map = np.load(
                    os.path.join(iccv_res_dir, dataset_name, 'covis_map.npy'))

                with open(
                        os.path.join(iccv_res_dir, dataset_name,
                                     'edge_feat_pos_cache.bin'), 'rb') as f:
                    edge_feat_pos_cache = pickle.load(f)
                self.covis_map[dataset_name] = covis_map
                self.edge_local_feat_cache[dataset_name] = edge_feat_pos_cache

                gen = SamplingGenerator(n_Cameras, inoutMat)
                gen.setSamplingSize(sub_graph_nodes)
                gen.setSamplingNumber(sampling_num)
                gen.generation()

                self.edge_sampler[dataset_name] = (gen.sampling_node,
                                                   gen.sampling_edge,
                                                   gen.sampling_edge_label)

            if sample_res_cache is not None and not os.path.exists(
                    sample_res_cache):
                with open(sample_res_cache, 'wb') as f:
                    pickle.dump([self.edge_sampler], f)
                print('[Ambi Init] Save subgraph fast cache to %s.' %
                      sample_res_cache)

        elif os.path.exists(sample_res_cache):

            for ds in tqdm(dataset_list):
                dataset_name = ds['name']
                bundle_file_path = ds['bundle_file']

                Es, Cs = read_poses(
                    os.path.join(iccv_res_dir, dataset_name, bundle_file_path))
                self.Es[dataset_name] = Es
                self.Cs[dataset_name] = Cs

                # n_Cameras = len(Cs)
                # inoutMat = np.load(os.path.join(iccv_res_dir, dataset_name, 'inoutMat.npy'))
                covis_map = np.load(
                    os.path.join(iccv_res_dir, dataset_name, 'covis_map.npy'))

                with open(
                        os.path.join(iccv_res_dir, dataset_name,
                                     'edge_feat_pos_cache.bin'), 'rb') as f:
                    edge_feat_pos_cache = pickle.load(f)
                self.covis_map[dataset_name] = covis_map
                self.edge_local_feat_cache[dataset_name] = edge_feat_pos_cache

            with open(sample_res_cache, 'rb') as f:
                s = pickle.load(f)
                self.edge_sampler = s[0]
            print('[Ambi Init] Load subgraph fast cache from %s.' %
                  sample_res_cache)

        print('[Ambi Init] Done')
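
# Minimal sketch of the sample_res_cache round-trip used above: the sampler
# results are pickled inside a one-element list and unpacked on load. The
# path and payload below are hypothetical.
import os
import pickle
import tempfile

cache_path = os.path.join(tempfile.gettempdir(), 'ambi_subgraph_cache.bin')
payload = {'scene_a': ([0, 1, 2], [(0, 1), (1, 2)], [1, 1])}
with open(cache_path, 'wb') as f:
    pickle.dump([payload], f)
with open(cache_path, 'rb') as f:
    restored = pickle.load(f)[0]
assert restored == payload
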
""" Network ------------------------------------------------------------------------------------------------------------
"""
train_params = TrainParameters()
train_params.DEV_IDS = run_dev_ids
train_params.VERBOSE_MODE = True
prior_box = LocalGlobalGATTrainBox_Prior(train_params=train_params, ckpt_path_dict={
    'vlad': '/mnt/Exp_4/valid_cache/netvlad_vgg16.tar',
    'ckpt': '/mnt/Exp_4/valid_cache/iter_nogat.pth.tar'
})
prior_box._prepare_eval()


""" Pipeline -----------------------------------------------------------------------------------------------------------
"""
img_lmdb = LMDBModel(lmdb_path, lock=False, read_only=True)
img_lmdb_meta = pickle.load(open(lmdb_meta_path, 'rb'))

# load data
for dataset in dataset_list:
    dataset_name = dataset['name']
    bundle_prefix = dataset['bundle_prefix']

    print("Processing on %s" % dataset_name)

    # load edge cache
    with open(os.path.join(cap_res_dir, dataset_name, 'edge_feat_pos_cache.bin'), 'rb') as f:
        edge_cache = pickle.load(f)

    # load frame list
    frame_list = read_image_list(os.path.join(cap_res_dir, dataset_name, bundle_prefix + '.list.txt'))