Example #1
def readinfo():
    global BACKEND
    print(BACKEND)
    instance = request.json
    root_path = Path(instance["root_path"])
    response = {"status": "normal"}
    BACKEND.root_path = root_path
    info_path = Path(instance["info_path"])
    dataset_class_name = instance["dataset_class_name"]
    BACKEND.dataset = get_dataset_class(dataset_class_name)(
        root_path=root_path, info_path=info_path)
    BACKEND.image_idxes = list(range(len(BACKEND.dataset)))
    response["image_indexes"] = BACKEND.image_idxes
    app.logger.info(f'{len(BACKEND.image_idxes)} samples loaded')

    response = jsonify(results=[response])
    response.headers['Access-Control-Allow-Headers'] = '*'
    return response
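
Example #1 above is a Flask view that expects a JSON body with root_path, info_path and dataset_class_name. A minimal client-side sketch of such a request follows; the host, port, route and paths are placeholder assumptions, not taken from the original project.

import requests

# Hypothetical request against the viewer backend; URL and paths are
# placeholders for illustration only.
payload = {
    "root_path": "/data/kitti",
    "info_path": "/data/kitti/kitti_infos_train.pkl",
    "dataset_class_name": "KittiDataset",
}
resp = requests.post("http://127.0.0.1:16666/api/readinfo", json=payload)
print(resp.json()["results"][0]["image_indexes"][:10])
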
Example #2
def kitti_gt_fgm_data_prep(old_root_path, old_trainval_info_path,
                           new_root_path, new_train_info_path):
    from second.core import box_np_ops
    from second.data.dataset import get_dataset_class
    dataset = get_dataset_class('KittiDataset')(
        root_path=old_root_path, info_path=old_trainval_info_path)
    for i in trange(len(dataset)):
        image_idx = i
        sensor_data = dataset.get_sensor_data(i)
        if 'image_idx' in sensor_data['metadata']:
            image_idx = sensor_data['metadata']['image_idx']
        points = sensor_data['lidar']['points']
        annos = sensor_data['lidar']['annotations']
        gt_boxes = annos['boxes']
        gt_mask = box_np_ops.points_in_rbbox(points, gt_boxes)
        points_aug = np.concatenate(
            (points, gt_mask.max(axis=1, keepdims=True)), axis=1)
        points_aug = points_aug.astype(np.float32)
        velo_file = 'training/velodyne_reduced/%06d.bin' % (image_idx)
        with open(f'{new_root_path}/{velo_file}', 'wb') as f:  # binary mode for ndarray.tofile
            points_aug.tofile(f)
    create_groundtruth_database(dataset_class_name='KittiFGMDataset',
                                data_path=new_root_path,
                                info_path=new_train_info_path)
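
A hypothetical invocation of kitti_gt_fgm_data_prep; every path below is a placeholder, and the directory training/velodyne_reduced/ is assumed to already exist under new_root_path.

# Placeholder paths for illustration only.
kitti_gt_fgm_data_prep(
    old_root_path='/data/kitti',
    old_trainval_info_path='/data/kitti/kitti_infos_trainval.pkl',
    new_root_path='/data/kitti_fgm',
    new_train_info_path='/data/kitti_fgm/kitti_infos_train.pkl')
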
Example #3
def create_groundtruth_database(dataset_class_name,
                                data_path,
                                info_path=None,
                                used_classes=None,
                                database_save_path=None,
                                db_info_save_path=None,
                                relative_path=True,
                                add_rgb=False,
                                lidar_only=False,
                                bev_only=False,
                                coors_range=None):
    dataset = get_dataset_class(dataset_class_name)(
        info_path=info_path,
        root_path=data_path,
    )
    root_path = Path(data_path)
    if database_save_path is None:
        database_save_path = root_path / 'gt_database'
    else:
        database_save_path = Path(database_save_path)
    if db_info_save_path is None:
        db_info_save_path = root_path / "kitti_dbinfos_train.pkl"
    database_save_path.mkdir(parents=True, exist_ok=True)
    all_db_infos = {}

    group_counter = 0
    for j in prog_bar(list(range(len(dataset)))):
        image_idx = j
        sensor_data = dataset.get_sensor_data(j)
        if "image_idx" in sensor_data["metadata"]:
            image_idx = sensor_data["metadata"]["image_idx"]
        points = sensor_data["lidar"]["points"]
        annos = sensor_data["lidar"]["annotations"]
        gt_boxes = annos["boxes"]
        names = annos["names"]
        group_dict = {}
        group_ids = np.full([gt_boxes.shape[0]], -1, dtype=np.int64)
        if "group_ids" in annos:
            group_ids = annos["group_ids"]
        else:
            group_ids = np.arange(gt_boxes.shape[0], dtype=np.int64)
        difficulty = np.zeros(gt_boxes.shape[0], dtype=np.int32)
        if "difficulty" in annos:
            difficulty = annos["difficulty"]

        num_obj = gt_boxes.shape[0]
        point_indices = box_np_ops.points_in_rbbox(points, gt_boxes)
        for i in range(num_obj):
            filename = f"{image_idx}_{names[i]}_{i}.bin"
            filepath = database_save_path / filename
            gt_points = points[point_indices[:, i]]

            gt_points[:, :3] -= gt_boxes[i, :3]
            with open(filepath, 'wb') as f:  # binary mode for ndarray.tofile
                gt_points.tofile(f)
            if (used_classes is None) or names[i] in used_classes:
                if relative_path:
                    db_path = str(database_save_path.stem + "/" + filename)
                else:
                    db_path = str(filepath)
                db_info = {
                    "name": names[i],
                    "path": db_path,
                    "image_idx": image_idx,
                    "gt_idx": i,
                    "box3d_lidar": gt_boxes[i],
                    "num_points_in_gt": gt_points.shape[0],
                    "difficulty": difficulty[i],
                    # "group_id": -1,
                    # "bbox": bboxes[i],
                }
                local_group_id = group_ids[i]
                # if local_group_id >= 0:
                if local_group_id not in group_dict:
                    group_dict[local_group_id] = group_counter
                    group_counter += 1
                db_info["group_id"] = group_dict[local_group_id]
                if "score" in annos:
                    db_info["score"] = annos["score"][i]
                if names[i] in all_db_infos:
                    all_db_infos[names[i]].append(db_info)
                else:
                    all_db_infos[names[i]] = [db_info]
    for k, v in all_db_infos.items():
        print(f"load {len(v)} {k} database infos")

    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
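
A small sketch for inspecting the pickle written above; the data root is a placeholder and the file name matches the default db_info_save_path.

import pickle
from pathlib import Path

# Load the database infos written by create_groundtruth_database.
db_info_path = Path('/data/kitti') / 'kitti_dbinfos_train.pkl'
with open(db_info_path, 'rb') as f:
    all_db_infos = pickle.load(f)

# Each entry maps a class name to a list of per-object records.
for name, infos in all_db_infos.items():
    sample = infos[0]
    print(name, len(infos), sample['path'], sample['num_points_in_gt'])
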
Example #4
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner,
          multi_gpu=False):
    """Builds a tensor dictionary based on the InputReader config.
    Args:
        input_reader_config: An input_reader_pb2.InputReader object.
    Returns:
        A tensor dict based on the input_reader_config.
    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    prep_cfg = input_reader_config.preprocess
    #import pdb; pdb.set_trace()
    dataset_cfg = input_reader_config.dataset
    num_point_features = model_config.num_point_features
    out_size_factor = get_downsample_factor(model_config)
    assert out_size_factor > 0
    cfg = input_reader_config
    db_sampler_cfg = prep_cfg.database_sampler
    db_sampler = None
    if len(db_sampler_cfg.sample_groups
           ) > 0 or db_sampler_cfg.database_info_path != "":  # enable sample
        db_sampler = dbsampler_builder.build(db_sampler_cfg)
    #import pdb; pdb.set_trace()
    grid_size = voxel_generator.grid_size
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    #print("feature_map_size", feature_map_size)
    assert all([n != '' for n in target_assigner.classes
                ]), "you must specify class_name in anchor_generators."
    dataset_cls = get_dataset_class(dataset_cfg.dataset_class_name)
    assert dataset_cls.NumPointFeatures >= 3, "you must set this to the correct value"
    assert dataset_cls.NumPointFeatures == num_point_features, "currently these must be kept the same"
    prep_func = partial(
        prep_pointcloud,
        root_path=dataset_cfg.kitti_root_path,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        training=training,
        max_voxels=prep_cfg.max_number_of_voxels,
        remove_outside_points=False,
        remove_unknown=prep_cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=prep_cfg.shuffle_points,
        gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise),
        global_random_rot_range=list(
            prep_cfg.global_random_rotation_range_per_object),
        global_translate_noise_std=list(prep_cfg.global_translate_noise_std),
        db_sampler=db_sampler,
        num_point_features=dataset_cls.NumPointFeatures,
        anchor_area_threshold=prep_cfg.anchor_area_threshold,
        gt_points_drop=prep_cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=prep_cfg.remove_points_after_sample,
        remove_environment=prep_cfg.remove_environment,
        use_group_id=prep_cfg.use_group_id,
        out_size_factor=out_size_factor,
        multi_gpu=multi_gpu,
        min_points_in_gt=prep_cfg.min_num_of_points_in_gt,
        random_flip_x=prep_cfg.random_flip_x,
        random_flip_y=prep_cfg.random_flip_y,
        sample_importance=prep_cfg.sample_importance)

    #import pdb; pdb.set_trace()
    ret = target_assigner.generate_anchors(feature_map_size)
    class_names = target_assigner.classes
    anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
    anchors_list = []
    for k, v in anchors_dict.items():
        anchors_list.append(v["anchors"])

    # anchors = ret["anchors"]
    anchors = np.concatenate(anchors_list, axis=0)
    anchors = anchors.reshape([-1, target_assigner.box_ndim])  # (199888, 7)
    assert np.allclose(anchors,
                       ret["anchors"].reshape(-1, target_assigner.box_ndim))
    matched_thresholds = ret["matched_thresholds"]
    unmatched_thresholds = ret["unmatched_thresholds"]
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])
    anchor_cache = {
        "anchors": anchors,
        "anchors_bv": anchors_bv,
        "matched_thresholds": matched_thresholds,
        "unmatched_thresholds": unmatched_thresholds,
        "anchors_dict": anchors_dict,
    }
    prep_func = partial(prep_func, anchor_cache=anchor_cache)

    dataset = dataset_cls(info_path=dataset_cfg.kitti_info_path,
                          root_path=dataset_cfg.kitti_root_path,
                          class_names=class_names,
                          prep_func=prep_func)

    #import pdb; pdb.set_trace()
    return dataset
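
The returned object is an indexable dataset whose __getitem__ runs prep_func; a minimal consumption sketch follows, assuming the caller obtained dataset from build() with a valid config. The keys named in the comment are the ones typically produced by prep_pointcloud in this code base and may vary with the config.

example = dataset[0]  # runs prep_func: voxelization, augmentation, target assignment
# typical keys include 'voxels', 'num_points', 'coordinates' and 'anchors';
# the exact set depends on the config and on training vs. evaluation
print(sorted(example.keys()))
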
Example #5
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner,
          multi_gpu=False):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
        input_reader_config: An input_reader_pb2.InputReader object.

    Returns:
        A tensor dict based on the input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    prep_cfg = input_reader_config.preprocess
    dataset_cfg = input_reader_config.dataset  #kitti_info_path: "/home/lichao/v1.0-mini/infos_train.pkl"
    # kitti_root_path: "/home/lichao/v1.0-mini"
    # dataset_class_name: "NuScenesDataset"
    num_point_features = model_config.num_point_features  #4
    out_size_factor = get_downsample_factor(model_config)  #8
    assert out_size_factor > 0
    cfg = input_reader_config
    db_sampler_cfg = prep_cfg.database_sampler  # database_info_path: "/home/lichao/v1.0-mini/kitti_dbinfos_train.pkl"
    # sample_groups { name_to_max_num { key: "car" value: 30 } }
    # global_random_rotation_range_per_object: 0.0  rate: 1.0
    db_sampler = None
    if len(db_sampler_cfg.sample_groups
           ) > 0 or db_sampler_cfg.database_info_path != "":  # enable sample
        db_sampler = dbsampler_builder.build(db_sampler_cfg)  # loads the ground-truth database entries
    grid_size = voxel_generator.grid_size  #[400,400]
    feature_map_size = grid_size[:2] // out_size_factor  #[50,50]
    feature_map_size = [*feature_map_size, 1][::-1]  #[1, 50, 50]
    print("feature_map_size", feature_map_size)
    assert all([n != '' for n in target_assigner.classes
                ]), "you must specify class_name in anchor_generators."
    dataset_cls = get_dataset_class(
        dataset_cfg.dataset_class_name)  # NuScenesDataset
    assert dataset_cls.NumPointFeatures >= 3, "you must set this to the correct value"
    assert dataset_cls.NumPointFeatures == num_point_features, "currently these must be kept the same"
    prep_func = partial(  # partial fixes these keyword arguments and returns a new function
        prep_pointcloud,  # see data/preprocess.py
        root_path=dataset_cfg.kitti_root_path,
        voxel_generator=voxel_generator,  #VoxelGeneratorV2
        target_assigner=target_assigner,
        training=training,
        max_voxels=prep_cfg.max_number_of_voxels,  # 25000 (train), 30000 (eval)
        remove_outside_points=False,
        remove_unknown=prep_cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=prep_cfg.shuffle_points,
        gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise),
        global_random_rot_range=list(
            prep_cfg.global_random_rotation_range_per_object),
        global_translate_noise_std=list(prep_cfg.global_translate_noise_std),
        db_sampler=db_sampler,
        num_point_features=dataset_cls.NumPointFeatures,
        anchor_area_threshold=prep_cfg.anchor_area_threshold,
        gt_points_drop=prep_cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=prep_cfg.remove_points_after_sample,
        remove_environment=prep_cfg.remove_environment,
        use_group_id=prep_cfg.use_group_id,
        out_size_factor=out_size_factor,  #8
        multi_gpu=multi_gpu,
        min_points_in_gt=prep_cfg.min_num_of_points_in_gt,
        random_flip_x=prep_cfg.random_flip_x,
        random_flip_y=prep_cfg.random_flip_y,
        sample_importance=prep_cfg.sample_importance)

    ret = target_assigner.generate_anchors(feature_map_size)
    class_names = target_assigner.classes  # ['car', 'bicycle', 'bus', 'construction_vehicle', 'motorcycle', 'pedestrian', 'traffic_cone', 'trailer', 'truck', 'barrier']
    anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
    anchors_list = []
    for k, v in anchors_dict.items():
        anchors_list.append(v["anchors"])  #50000个

    # anchors = ret["anchors"]
    anchors = np.concatenate(anchors_list, axis=0)
    anchors = anchors.reshape([-1, target_assigner.box_ndim])
    assert np.allclose(anchors,
                       ret["anchors"].reshape(-1, target_assigner.box_ndim))
    matched_thresholds = ret["matched_thresholds"]
    unmatched_thresholds = ret["unmatched_thresholds"]
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
        anchors[:, [0, 1, 3, 4, 6]])  # BEV anchors, 4 values each
    anchor_cache = {
        "anchors": anchors,
        "anchors_bv": anchors_bv,
        "matched_thresholds": matched_thresholds,
        "unmatched_thresholds": unmatched_thresholds,
        "anchors_dict": anchors_dict,
    }
    prep_func = partial(prep_func, anchor_cache=anchor_cache)
    dataset = dataset_cls(
        info_path=dataset_cfg.kitti_info_path,  # path to the info file
        root_path=dataset_cfg.kitti_root_path,
        class_names=class_names,  # 10 classes
        prep_func=prep_func)

    return dataset  # each _nusc_infos entry is a dict with keys such as 'lidar_path', 'cam_front_path', 'token', 'sweeps', 'lidar2ego_translation', 'lidar2ego_rotation', 'ego2global_translation', 'ego2global_rotation', 'timestamp', 'gt_boxes', ...
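
With the values annotated above (grid_size = [400, 400], out_size_factor = 8), the feature-map computation works out as in this small self-contained sketch.

import numpy as np

# Reproduce the feature_map_size arithmetic with the example values from the
# inline comments above.
grid_size = np.array([400, 400])
out_size_factor = 8
feature_map_size = grid_size[:2] // out_size_factor   # -> array([50, 50])
feature_map_size = [*feature_map_size, 1][::-1]       # -> [1, 50, 50]
print(feature_map_size)
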
Example #6
def create_my_groundtruth_database(dataset_class_name,
                                   data_path,
                                   info_path=None,
                                   used_classes=None,
                                   database_save_path=None,
                                   db_info_save_path=None,
                                   relative_path=True):
    dataset = get_dataset_class(dataset_class_name)(
        info_path=info_path,
        root_path=data_path,
    )
    root_path = Path(data_path)
    if database_save_path is None:
        database_save_path = root_path / 'gt_database'
    else:
        database_save_path = Path(database_save_path)
    if db_info_save_path is None:
        db_info_save_path = root_path / "my_dbinfos_train.pkl"
    database_save_path.mkdir(parents=True, exist_ok=True)
    all_db_infos = {}

    group_counter = 0
    for j in prog_bar(list(range(len(dataset)))):
        image_idx = j
        sensor_data = dataset.get_sensor_data(j)
        if "image_idx" in sensor_data["metadata"]:
            image_idx = sensor_data["metadata"]["image_idx"]
        points = sensor_data["lidar"]["points"]
        annos = sensor_data["lidar"]["annotations"]
        gt_boxes = annos["boxes"]
        names = annos["names"]
        group_dict = {}
        group_ids = np.arange(gt_boxes.shape[0], dtype=np.int64)
        difficulty = np.zeros(gt_boxes.shape[0], dtype=np.int32)

        num_obj = gt_boxes.shape[0]
        point_indices = None
        if num_obj > 0:
            point_indices = box_np_ops.points_in_rbbox(points, gt_boxes)
        for i in range(num_obj):
            filename = "{}_{}_{}.bin".format(image_idx, names[i], i)
            filepath = database_save_path / filename
            gt_points = points[point_indices[:, i]]
            if gt_points.shape[0] >= 5:
                gt_points[:, :3] -= gt_boxes[i, :3]
                with open(filepath, 'wb') as f:  # binary mode for ndarray.tofile
                    gt_points.tofile(f)
                if (used_classes is None) or names[i] in used_classes:
                    if relative_path:
                        db_path = str(database_save_path.stem + "/" + filename)
                    else:
                        db_path = str(filepath)
                    db_info = {
                        "name": names[i],
                        "path": db_path,
                        "image_idx": image_idx,
                        "gt_idx": i,
                        "box3d_lidar": gt_boxes[i],
                        "num_points_in_gt": gt_points.shape[0],
                        "difficulty": difficulty[i],
                    }
                    local_group_id = group_ids[i]
                    if local_group_id not in group_dict:
                        group_dict[local_group_id] = group_counter
                        group_counter += 1
                    db_info["group_id"] = group_dict[local_group_id]
                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]
    for k, v in all_db_infos.items():
        print("load {} {} database infos".format(len(v), k))

    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
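
A hypothetical call to the variant above; the dataset class name and paths are placeholders, and 'MyDataset' stands in for whatever class is registered with get_dataset_class in the surrounding project.

# Placeholder arguments for illustration only.
create_my_groundtruth_database(
    dataset_class_name='MyDataset',
    data_path='/data/my_dataset',
    info_path='/data/my_dataset/infos_train.pkl',
    used_classes=['car', 'pedestrian'])
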
Example #7
def create_groundtruth_database_with_sweep_info(dataset_class_name,
                                                data_path,
                                                info_path=None,
                                                used_classes=None,
                                                database_save_path=None,
                                                db_info_save_path=None,
                                                relative_path=True,
                                                add_rgb=False,
                                                lidar_only=False,
                                                bev_only=False,
                                                coors_range=None):
    dataset = get_dataset_class(dataset_class_name)(
        info_path=info_path,
        root_path=data_path,
    )
    root_path = Path(data_path)
    if database_save_path is None:
        database_save_path = root_path / 'gt_database_with_sweep_info'
    else:
        database_save_path = Path(database_save_path)
    if db_info_save_path is None:
        db_info_save_path = root_path / "dbinfos_train_with_sweep_info.pkl"
    database_save_path.mkdir(parents=True, exist_ok=True)
    all_db_infos = {}

    group_counter = 0
    for j in prog_bar(list(range(len(dataset)))):
        image_idx = j
        sensor_data = dataset.get_sensor_data(j)
        if "image_idx" in sensor_data["metadata"]:
            image_idx = sensor_data["metadata"]["image_idx"]

        assert("sweep_points_list" in sensor_data["lidar"])
        sweep_points_list = sensor_data["lidar"]["sweep_points_list"]

        assert("sweep_annotations" in sensor_data["lidar"])
        sweep_gt_boxes_list = sensor_data["lidar"]["sweep_annotations"]["boxes_list"]
        sweep_gt_tokens_list = sensor_data["lidar"]["sweep_annotations"]["tokens_list"]
        sweep_gt_names_list = sensor_data["lidar"]["sweep_annotations"]["names_list"]

        # we focus on the objects in the first frame
        # and find the bounding box index of the same object in every frame
        points = sensor_data["lidar"]["points"]
        annos = sensor_data["lidar"]["annotations"]
        names = annos["names"]
        gt_boxes = annos["boxes"]
        attrs = annos["attrs"]
        tokens = sweep_gt_tokens_list[0]

        # sanity check with redundancy
        assert(len(sweep_gt_boxes_list) == len(sweep_gt_tokens_list) == len(sweep_gt_names_list))
        assert(len(gt_boxes) == len(attrs) == len(tokens))
        assert(np.all(names == sweep_gt_names_list[0]))
        assert(np.all(gt_boxes == sweep_gt_boxes_list[0]))

        # on every frame, we mask points inside each bounding box,
        # even though later we only use the box that matches each tracked object
        sweep_point_indices_list = []
        for sweep_points, sweep_gt_boxes in zip(sweep_points_list, sweep_gt_boxes_list):
            sweep_point_indices_list.append(box_np_ops.points_in_rbbox(sweep_points, sweep_gt_boxes))

        # crop point cloud based on boxes in the current frame
        point_indices = box_np_ops.points_in_rbbox(points, gt_boxes)

        # 
        group_dict = {}
        group_ids = np.full([gt_boxes.shape[0]], -1, dtype=np.int64)
        if "group_ids" in annos:
            group_ids = annos["group_ids"]
        else:
            group_ids = np.arange(gt_boxes.shape[0], dtype=np.int64)

        #
        difficulty = np.zeros(gt_boxes.shape[0], dtype=np.int32)
        if "difficulty" in annos:
            difficulty = annos["difficulty"]

        num_obj = gt_boxes.shape[0]
        for i in range(num_obj):
            filename = f"{image_idx}_{names[i]}_{i}.bin"
            filepath = database_save_path / filename
            
            # only use non-key frame boxes when the object is moving
            if attrs[i] in ['vehicle.moving', 'cycle.with_rider', 'pedestrian.moving']:
                gt_points_list = []
                for t in range(len(sweep_points_list)):
                    # fast pass for most frames
                    if i < len(sweep_gt_tokens_list[t]) and sweep_gt_tokens_list[t][i] == tokens[i]:
                        box_idx = i
                    else:
                        I = np.flatnonzero(tokens[i] == sweep_gt_tokens_list[t])
                        if len(I) == 0: continue 
                        elif len(I) == 1: box_idx = I[0]
                        else: raise ValueError('Identical object tokens')
                    gt_points_list.append(sweep_points_list[t][sweep_point_indices_list[t][:, box_idx]])
                gt_points = np.concatenate(gt_points_list, axis=0)[:, [0, 1, 2, 4]]
            else: 
                gt_points = points[point_indices[:, i]]

            # offset points based on the bounding box in the current frame
            gt_points[:, :3] -= gt_boxes[i, :3]

            with open(filepath, 'wb') as f:  # binary mode for ndarray.tofile
                gt_points.tofile(f)

            if (used_classes is None) or names[i] in used_classes:
                if relative_path: 
                    db_path = str(database_save_path.stem + "/" + filename)
                else: 
                    db_path = str(filepath)
                db_info = {
                    "name": names[i],
                    "path": db_path, 
                    "image_idx": image_idx, 
                    "gt_idx": i, 
                    "box3d_lidar": gt_boxes[i], 
                    "num_points_in_gt": gt_points.shape[0],
                    "difficulty": difficulty[i]
                }
                local_group_id = group_ids[i]
                if local_group_id not in group_dict:
                    group_dict[local_group_id] = group_counter
                    group_counter += 1
                db_info["group_id"] = group_dict[local_group_id]
                if "score" in annos:
                    db_info["score"] = annos["score"][i]
                if names[i] in all_db_infos:
                    all_db_infos[names[i]].append(db_info)
                else:
                    all_db_infos[names[i]] = [db_info]

    for k, v in all_db_infos.items():
        print(f"load {len(v)} {k} database infos")

    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
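
Objects are written with ndarray.tofile, so a consumer must know the dtype and column count in advance; note that the moving-object branch above stores 4 columns (indices [0, 1, 2, 4]) while the static branch keeps the full point dimensionality. A small read-back sketch, with the filename and dtype as assumptions:

import numpy as np

# Read one cropped object back from the database written above; the filename
# is a placeholder and the column count must match what was written.
num_point_features = 4  # assumption: x, y, z plus one extra channel
obj_points = np.fromfile('gt_database_with_sweep_info/0_car_0.bin',
                         dtype=np.float32).reshape(-1, num_point_features)
print(obj_points.shape)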