Example #1
    def _init_model(self):
        self.config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(self.config_p, 'r') as f:
            proto_str = f.read()
            text_format.Merge(proto_str, self.config)

        self.input_cfg = self.config.eval_input_reader
        self.model_cfg = self.config.model.second
        config_tool.change_detection_range_v2(self.model_cfg,
                                              [-50, -50, 50, 50])
        logging.info('config loaded.')

        self.net = build_network(self.model_cfg).to(device).eval()
        self.net.load_state_dict(torch.load(self.model_p))

        self.target_assigner = self.net.target_assigner
        self.voxel_generator = self.net.voxel_generator
        logging.info('network done, voxel done.')

        grid_size = self.voxel_generator.grid_size
        feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(
            self.model_cfg)
        feature_map_size = [*feature_map_size, 1][::-1]

        self.anchors = self.target_assigner.generate_anchors(
            feature_map_size)['anchors']
        self.anchors = torch.tensor(self.anchors,
                                    dtype=torch.float32,
                                    device=device)
        self.anchors = self.anchors.view(1, -1, 7)
        logging.info('anchors generated.')
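For a concrete sense of the feature-map arithmetic above, the annotated build() in Example #9 reports a 400 x 400 voxel grid and a downsample factor of 8; with those numbers (taken from that example's inline comments, not from this snippet) the computation works out as follows:

import numpy as np

grid_size = np.array([400, 400])      # voxel grid reported by the voxel generator
out_size_factor = 8                   # config_tool.get_downsample_factor(model_cfg)
feature_map_size = grid_size[:2] // out_size_factor  # -> array([50, 50])
feature_map_size = [*feature_map_size, 1][::-1]      # -> [1, 50, 50]; the appended size-1 axis ends up first
print(feature_map_size)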
Example #2
    def __init__(self, model_cfg, box_coder):
        self.target_assigner_config = model_cfg.target_assigner
        if not isinstance(self.target_assigner_config,
                          target_pb2.TargetAssigner):
            raise ValueError('target_assigner config not of type '
                             'target_pb2.TargetAssigner.')
        self.classes_cfg = self.target_assigner_config.class_settings
        self.tasks = [{
            'num_class': 1,
            'class_names': ['car']
        }, {
            'num_class': 2,
            'class_names': ['truck', 'construction_vehicle']
        }, {
            'num_class': 2,
            'class_names': ['bus', 'trailer']
        }, {
            'num_class': 1,
            'class_names': ['barrier']
        }, {
            'num_class': 2,
            'class_names': ['motorcycle', 'bicycle']
        }, {
            'num_class': 2,
            'class_names': ['pedestrian', 'traffic_cone']
        }]
        classes = [
            class_setting.class_name for class_setting in self.classes_cfg
        ]

        self.similarity_calcs = []
        for class_setting in self.classes_cfg:
            self.similarity_calcs.append(
                similarity_calculator_builder.build(
                    class_setting.region_similarity_calculator))

        self.positive_fraction = self.target_assigner_config.sample_positive_fraction
        positive_fraction = self.positive_fraction
        if positive_fraction < 0:
            positive_fraction = None

        self.out_size_factor = get_downsample_factor(model_cfg)
        self.pc_range_start_end = np.asarray(
            model_cfg.voxel_generator.point_cloud_range)
        pc_range = self.pc_range_start_end[3:6] - self.pc_range_start_end[0:3]

        # note: despite the name, this variable holds the voxel size from the config
        grid_size = np.asarray(model_cfg.voxel_generator.voxel_size)
        self.voxel_shape = pc_range // grid_size

        feature_map_size = self.voxel_shape[:2] // self.out_size_factor
        self._feature_map_size = [*feature_map_size, 1][::-1]
        self._box_coder = box_coder
        self._grid_size = grid_size
        self._pc_range = pc_range
        self.name = 'LabelAssigner'

        self._sim_calcs = self.similarity_calcs
        self._positive_fraction = positive_fraction
        self._sample_size = self.target_assigner_config.sample_size
        self._classes = classes
        self._assign_per_class = self.target_assigner_config.assign_per_class
Example #3
def generate_anchors(voxel_generator, target_assigner, model_cfg, device):    
    grid_size = voxel_generator.grid_size
    feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(model_cfg)
    feature_map_size = [*feature_map_size, 1][::-1]
    
    anchors = target_assigner.generate_anchors(feature_map_size)["anchors"]
    anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
    anchors = anchors.view(1, -1, 7)
    
    return anchors
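A hedged usage sketch for the helper above, assuming the network is built as in Examples #1 and #5 (build_network, model_cfg and ckpt_path come from those examples, not from this snippet):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = build_network(model_cfg).to(device).eval()
net.load_state_dict(torch.load(ckpt_path))

# reuse the network's own voxel generator and target assigner
anchors = generate_anchors(net.voxel_generator, net.target_assigner,
                           model_cfg, device)
print(anchors.shape)  # (1, N, 7); each anchor is typically (x, y, z, w, l, h, yaw)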
Example #4
def generate_example(net, model_cfg, points, device):

    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator

    grid_size = voxel_generator.grid_size
    feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(
        model_cfg)
    feature_map_size = [*feature_map_size, 1][::-1]

    anchors_np = target_assigner.generate_anchors(feature_map_size)["anchors"]

    anchors = torch.tensor(anchors_np, dtype=torch.float32, device=device)
    anchors = anchors.view(1, -1, 7)
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors_np[:,
                                                            [0, 1, 3, 4, 6]])

    res = voxel_generator.generate(points, max_voxels=12000)
    voxels = res["voxels"]
    coordinates = res["coordinates"]
    num_points = res["num_points_per_voxel"]

    # add batch idx to coords
    coords = np.pad(coordinates, ((0, 0), (1, 0)),
                    mode='constant',
                    constant_values=0)
    voxels = torch.tensor(voxels, dtype=torch.float32, device=device)
    # coordinates and per-voxel point counts are indices/counts, so use integer tensors
    coords = torch.tensor(coords, dtype=torch.int32, device=device)
    num_points = torch.tensor(num_points, dtype=torch.int32, device=device)

    # generate anchor mask
    # slow at high resolution; recommended to keep this disabled.
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    coors = coordinates
    dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
        coors, tuple(grid_size[::-1][1:]))
    dense_voxel_map = dense_voxel_map.cumsum(0)
    dense_voxel_map = dense_voxel_map.cumsum(1)
    anchors_area = box_np_ops.fused_get_anchors_area(dense_voxel_map,
                                                     anchors_bv, voxel_size,
                                                     pc_range, grid_size)
    anchors_mask = anchors_area > 1
    anchors_mask = torch.tensor(anchors_mask, dtype=torch.uint8, device=device)
    # example['anchors_mask'] = anchors_mask.astype(np.uint8)

    example = {
        "anchors": anchors,
        "voxels": voxels,
        "num_points": num_points,
        "coordinates": coords,
        'anchors_mask': anchors_mask,
    }

    return example
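A brief usage sketch for generate_example, under the assumption (not stated in the snippet) that the point cloud is a float32 array of shape (N, num_point_features), e.g. read from a KITTI-style .bin file, and that in eval mode the network returns one prediction dict per batch item:

# "000001.bin" is a hypothetical point-cloud file; adjust the feature count to the model config
points = np.fromfile("000001.bin", dtype=np.float32).reshape(-1, 4)
example = generate_example(net, model_cfg, points, device)
with torch.no_grad():
    pred = net(example)[0]  # assumed output convention: a list with one dict per batch item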
Example #5
    def __init__(self, config_filepath, weight_filepath):
        # ======================================================
        # Read Config file
        # ======================================================
        self.config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_filepath, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, self.config)
        self.input_cfg = self.config.eval_input_reader
        self.model_cfg = self.config.model.second
        # config_tool.change_detection_range_v2(self.model_cfg, [-50, -50, 50, 50])

        # ======================================================
        # Build Network, Target Assigner and Voxel Generator
        # ======================================================
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.net = build_network(self.model_cfg).to(self.device).eval()
        self.net.load_state_dict(torch.load(weight_filepath))

        self.target_assigner = self.net.target_assigner
        self.voxel_generator = self.net.voxel_generator

        # ======================================================
        # Generate Anchors
        # ======================================================
        grid_size = self.voxel_generator.grid_size
        print("========= grid_size")
        print(grid_size)
        print("========= voxel_size")
        print(self.voxel_generator.voxel_size)
        print("========= point_cloud_range")
        print(self.voxel_generator.point_cloud_range)
        feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(
            self.model_cfg)
        feature_map_size = [*feature_map_size, 1][::-1]
        print("========= feature_map_size")
        print(feature_map_size)

        self.anchors = self.target_assigner.generate_anchors(
            feature_map_size)["anchors"]
        self.anchors = torch.tensor(self.anchors,
                                    dtype=torch.float32,
                                    device=self.device)
        self.anchors = self.anchors.view(1, -1, 7)
        print("========= anchors.shape")
        print(self.anchors.shape)
Example #6
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner,
          multi_gpu=False):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
        input_reader_config: A input_reader_pb2.InputReader object.

    Returns:
        A tensor dict based on the input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    prep_cfg = input_reader_config.preprocess
    dataset_cfg = input_reader_config.dataset
    num_point_features = model_config.num_point_features
    out_size_factor = get_downsample_factor(model_config)
    assert out_size_factor > 0
    cfg = input_reader_config
    db_sampler_cfg = prep_cfg.database_sampler
    db_sampler = None
    if len(db_sampler_cfg.sample_groups) > 0:  # enable sample
        db_sampler = dbsampler_builder.build(db_sampler_cfg)
    grid_size = voxel_generator.grid_size
    # [352, 400]
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    print("feature_map_size", feature_map_size)
    assert all([n != '' for n in target_assigner.classes
                ]), "you must specify class_name in anchor_generators."
    dataset_cls = get_dataset_class(dataset_cfg.dataset_class_name)
    assert dataset_cls.NumPointFeatures >= 3, "you must set this to correct value"
    assert dataset_cls.NumPointFeatures == num_point_features, "currently you need keep them same"
    prep_func = partial(
        prep_pointcloud,
        root_path=dataset_cfg.kitti_root_path,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        training=training,
        max_voxels=prep_cfg.max_number_of_voxels,
        remove_outside_points=False,
        remove_unknown=prep_cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=prep_cfg.shuffle_points,
        gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise),
        global_random_rot_range=list(
            prep_cfg.global_random_rotation_range_per_object),
        global_translate_noise_std=list(prep_cfg.global_translate_noise_std),
        db_sampler=db_sampler,
        num_point_features=dataset_cls.NumPointFeatures,
        anchor_area_threshold=prep_cfg.anchor_area_threshold,
        gt_points_drop=prep_cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=prep_cfg.remove_points_after_sample,
        remove_environment=prep_cfg.remove_environment,
        use_group_id=prep_cfg.use_group_id,
        out_size_factor=out_size_factor,
        multi_gpu=multi_gpu)

    ret = target_assigner.generate_anchors(feature_map_size)
    class_names = target_assigner.classes
    anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
    anchors = ret["anchors"]
    anchors = anchors.reshape([-1, 7])
    matched_thresholds = ret["matched_thresholds"]
    unmatched_thresholds = ret["unmatched_thresholds"]
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])
    anchor_cache = {
        "anchors": anchors,
        "anchors_bv": anchors_bv,
        "matched_thresholds": matched_thresholds,
        "unmatched_thresholds": unmatched_thresholds,
        "anchors_dict": anchors_dict,
    }
    prep_func = partial(prep_func, anchor_cache=anchor_cache)

    dataset = dataset_cls(info_path=dataset_cfg.kitti_info_path,
                          root_path=dataset_cfg.kitti_root_path,
                          class_names=class_names,
                          prep_func=prep_func)

    return dataset
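A hedged sketch of how a build() like this is usually invoked, reusing the config-loading pattern from Examples #1 and #5 (net, config and the attribute names below come from those examples, not from this snippet):

input_cfg = config.eval_input_reader
model_cfg = config.model.second
eval_dataset = build(input_cfg,
                     model_cfg,
                     training=False,
                     voxel_generator=net.voxel_generator,
                     target_assigner=net.target_assigner)
# the returned dataset yields preprocessed examples ready to be collated into network input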
Example #7
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner=None):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
        input_reader_config: A input_reader_pb2.InputReader object.

    Returns:
        A tensor dict based on the input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    generate_bev = model_config.use_bev
    without_reflectivity = model_config.without_reflectivity
    num_point_features = model_config.num_point_features
    downsample_factor = config_tool.get_downsample_factor(model_config)
    cfg = input_reader_config
    db_sampler_cfg = input_reader_config.database_sampler
    db_sampler = None
    if len(db_sampler_cfg.sample_groups) > 0:  # enable sample
        db_sampler = dbsampler_builder.build(db_sampler_cfg)
    u_db_sampler_cfg = input_reader_config.unlabeled_database_sampler
    u_db_sampler = None
    if len(u_db_sampler_cfg.sample_groups) > 0:  # enable sample
        u_db_sampler = dbsampler_builder.build(u_db_sampler_cfg)
    grid_size = voxel_generator.grid_size
    # [352, 400]
    feature_map_size = grid_size[:2] // downsample_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    print("feature_map_size", feature_map_size)
    assert all([n != '' for n in target_assigner.classes
                ]), "you must specify class_name in anchor_generators."
    prep_func = partial(
        prep_pointcloud,
        root_path=cfg.kitti_root_path,
        class_names=target_assigner.classes,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        training=training,
        max_voxels=cfg.max_number_of_voxels,
        remove_outside_points=False,
        remove_unknown=cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=cfg.shuffle_points,
        gt_rotation_noise=list(cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(cfg.global_scaling_uniform_noise),
        global_random_rot_range=list(
            cfg.global_random_rotation_range_per_object),
        db_sampler=db_sampler,
        unlabeled_db_sampler=u_db_sampler,
        generate_bev=generate_bev,
        without_reflectivity=without_reflectivity,
        num_point_features=num_point_features,
        anchor_area_threshold=cfg.anchor_area_threshold,
        gt_points_drop=cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=cfg.remove_points_after_sample,
        remove_environment=cfg.remove_environment,
        use_group_id=cfg.use_group_id,
        downsample_factor=downsample_factor)
    dataset = KittiDataset(info_path=cfg.kitti_info_path,
                           root_path=cfg.kitti_root_path,
                           num_point_features=num_point_features,
                           target_assigner=target_assigner,
                           feature_map_size=feature_map_size,
                           prep_func=prep_func)

    return dataset
Example #8
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # device = torch.device("cpu")
    t1 = time.time()

    # Build Network, Target Assigner and Voxel Generator
    # ckpt_path = "/home/ogailab/autoware.ai.10.0/src/autoware/core_perception/lidar_second/70m_cb_all/voxelnet-148480.tckpt"
    ckpt_path = "/home/ogailab/tiatia/codes/TALite/model3.0/nuscene/all/fhd.rpnv2/voxelnet-140670.tckpt"
    net = build_network(model_cfg).to(device).eval()
    net.load_state_dict(torch.load(ckpt_path))
    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator
    t2 = time.time()

    # Generate Anchors
    grid_size = voxel_generator.grid_size
    feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(
        model_cfg)
    feature_map_size = [*feature_map_size, 1][::-1]

    anchors = target_assigner.generate_anchors(feature_map_size)["anchors"]
    anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
    anchors = anchors.view(1, -1, 7)
    t3 = time.time()

    if measure_time:
        print(f" Read Config file time = {(t1-t0)* 1000:.3f} ms")
        print(
            f" Build Network, Target Assigner and Voxel Generator time = {(t2-t1)* 1000:.3f} ms"
        )
        print(f" Generate Anchors time = {(t3-t2)* 1000:.3f} ms")

    rospy.init_node('SECOND_network_pub_example')
Example #9
def build(
        input_reader_config,
        model_config,
        training,
        voxel_generator,
        target_assigner,
        multi_gpu=False,
        generate_anchors_cachae=True,  # True for PointPillars and SECOND
        segmentation=False,
        bcl_keep_voxels=None,
        seg_keep_points=None,
        points_per_voxel=None):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
        input_reader_config: A input_reader_pb2.InputReader object.

    Returns:
        A tensor dict based on the input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    prep_cfg = input_reader_config.preprocess
    dataset_cfg = input_reader_config.dataset
    num_point_features = model_config.num_point_features
    out_size_factor = get_downsample_factor(model_config)
    assert out_size_factor > 0
    cfg = input_reader_config
    db_sampler_cfg = prep_cfg.database_sampler
    db_sampler = None
    if len(db_sampler_cfg.sample_groups
           ) > 0 or db_sampler_cfg.database_info_path != "":  # enable sample
        db_sampler = dbsampler_builder.build(db_sampler_cfg)
    grid_size = voxel_generator.grid_size
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    print("feature_map_size", feature_map_size)
    assert all([n != '' for n in target_assigner.classes
                ]), "you must specify class_name in anchor_generators."
    dataset_cls = get_dataset_class(dataset_cfg.dataset_class_name)
    assert dataset_cls.NumPointFeatures >= 3, "you must set this to correct value"
    assert dataset_cls.NumPointFeatures == num_point_features, "currently you need keep them same"
    prep_func = partial(
        prep_pointcloud,
        root_path=dataset_cfg.kitti_root_path,
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        training=training,
        max_voxels=prep_cfg.max_number_of_voxels,
        remove_outside_points=False,
        remove_unknown=prep_cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=prep_cfg.shuffle_points,
        gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise),
        global_random_rot_range=list(
            prep_cfg.global_random_rotation_range_per_object),
        global_translate_noise_std=list(prep_cfg.global_translate_noise_std),
        db_sampler=db_sampler,
        num_point_features=dataset_cls.NumPointFeatures,
        anchor_area_threshold=prep_cfg.anchor_area_threshold,
        gt_points_drop=prep_cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=prep_cfg.remove_points_after_sample,
        remove_environment=prep_cfg.remove_environment,
        use_group_id=prep_cfg.use_group_id,
        out_size_factor=out_size_factor,
        multi_gpu=multi_gpu,
        min_points_in_gt=prep_cfg.min_num_of_points_in_gt,
        random_flip_x=prep_cfg.random_flip_x,
        random_flip_y=prep_cfg.random_flip_y,
        sample_importance=prep_cfg.sample_importance)
    """#leo add condition for catch is understand plz delete the comment"""
    anchor_cache = None
    class_names = target_assigner.classes

    if generate_anchors_cachae:
        ret = target_assigner.generate_anchors(
            feature_map_size)  # comment this line out when using BCL as the voxel stage
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors_list = []
        for k, v in anchors_dict.items():
            anchors_list.append(v["anchors"])

        # anchor_cache = None  # set to None to generate anchors from the voxels instead

        anchors = np.concatenate(anchors_list, axis=0)
        anchors = anchors.reshape([-1, target_assigner.box_ndim])
        assert np.allclose(
            anchors, ret["anchors"].reshape(-1, target_assigner.box_ndim))
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
        anchor_cache = {
            "anchors": anchors,
            "anchors_bv": anchors_bv,
            "matched_thresholds": matched_thresholds,
            "unmatched_thresholds": unmatched_thresholds,
            "anchors_dict": anchors_dict,
        }

    prep_func = partial(prep_func,
                        anchor_cache=anchor_cache,
                        segmentation=segmentation,
                        bcl_keep_voxels=bcl_keep_voxels,
                        seg_keep_points=seg_keep_points,
                        points_per_voxel=points_per_voxel)

    dataset = dataset_cls(info_path=dataset_cfg.kitti_info_path,
                          root_path=dataset_cfg.kitti_root_path,
                          class_names=class_names,
                          prep_func=prep_func)

    return dataset
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner,
          multi_gpu=False):
    """Builds a tensor dictionary based on the InputReader config.

    Args:
        input_reader_config: A input_reader_pb2.InputReader object.

    Returns:
        A tensor dict based on the input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    prep_cfg = input_reader_config.preprocess
    dataset_cfg = input_reader_config.dataset  #kitti_info_path: "/home/lichao/v1.0-mini/infos_train.pkl"
    # kitti_root_path: "/home/lichao/v1.0-mini"
    # dataset_class_name: "NuScenesDataset"
    num_point_features = model_config.num_point_features  #4
    out_size_factor = get_downsample_factor(model_config)  #8
    assert out_size_factor > 0
    cfg = input_reader_config
    db_sampler_cfg = prep_cfg.database_sampler  #database_info_path: "/home/lichao/v1.0-mini/kitti_dbinfos_train.pkl"
    # sample_groups { name_to_max_num { key: "car" value: 30 } }
    # global_random_rotation_range_per_object: 0.0
    # rate: 1.0
    db_sampler = None
    if len(db_sampler_cfg.sample_groups
           ) > 0 or db_sampler_cfg.database_info_path != "":  # enable sample
        db_sampler = dbsampler_builder.build(db_sampler_cfg)  # loads the ground-truth database contents
    grid_size = voxel_generator.grid_size  #[400,400]
    feature_map_size = grid_size[:2] // out_size_factor  #[50,50]
    feature_map_size = [*feature_map_size, 1][::-1]  # [1, 50, 50]
    print("feature_map_size", feature_map_size)
    assert all([n != '' for n in target_assigner.classes
                ]), "you must specify class_name in anchor_generators."
    dataset_cls = get_dataset_class(
        dataset_cfg.dataset_class_name)  # NuScenesDataset
    assert dataset_cls.NumPointFeatures >= 3, "you must set this to correct value"
    assert dataset_cls.NumPointFeatures == num_point_features, "currently you need keep them same"
    prep_func = partial(  # partial fixes some of prep_pointcloud's arguments and returns a new callable
        prep_pointcloud,  # data/preprocess.py
        root_path=dataset_cfg.kitti_root_path,
        voxel_generator=voxel_generator,  #VoxelGeneratorV2
        target_assigner=target_assigner,
        training=training,
        max_voxels=prep_cfg.max_number_of_voxels,  # 25000 (train), 30000 (eval)
        remove_outside_points=False,
        remove_unknown=prep_cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=prep_cfg.shuffle_points,
        gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise),
        global_random_rot_range=list(
            prep_cfg.global_random_rotation_range_per_object),
        global_translate_noise_std=list(prep_cfg.global_translate_noise_std),
        db_sampler=db_sampler,
        num_point_features=dataset_cls.NumPointFeatures,
        anchor_area_threshold=prep_cfg.anchor_area_threshold,
        gt_points_drop=prep_cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=prep_cfg.remove_points_after_sample,
        remove_environment=prep_cfg.remove_environment,
        use_group_id=prep_cfg.use_group_id,
        out_size_factor=out_size_factor,  #8
        multi_gpu=multi_gpu,
        min_points_in_gt=prep_cfg.min_num_of_points_in_gt,
        random_flip_x=prep_cfg.random_flip_x,
        random_flip_y=prep_cfg.random_flip_y,
        sample_importance=prep_cfg.sample_importance)

    ret = target_assigner.generate_anchors(feature_map_size)
    class_names = target_assigner.classes  # ['car', 'bicycle', 'bus', 'construction_vehicle', 'motorcycle', 'pedestrian', 'traffic_cone', 'trailer', 'truck', 'barrier']
    anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
    anchors_list = []
    for k, v in anchors_dict.items():
        anchors_list.append(v["anchors"])  #50000个

    # anchors = ret["anchors"]
    anchors = np.concatenate(anchors_list, axis=0)
    anchors = anchors.reshape([-1, target_assigner.box_ndim])
    assert np.allclose(anchors,
                       ret["anchors"].reshape(-1, target_assigner.box_ndim))
    matched_thresholds = ret["matched_thresholds"]
    unmatched_thresholds = ret["unmatched_thresholds"]
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
        anchors[:, [0, 1, 3, 4, 6]])  # 4-dim BEV anchors
    anchor_cache = {
        "anchors": anchors,
        "anchors_bv": anchors_bv,
        "matched_thresholds": matched_thresholds,
        "unmatched_thresholds": unmatched_thresholds,
        "anchors_dict": anchors_dict,
    }
    prep_func = partial(prep_func, anchor_cache=anchor_cache)
    dataset = dataset_cls(
        info_path=dataset_cfg.kitti_info_path,  # path to the dataset info file
        root_path=dataset_cfg.kitti_root_path,
        class_names=class_names,  # the 10 classes
        prep_func=prep_func)

    return dataset  # each _nusc_infos entry is a dict with keys such as 'lidar_path', 'cam_front_path', 'token', 'sweeps', 'lidar2ego_translation', 'lidar2ego_rotation', 'ego2global_translation', 'ego2global_rotation', 'timestamp' and 'gt_boxes'