Example #1
    def predict(self, pointclouds):
        t0 = time.time()
        # Filter out noise points near the lidar (keep points farther than 5 m in XY)
        mask = np.sqrt(np.square(pointclouds[:, :2]).sum(axis=1)) > 5
        pointclouds = pointclouds[mask]
        ret = self.voxel_generator.generate(pointclouds, max_voxels=12000)
        voxels = ret['voxels']
        coords = ret['coordinates']
        num_points = ret['num_points_per_voxel']
        # print('before filter voxels shape is ', voxels.shape)
        if self.bg_filter is not None:
            voxels_mask = self.bg_filter.filter_bg(voxels, num_points, coords)
            voxels = voxels[voxels_mask]
            coords = coords[voxels_mask]
            num_points = num_points[voxels_mask]
            # print("after filter voxels shape is ", voxels.shape)
        voxels, num_points, coords, voxel_mask = voxel_padding(
            voxels, num_points, coords, max_voxel_num=self.max_voxel_num)
        # print('after padding voxels shape is ', voxels.shape)

        # mask_points = np.logical_not(np.all(voxels.reshape(-1, 4) == 0, axis=1))
        # points = voxels.reshape(-1, 4)[mask_points]

        example = {
            "anchors": self.anchors,
            "voxels": voxels[np.newaxis, ...],
            "num_points": num_points[np.newaxis, ...],
            "coordinates": coords[np.newaxis, ...],
            'voxel_mask': voxel_mask[np.newaxis, ...],
            "metadata": [{
                "image_idx": '000000'
            }]
        }
        if self.anchors_area >= 0:
            # Check whether each grid-map cell contains any pillars (non-empty)
            dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
                coords, tuple(self.grid_size[::-1][1:]))
            dense_voxel_map = dense_voxel_map.cumsum(0)
            dense_voxel_map = dense_voxel_map.cumsum(1)
            # Count the non-empty pillars covered by each BEV anchor
            anchors_area = box_np_ops.fused_get_anchors_area(
                dense_voxel_map, self.anchors_bv, self.voxel_size,
                self.pc_range, self.grid_size)
            anchors_mask = anchors_area >= self.anchors_area
            if anchors_mask.sum() < 1:
                anchors_mask = np.zeros(anchors_area.shape[0], dtype=np.bool_)
                print("anchors_mask is zero")
            example['anchors_mask'] = anchors_mask

        example_list = example_to_tensorlist(example,
                                             device=self.device,
                                             float_type=self.float_dtype)

        with torch.no_grad():
            boxes, scores = self.net(*example_list)[0][:2]
            boxes = boxes.detach().cpu().numpy()
            scores = scores.detach().cpu().numpy()
            print("current frame process time is {:.3f}ms".format(
                (time.time() - t0) * 1000))
        return boxes, scores
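
For context, a minimal usage sketch of the predict method above, assuming the class is instantiated as detector and the input is a KITTI-style .bin file holding float32 (x, y, z, intensity) tuples; the file name is only illustrative:

import numpy as np

# Load a KITTI-style point cloud: a flat float32 binary with 4 values per point.
points = np.fromfile("000000.bin", dtype=np.float32).reshape(-1, 4)

boxes, scores = detector.predict(points)  # predicted boxes and their scores
print(boxes.shape, scores.shape)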
Example #2
def generate_example(net, model_cfg, points, device):

    target_assigner = net.target_assigner
    voxel_generator = net.voxel_generator

    grid_size = voxel_generator.grid_size
    feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(
        model_cfg)
    feature_map_size = [*feature_map_size, 1][::-1]

    anchors_np = target_assigner.generate_anchors(feature_map_size)["anchors"]

    anchors = torch.tensor(anchors_np, dtype=torch.float32, device=device)
    anchors = anchors.view(1, -1, 7)
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors_np[:,
                                                            [0, 1, 3, 4, 6]])

    res = voxel_generator.generate(points, max_voxels=12000)
    voxels = res["voxels"]
    coordinates = res["coordinates"]
    num_points = res["num_points_per_voxel"]

    # add batch idx to coords
    coords = np.pad(coordinates, ((0, 0), (1, 0)),
                    mode='constant',
                    constant_values=0)
    voxels = torch.tensor(voxels, dtype=torch.float32, device=device)
    coords = torch.tensor(coords, dtype=torch.float32, device=device)
    num_points = torch.tensor(num_points, dtype=torch.float32, device=device)

    # generate anchor mask
    # slow with high resolution. recommend disable this forever.
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    coors = coordinates
    dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
        coors, tuple(grid_size[::-1][1:]))
    dense_voxel_map = dense_voxel_map.cumsum(0)
    dense_voxel_map = dense_voxel_map.cumsum(1)
    anchors_area = box_np_ops.fused_get_anchors_area(dense_voxel_map,
                                                     anchors_bv, voxel_size,
                                                     pc_range, grid_size)
    anchors_mask = anchors_area > 1
    anchors_mask = torch.tensor(anchors_mask, dtype=torch.uint8, device=device)
    # example['anchors_mask'] = anchors_mask.astype(np.uint8)

    example = {
        "anchors": anchors,
        "voxels": voxels,
        "num_points": num_points,
        "coordinates": coords,
        'anchors_mask': anchors_mask,
    }

    return example
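
The returned dict can then be fed to a SECOND-style network in eval mode; a minimal inference sketch, assuming net is the same VoxelNet-style model the example was built for (batch size 1, as implied by the zero batch index padded onto coords):

import torch

net.eval()
with torch.no_grad():
    preds = net(example)  # typically a list of per-frame prediction dicts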
Example #3
 def generate_anchor_mask(self, anchors, coordinates, grid_size,
                          voxel_size, pc_range, anchor_area_threshold=-1):
     anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                          [0, 1, 3, 4, 6]])
     anchors_mask = None
     if anchor_area_threshold >= 0:
         # slow with high resolution. recommend disable this forever.
         coors = coordinates
         dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
             coors, tuple(grid_size[::-1][1:]))
         dense_voxel_map = dense_voxel_map.cumsum(0)
         dense_voxel_map = dense_voxel_map.cumsum(1)
         anchors_area = box_np_ops.fused_get_anchors_area(
             dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
         anchors_mask = anchors_area > anchor_area_threshold
         # example['anchors_mask'] = anchors_mask.astype(np.uint8)
     return anchors_mask
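
The two cumsum calls above turn the per-cell pillar counts into a 2D integral image (summed-area table), so fused_get_anchors_area can count the non-empty pillars inside each anchor's BEV rectangle with four lookups instead of summing a sub-grid. A minimal NumPy sketch of that idea; the helper names here are illustrative, not the box_np_ops API:

import numpy as np

def occupancy_integral_image(coords, grid_shape):
    # coords: (M, 2) integer (row, col) BEV cells that contain at least one pillar.
    occ = np.zeros(grid_shape, dtype=np.float32)
    occ[coords[:, 0], coords[:, 1]] = 1.0
    return occ.cumsum(0).cumsum(1)

def occupied_cells_in_box(integral, r0, c0, r1, c1):
    # Occupied-cell count inside the half-open box [r0:r1, c0:c1] via four corner reads.
    total = integral[r1 - 1, c1 - 1]
    if r0 > 0:
        total -= integral[r0 - 1, c1 - 1]
    if c0 > 0:
        total -= integral[r1 - 1, c0 - 1]
    if r0 > 0 and c0 > 0:
        total += integral[r0 - 1, c0 - 1]
    return total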
Example #4
 def __getitem__(self, idx):
     pc_file = self.pc_files[idx]
     points = self.read_bin_points(pc_file)
     metrics = {}
     t1 = time.time()
     # [0, -40, -3, 70.4, 40, 1]
     voxel_size = self.voxel_generator.voxel_size
     pc_range = self.voxel_generator.point_cloud_range
     grid_size = self.voxel_generator.grid_size
     # [352, 400]
     res = self.voxel_generator.generate(points, self.max_voxels)
     voxels = res["voxels"]
     coordinates = res["coordinates"]
     num_points = res["num_points_per_voxel"]
     num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
     metrics["voxel_gene_time"] = time.time() - t1
     anchors = self.anchor_cache["anchors"]
     anchors_bv = self.anchor_cache["anchors_bv"]
     # anchors_dict = self.anchor_cache["anchors_dict"]
     # matched_thresholds = self.anchor_cache["matched_thresholds"]
     # unmatched_thresholds = self.anchor_cache["unmatched_thresholds"]
     example = {
         'pc_file': pc_file,
         'voxels': voxels,
         'num_points': num_points,
         'coordinates': coordinates,
         "num_voxels": num_voxels,
         "metrics": metrics,
         "anchors": anchors
     }
     if self.anchor_area_threshold >= 0:
         # slow with high resolution. recommend disable this forever.
         coors = coordinates
         dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
             coors, tuple(grid_size[::-1][1:]))
         dense_voxel_map = dense_voxel_map.cumsum(0)
         dense_voxel_map = dense_voxel_map.cumsum(1)
         anchors_area = box_np_ops.fused_get_anchors_area(
             dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
         anchors_mask = anchors_area > self.anchor_area_threshold
         # example['anchors_mask'] = anchors_mask.astype(np.uint8)
         example['anchors_mask'] = anchors_mask
     return example
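
read_bin_points is not shown in this snippet; a minimal sketch of what such a reader usually looks like for KITTI-style .bin files, assuming 4 float32 features (x, y, z, intensity) per point:

import numpy as np

def read_bin_points(pc_file, num_features=4):
    # Flat float32 binary, num_features values per point -> (N, num_features) array.
    return np.fromfile(pc_file, dtype=np.float32).reshape(-1, num_features)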
Example #5
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    remove_unknown=False,
                    gt_rotation_noise=(-np.pi / 3, np.pi / 3),
                    gt_loc_noise_std=(1.0, 1.0, 1.0),
                    global_rotation_noise=(-np.pi / 4, np.pi / 4),
                    global_scaling_noise=(0.95, 1.05),
                    global_random_rot_range=(0.78, 2.35),
                    global_translate_noise_std=(0, 0, 0),
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    out_size_factor=2,
                    use_group_id=False,
                    multi_gpu=False,
                    min_points_in_gt=-1,
                    random_flip_x=True,
                    random_flip_y=True,
                    sample_importance=1.0,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths 
    exists.

    input_dict format: dataset.get_sensor_data format

    """
    t = time.time()
    class_names = target_assigner.classes
    points = input_dict["lidar"]["points"]
    if training:
        anno_dict = input_dict["lidar"]["annotations"]
        gt_dict = {
            "gt_boxes": anno_dict["boxes"],
            "gt_names": anno_dict["names"],
            "gt_importance": np.ones([anno_dict["boxes"].shape[0]], dtype=anno_dict["boxes"].dtype),
        }
        if "difficulty" not in anno_dict:
            difficulty = np.zeros([anno_dict["boxes"].shape[0]],
                                  dtype=np.int32)
            gt_dict["difficulty"] = difficulty
        else:
            gt_dict["difficulty"] = anno_dict["difficulty"]
        if use_group_id and "group_ids" in anno_dict:
            group_ids = anno_dict["group_ids"]
            gt_dict["group_ids"] = group_ids
    calib = None
    if "calib" in input_dict:
        calib = input_dict["calib"]

    if reference_detections is not None:
        assert calib is not None and "image" in input_dict
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(calib["P2"])
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, calib["rect"],
                                              calib["Trv2c"])
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
        masks = points_in_convex_polygon_3d_jit(points, surfaces)
        points = points[masks.any(-1)]

    if remove_outside_points:
        assert calib is not None
        image_shape = input_dict["image"]["image_shape"]
        points = box_np_ops.remove_outside_points(
            points, calib["rect"], calib["Trv2c"], calib["P2"], image_shape)
    if remove_environment is True and training:
        selected = kitti.keep_arrays_by_name(gt_dict["gt_names"],
                                             target_assigner.classes)
        _dict_select(gt_dict, selected)
        masks = box_np_ops.points_in_rbbox(points, gt_dict["gt_boxes"])
        points = points[masks.any(-1)]
    metrics = {}

    if training:
        """
        boxes_lidar = gt_dict["gt_boxes"]
        bev_map = simplevis.nuscene_vis(points, boxes_lidar)
        cv2.imshow('pre-noise', bev_map)
        """
        selected = kitti.drop_arrays_by_name(gt_dict["gt_names"], ["DontCare"])
        _dict_select(gt_dict, selected)
        if remove_unknown:
            remove_mask = gt_dict["difficulty"] == -1
            """
            gt_boxes_remove = gt_boxes[remove_mask]
            gt_boxes_remove[:, 3:6] += 0.25
            points = prep.remove_points_in_boxes(points, gt_boxes_remove)
            """
            keep_mask = np.logical_not(remove_mask)
            _dict_select(gt_dict, keep_mask)
        gt_dict.pop("difficulty")
        if min_points_in_gt > 0:
            # points_count_rbbox takes 10ms with 10 sweeps nuscenes data
            point_counts = box_np_ops.points_count_rbbox(points, gt_dict["gt_boxes"])
            mask = point_counts >= min_points_in_gt
            _dict_select(gt_dict, mask)
        gt_boxes_mask = np.array(
            [n in class_names for n in gt_dict["gt_names"]], dtype=np.bool_)
        if db_sampler is not None:
            group_ids = None
            if "group_ids" in gt_dict:
                group_ids = gt_dict["group_ids"]

            sampled_dict = db_sampler.sample_all(
                root_path,
                gt_dict["gt_boxes"],
                gt_dict["gt_names"],
                num_point_features,
                random_crop,
                gt_group_ids=group_ids,
                calib=calib)

            if sampled_dict is not None:
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                gt_dict["gt_names"] = np.concatenate(
                    [gt_dict["gt_names"], sampled_gt_names], axis=0)
                gt_dict["gt_boxes"] = np.concatenate(
                    [gt_dict["gt_boxes"], sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0)
                sampled_gt_importance = np.full([sampled_gt_boxes.shape[0]], sample_importance,
                                                dtype=sampled_gt_boxes.dtype)
                gt_dict["gt_importance"] = np.concatenate(
                    [gt_dict["gt_importance"], sampled_gt_importance])

                if group_ids is not None:
                    sampled_group_ids = sampled_dict["group_ids"]
                    gt_dict["group_ids"] = np.concatenate(
                        [gt_dict["group_ids"], sampled_group_ids])

                if remove_points_after_sample:
                    masks = box_np_ops.points_in_rbbox(points,
                                                       sampled_gt_boxes)
                    points = points[np.logical_not(masks.any(-1))]

                points = np.concatenate([sampled_points, points], axis=0)
        pc_range = voxel_generator.point_cloud_range
        group_ids = None
        if "group_ids" in gt_dict:
            group_ids = gt_dict["group_ids"]

        prep.noise_per_object_v3_(
            gt_dict["gt_boxes"],
            points,
            gt_boxes_mask,
            rotation_perturb=gt_rotation_noise,
            center_noise_std=gt_loc_noise_std,
            global_random_rot_range=global_random_rot_range,
            group_ids=group_ids,
            num_try=100)

        # should remove unrelated objects after noise per object
        # for k, v in gt_dict.items():
        #     print(k, v.shape)
        _dict_select(gt_dict, gt_boxes_mask)
        gt_classes = np.array(
            [class_names.index(n) + 1 for n in gt_dict["gt_names"]],
            dtype=np.int32)
        gt_dict["gt_classes"] = gt_classes
        gt_dict["gt_boxes"], points = prep.random_flip(gt_dict["gt_boxes"],
                                                       points, 0.5, random_flip_x, random_flip_y)
        gt_dict["gt_boxes"], points = prep.global_rotation_v2(
            gt_dict["gt_boxes"], points, *global_rotation_noise)
        gt_dict["gt_boxes"], points = prep.global_scaling_v2(
            gt_dict["gt_boxes"], points, *global_scaling_noise)
        prep.global_translate_(gt_dict["gt_boxes"], points, global_translate_noise_std)
        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range_by_center(gt_dict["gt_boxes"], bv_range)
        _dict_select(gt_dict, mask)

        # limit rad to [-pi, pi]
        gt_dict["gt_boxes"][:, 6] = box_np_ops.limit_period(
            gt_dict["gt_boxes"][:, 6], offset=0.5, period=2 * np.pi)

        # boxes_lidar = gt_dict["gt_boxes"]
        # bev_map = simplevis.nuscene_vis(points, boxes_lidar)
        # cv2.imshow('post-noise', bev_map)
        # cv2.waitKey(0)
    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]
    t1 = time.time()
    if not multi_gpu:
        res = voxel_generator.generate(
            points, max_voxels)
        voxels = res["voxels"]
        coordinates = res["coordinates"]
        num_points = res["num_points_per_voxel"]
        num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
    else:
        res = voxel_generator.generate_multi_gpu(
            points, max_voxels)
        voxels = res["voxels"]
        coordinates = res["coordinates"]
        num_points = res["num_points_per_voxel"]
        num_voxels = np.array([res["voxel_num"]], dtype=np.int64)
    metrics["voxel_gene_time"] = time.time() - t1
    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": num_voxels,
        "metrics": metrics,
    }
    if calib is not None:
        example["calib"] = calib
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        anchors_dict = anchor_cache["anchors_dict"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]

    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, target_assigner.box_ndim])
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
    example["anchors"] = anchors
    anchors_mask = None
    if anchor_area_threshold >= 0:
        # slow with high resolution. recommend disable this forever.
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    # print("prep time", time.time() - t)
    metrics["prep_time"] = time.time() - t
    if not training:
        return example
    example["gt_names"] = gt_dict["gt_names"]
    # voxel_labels = box_np_ops.assign_label_to_voxel(gt_boxes, coordinates,
    #                                                 voxel_size, coors_range)
    if create_targets:
        t1 = time.time()
        targets_dict = target_assigner.assign(
            anchors,
            anchors_dict,
            gt_dict["gt_boxes"],
            anchors_mask,
            gt_classes=gt_dict["gt_classes"],
            gt_names=gt_dict["gt_names"],
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds,
            importance=gt_dict["gt_importance"])

        """
        boxes_lidar = gt_dict["gt_boxes"]
        bev_map = simplevis.nuscene_vis(points, boxes_lidar, gt_dict["gt_names"])
        assigned_anchors = anchors[targets_dict['labels'] > 0]
        ignored_anchors = anchors[targets_dict['labels'] == -1]
        bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], ignored_anchors, [128, 128, 128], 2)
        bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], assigned_anchors, [255, 0, 0])
        cv2.imshow('anchors', bev_map)
        cv2.waitKey(0)
        
        boxes_lidar = gt_dict["gt_boxes"]
        pp_map = np.zeros(grid_size[:2], dtype=np.float32)
        voxels_max = np.max(voxels[:, :, 2], axis=1, keepdims=False)
        voxels_min = np.min(voxels[:, :, 2], axis=1, keepdims=False)
        voxels_height = voxels_max - voxels_min
        voxels_height = np.minimum(voxels_height, 4)
        # sns.distplot(voxels_height)
        # plt.show()
        pp_map[coordinates[:, 1], coordinates[:, 2]] = voxels_height / 4
        pp_map = (pp_map * 255).astype(np.uint8)
        pp_map = cv2.cvtColor(pp_map, cv2.COLOR_GRAY2RGB)
        pp_map = simplevis.draw_box_in_bev(pp_map, [-50, -50, 3, 50, 50, 1], boxes_lidar, [128, 0, 128], 1)
        cv2.imshow('heights', pp_map)
        cv2.waitKey(0)
        """
        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            # 'reg_weights': targets_dict['bbox_outside_weights'],
            'importance': targets_dict['importance'],
        })
    return example
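
_dict_select is called repeatedly above but not defined in this snippet; from its call sites it applies one index array or boolean mask to every entry of the dict in place. A minimal sketch under that assumption:

def _dict_select(dict_, inds):
    # Apply the same indices / boolean mask to every value, recursing into sub-dicts.
    for k, v in dict_.items():
        if isinstance(v, dict):
            _dict_select(v, inds)
        else:
            dict_[k] = v[inds]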
Example #6
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    class_names=['Car'],
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    reduce_valid_area=False,
                    remove_unknown=False,
                    gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                    gt_loc_noise_std=[1.0, 1.0, 1.0],
                    global_rotation_noise=[-np.pi / 4, np.pi / 4],
                    global_scaling_noise=[0.95, 1.05],
                    global_random_rot_range=[0.78, 2.35],
                    generate_bev=False,
                    without_reflectivity=False,
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    add_rgb_to_points=False,
                    lidar_input=False,
                    unlabeled_db_sampler=None,
                    out_size_factor=2,
                    min_gt_point_dict=None,
                    bev_only=False,
                    use_group_id=False,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths 
    exists.
    """
    points = input_dict["points"]
    if training:
        gt_boxes = input_dict["gt_boxes"]
        gt_names = input_dict["gt_names"]
        difficulty = input_dict["difficulty"]
        group_ids = None
        if use_group_id and "group_ids" in input_dict:
            group_ids = input_dict["group_ids"]
    rect = input_dict["rect"]
    Trv2c = input_dict["Trv2c"]
    P2 = input_dict["P2"]
    unlabeled_training = unlabeled_db_sampler is not None
    #image_idx = input_dict["image_idx"]

    if reference_detections is not None:
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        # frustums = np.linalg.inv(R) @ frustums.T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
        masks = points_in_convex_polygon_3d_jit(points, surfaces)
        points = points[masks.any(-1)]

    if remove_outside_points and not lidar_input:
        image_shape = input_dict["image_shape"]
        points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2,
                                                  image_shape)
    if remove_environment is True and training:
        selected = kitti.keep_arrays_by_name(gt_names, class_names)
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        difficulty = difficulty[selected]
        if group_ids is not None:
            group_ids = group_ids[selected]
        points = prep.remove_points_outside_boxes(points, gt_boxes)
    if training:
        # print(gt_names)
        selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"])
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        difficulty = difficulty[selected]
        if group_ids is not None:
            group_ids = group_ids[selected]

        gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c)
        if remove_unknown:
            remove_mask = difficulty == -1
            """
            gt_boxes_remove = gt_boxes[remove_mask]
            gt_boxes_remove[:, 3:6] += 0.25
            points = prep.remove_points_in_boxes(points, gt_boxes_remove)
            """
            keep_mask = np.logical_not(remove_mask)
            gt_boxes = gt_boxes[keep_mask]
            gt_names = gt_names[keep_mask]
            difficulty = difficulty[keep_mask]
            if group_ids is not None:
                group_ids = group_ids[keep_mask]
        gt_boxes_mask = np.array([n in class_names for n in gt_names],
                                 dtype=np.bool_)
        if db_sampler is not None:
            sampled_dict = db_sampler.sample_all(root_path,
                                                 gt_boxes,
                                                 gt_names,
                                                 num_point_features,
                                                 random_crop,
                                                 gt_group_ids=group_ids,
                                                 rect=rect,
                                                 Trv2c=Trv2c,
                                                 P2=P2)

            if sampled_dict is not None:
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                # gt_names = gt_names[gt_boxes_mask].tolist()
                gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
                # gt_names += [s["name"] for s in sampled]
                gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0)
                if group_ids is not None:
                    sampled_group_ids = sampled_dict["group_ids"]
                    group_ids = np.concatenate([group_ids, sampled_group_ids])

                if remove_points_after_sample:
                    points = prep.remove_points_in_boxes(
                        points, sampled_gt_boxes)

                points = np.concatenate([sampled_points, points], axis=0)
        # unlabeled_mask = np.zeros((gt_boxes.shape[0], ), dtype=np.bool_)
        if without_reflectivity:
            used_point_axes = list(range(num_point_features))
            used_point_axes.pop(3)
            points = points[:, used_point_axes]
        pc_range = voxel_generator.point_cloud_range
        if bev_only:  # set z and h to limits
            gt_boxes[:, 2] = pc_range[2]
            gt_boxes[:, 5] = pc_range[5] - pc_range[2]
        prep.noise_per_object_v3_(
            gt_boxes,
            points,
            gt_boxes_mask,
            rotation_perturb=gt_rotation_noise,
            center_noise_std=gt_loc_noise_std,
            global_random_rot_range=global_random_rot_range,
            group_ids=group_ids,
            num_try=100)
        # should remove unrelated objects after noise per object
        gt_boxes = gt_boxes[gt_boxes_mask]
        gt_names = gt_names[gt_boxes_mask]
        if group_ids is not None:
            group_ids = group_ids[gt_boxes_mask]
        gt_classes = np.array([class_names.index(n) + 1 for n in gt_names],
                              dtype=np.int32)

        gt_boxes, points = prep.random_flip(gt_boxes, points)
        gt_boxes, points = prep.global_rotation(gt_boxes,
                                                points,
                                                rotation=global_rotation_noise)
        gt_boxes, points = prep.global_scaling_v2(gt_boxes, points,
                                                  *global_scaling_noise)

        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range)
        gt_boxes = gt_boxes[mask]
        gt_classes = gt_classes[mask]
        gt_names = gt_names[mask]
        if group_ids is not None:
            group_ids = group_ids[mask]

        # limit rad to [-pi, pi]
        gt_boxes[:, 6] = box_np_ops.limit_period(gt_boxes[:, 6],
                                                 offset=0.5,
                                                 period=2 * np.pi)

    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]

    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)

    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }
    example.update({
        'rect': rect,
        'Trv2c': Trv2c,
        'P2': P2,
    })
    # if not lidar_input:
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
        anchors_dict = anchor_cache["anchors_dict"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    # print("debug", anchors.shape, matched_thresholds.shape)
    # anchors_bv = anchors_bv.reshape([-1, 4])
    anchors_mask = None
    if anchor_area_threshold >= 0:
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    if not training:
        return example
    if create_targets:
        targets_dict = target_assigner.assign_v2(anchors_dict,
                                                 gt_boxes,
                                                 anchors_mask,
                                                 gt_classes=gt_classes,
                                                 gt_names=gt_names)
        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            'reg_weights': targets_dict['bbox_outside_weights'],
        })
    return example
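
box_np_ops.limit_period, used above to wrap heading angles into [-pi, pi), is normally a one-liner in SECOND-style codebases; a sketch assuming the usual definition:

import numpy as np

def limit_period(val, offset=0.5, period=np.pi):
    # With offset=0.5 and period=2*pi this maps angles into [-pi, pi).
    return val - np.floor(val / period + offset) * period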
Example #7
def prep_pointcloud_ikg(input_dict,
                        root_path,
                        voxel_generator,
                        target_assigner,
                        db_sampler=None,
                        max_voxels=20000,
                        class_names=['Pedestrian'],
                        remove_outside_points=False,
                        training=True,
                        create_targets=True,
                        shuffle_points=False,
                        reduce_valid_area=False,
                        remove_unknown=False,
                        gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                        gt_loc_noise_std=[1.0, 1.0, 1.0],
                        global_rotation_noise=[-np.pi / 4, np.pi / 4],
                        global_scaling_noise=[0.95, 1.05],
                        global_loc_noise_std=(0.2, 0.2, 0.2),
                        global_random_rot_range=[0.78, 2.35],
                        generate_bev=False,
                        without_reflectivity=False,
                        num_point_features=4,
                        anchor_area_threshold=1,
                        gt_points_drop=0.0,
                        gt_drop_max_keep=10,
                        remove_points_after_sample=True,
                        anchor_cache=None,
                        remove_environment=False,
                        random_crop=False,
                        reference_detections=None,
                        add_rgb_to_points=False,
                        lidar_input=False,
                        unlabeled_db_sampler=None,
                        out_size_factor=2,
                        min_gt_point_dict=None,
                        bev_only=False,
                        use_group_id=False,
                        out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths 
    exists.
    """
    points = input_dict["points"]
    if training:
        gt_boxes = input_dict["gt_boxes"]
        gt_names = input_dict["gt_names"]
    image_idx = input_dict["image_idx"]

    if training:
        # print(gt_names)
        selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"])
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]

        # unlabeled_mask = np.zeros((gt_boxes.shape[0], ), dtype=np.bool_)
        gt_boxes_mask = np.array([n in class_names for n in gt_names],
                                 dtype=np.bool_)

        pc_range = voxel_generator.point_cloud_range

        if bev_only:  # set z and h to limits
            gt_boxes[:, 2] = pc_range[2]
            gt_boxes[:, 5] = pc_range[5] - pc_range[2]

        # should remove unrelated objects after noise per object
        gt_boxes = gt_boxes[gt_boxes_mask]
        gt_names = gt_names[gt_boxes_mask]

        gt_classes = np.array([class_names.index(n) + 1 for n in gt_names],
                              dtype=np.int32)

        # gt_boxes, points = prep.random_flip(gt_boxes, points)
        # gt_boxes, points = prep.global_rotation(
        #     gt_boxes, points, rotation=global_rotation_noise)
        # gt_boxes, points = prep.global_scaling_v2(gt_boxes, points,
        #                                           *global_scaling_noise)

        # Global translation
        #gt_boxes, points = prep.global_translate(gt_boxes, points, global_loc_noise_std)

        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range)
        gt_boxes = gt_boxes[mask]
        gt_classes = gt_classes[mask]

        # limit rad to [-pi, pi]
        gt_boxes[:, 6] = box_np_ops.limit_period(gt_boxes[:, 6],
                                                 offset=0.5,
                                                 period=2 * np.pi)
    # Shuffle the order of the points
    # if shuffle_points:
    #     # shuffle is a little slow.
    #     np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]

    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)

    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }

    # if not lidar_input:
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    # print("debug", anchors.shape, matched_thresholds.shape)
    # anchors_bv = anchors_bv.reshape([-1, 4])
    anchors_mask = None
    if anchor_area_threshold >= 0:
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    if generate_bev:
        bev_vxsize = voxel_size.copy()
        bev_vxsize[:2] /= 2
        bev_vxsize[2] *= 2
        bev_map = points_to_bev(points, bev_vxsize, pc_range,
                                without_reflectivity)
        example["bev_map"] = bev_map

    # If testing (no labels), return example = {voxels, num_points,
    # coordinates, num_voxels}
    if not training:
        return example
    if create_targets:
        targets_dict = target_assigner.assign(
            anchors,
            gt_boxes,
            anchors_mask,
            gt_classes=gt_classes,
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds)
        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            'reg_weights': targets_dict['bbox_outside_weights'],
        })
    return example
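
kitti.drop_arrays_by_name and kitti.keep_arrays_by_name only need to return the indices of entries outside or inside a given name list; a minimal sketch of that behaviour (illustrative, not necessarily the exact kitti module code):

import numpy as np

def drop_arrays_by_name(gt_names, drop_classes):
    # Indices of entries whose name is NOT in drop_classes.
    inds = [i for i, name in enumerate(gt_names) if name not in drop_classes]
    return np.array(inds, dtype=np.int64)

def keep_arrays_by_name(gt_names, keep_classes):
    # Indices of entries whose name IS in keep_classes.
    inds = [i for i, name in enumerate(gt_names) if name in keep_classes]
    return np.array(inds, dtype=np.int64)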
Example #8
def prep_main(points,
              calib,
              voxel_generator,
              target_assigner,
              max_voxels=20000,
              shuffle_points=False,
              anchor_area_threshold=1,
              anchor_cache=None,
              out_size_factor=2,
              multi_gpu=False):
    """
    Main preprocessing: converting points to voxels.

    Main args:
        points: np.ndarray(dtype=float32, shape=(N, 3+)) TODO: confirm this.
        calib: calibration        
    Returns:
        example: dict{'voxels', 'num_points', 'coordinates', 'num_voxels', 'metrics',
                      'calib', 'anchors', 'anchors_mask', 'anchors_dict',
                      'matched_thresholds', 'unmatched_thresholds'
                     }
    """
    metrics = {}

    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]
    t1 = time.time()
    if not multi_gpu:
        res = voxel_generator.generate(
            points, max_voxels)
        voxels = res["voxels"]
        coordinates = res["coordinates"]
        num_points = res["num_points_per_voxel"]
        num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
    else:
        res = voxel_generator.generate_multi_gpu(
            points, max_voxels)
        voxels = res["voxels"]
        coordinates = res["coordinates"]
        num_points = res["num_points_per_voxel"]
        num_voxels = np.array([res["voxel_num"]], dtype=np.int64)
    metrics["voxel_gene_time"] = time.time() - t1
    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": num_voxels,
        "metrics": metrics,
    }
    if calib is not None:
        example["calib"] = calib
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        anchors_dict = anchor_cache["anchors_dict"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]

    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, target_assigner.box_ndim])
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
    example["anchors"] = anchors
    example["anchors_dict"] = anchors_dict
    example["matched_thresholds"] = matched_thresholds
    example["unmatched_thresholds"] = unmatched_thresholds
    anchors_mask = None
    if anchor_area_threshold >= 0:
        # slow with high resolution. recommend disable this forever.
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask

    return example
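
The anchor_cache branch above avoids regenerating anchors for every sample; such a cache can be built once from exactly the calls in the else branch. A sketch, assuming the same target_assigner, box_np_ops and feature_map_size used above:

def build_anchor_cache(target_assigner, feature_map_size):
    ret = target_assigner.generate_anchors(feature_map_size)
    anchors = ret["anchors"].reshape([-1, target_assigner.box_ndim])
    anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])
    return {
        "anchors": anchors,
        "anchors_bv": anchors_bv,
        "anchors_dict": target_assigner.generate_anchors_dict(feature_map_size),
        "matched_thresholds": ret["matched_thresholds"],
        "unmatched_thresholds": ret["unmatched_thresholds"],
    }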
Example #9
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    remove_unknown=False,
                    gt_rotation_noise=(-np.pi / 3, np.pi / 3),
                    gt_loc_noise_std=(1.0, 1.0, 1.0),
                    global_rotation_noise=(-np.pi / 4, np.pi / 4),
                    global_scaling_noise=(0.95, 1.05),
                    global_random_rot_range=(0.78, 2.35),
                    global_translate_noise_std=(0, 0, 0),
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    out_size_factor=2,
                    use_group_id=False,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths 
    exists.

    input_dict format: dataset.get_sensor_data format

    """
    # t = time.time()
    class_names = target_assigner.classes
    points = input_dict["lidar"]["points"]
    if training:
        anno_dict = input_dict["lidar"]["annotations"]
        gt_dict = {
            "gt_boxes": anno_dict["boxes"],
            "gt_names": anno_dict["names"],
        }
        if "difficulty" not in anno_dict:
            difficulty = np.zeros([anno_dict["boxes"].shape[0]],
                                  dtype=np.int32)
            gt_dict["difficulty"] = difficulty
        else:
            gt_dict["difficulty"] = anno_dict["difficulty"]
        if use_group_id and "group_ids" in anno_dict:
            group_ids = anno_dict["group_ids"]
            gt_dict["group_ids"] = group_ids
    calib = None
    if "calib" in input_dict:
        calib = input_dict["calib"]

    if reference_detections is not None:
        assert calib is not None and "image" in input_dict
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(calib["P2"])
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, calib["rect"],
                                              calib["Trv2c"])
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
        masks = points_in_convex_polygon_3d_jit(points, surfaces)
        points = points[masks.any(-1)]

    if remove_outside_points:
        assert calib is not None
        image_shape = input_dict["image"]["image_shape"]
        points = box_np_ops.remove_outside_points(points, calib["rect"],
                                                  calib["Trv2c"], calib["P2"],
                                                  image_shape)
    if remove_environment is True and training:
        selected = kitti.keep_arrays_by_name(gt_dict["gt_names"],
                                             target_assigner.classes)
        _dict_select(gt_dict, selected)
        masks = box_np_ops.points_in_rbbox(points, gt_dict["gt_boxes"])
        points = points[masks.any(-1)]

    if training:
        # boxes_lidar = gt_dict["gt_boxes"]
        # bev_map = simplevis.nuscene_vis(points, boxes_lidar)
        # cv2.imshow('pre-noise', bev_map)
        selected = kitti.drop_arrays_by_name(gt_dict["gt_names"], ["DontCare"])
        _dict_select(gt_dict, selected)
        if remove_unknown:
            remove_mask = gt_dict["difficulty"] == -1
            """
            gt_boxes_remove = gt_boxes[remove_mask]
            gt_boxes_remove[:, 3:6] += 0.25
            points = prep.remove_points_in_boxes(points, gt_boxes_remove)
            """
            keep_mask = np.logical_not(remove_mask)
            _dict_select(gt_dict, keep_mask)
        gt_dict.pop("difficulty")
        gt_boxes_mask = np.array(
            [n in class_names for n in gt_dict["gt_names"]], dtype=np.bool_)
        if db_sampler is not None:
            group_ids = None
            if "group_ids" in gt_dict:
                group_ids = gt_dict["group_ids"]

            sampled_dict = db_sampler.sample_all(root_path,
                                                 gt_dict["gt_boxes"],
                                                 gt_dict["gt_names"],
                                                 num_point_features,
                                                 random_crop,
                                                 gt_group_ids=group_ids,
                                                 calib=calib)

            if sampled_dict is not None:
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                gt_dict["gt_names"] = np.concatenate(
                    [gt_dict["gt_names"], sampled_gt_names], axis=0)
                gt_dict["gt_boxes"] = np.concatenate(
                    [gt_dict["gt_boxes"], sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0)
                if group_ids is not None:
                    sampled_group_ids = sampled_dict["group_ids"]
                    gt_dict["group_ids"] = np.concatenate(
                        [gt_dict["group_ids"], sampled_group_ids])

                if remove_points_after_sample:
                    masks = box_np_ops.points_in_rbbox(points,
                                                       sampled_gt_boxes)
                    points = points[np.logical_not(masks.any(-1))]

                points = np.concatenate([sampled_points, points], axis=0)
        pc_range = voxel_generator.point_cloud_range
        group_ids = None
        if "group_ids" in gt_dict:
            group_ids = gt_dict["group_ids"]

        prep.noise_per_object_v3_(
            gt_dict["gt_boxes"],
            points,
            gt_boxes_mask,
            rotation_perturb=gt_rotation_noise,
            center_noise_std=gt_loc_noise_std,
            global_random_rot_range=global_random_rot_range,
            group_ids=group_ids,
            num_try=100)

        # should remove unrelated objects after noise per object
        # for k, v in gt_dict.items():
        #     print(k, v.shape)
        _dict_select(gt_dict, gt_boxes_mask)
        gt_classes = np.array(
            [class_names.index(n) + 1 for n in gt_dict["gt_names"]],
            dtype=np.int32)
        gt_dict["gt_classes"] = gt_classes

        gt_dict["gt_boxes"], points = prep.random_flip(gt_dict["gt_boxes"],
                                                       points)
        gt_dict["gt_boxes"], points = prep.global_rotation(
            gt_dict["gt_boxes"], points, rotation=global_rotation_noise)
        gt_dict["gt_boxes"], points = prep.global_scaling_v2(
            gt_dict["gt_boxes"], points, *global_scaling_noise)
        prep.global_translate_(gt_dict["gt_boxes"], points,
                               global_translate_noise_std)
        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range(gt_dict["gt_boxes"], bv_range)
        _dict_select(gt_dict, mask)

        # limit rad to [-pi, pi]
        gt_dict["gt_boxes"][:, 6] = box_np_ops.limit_period(
            gt_dict["gt_boxes"][:, 6], offset=0.5, period=2 * np.pi)

        # boxes_lidar = gt_dict["gt_boxes"]
        # bev_map = simplevis.nuscene_vis(points, boxes_lidar)
        # cv2.imshow('post-noise', bev_map)
        # cv2.waitKey(0)

    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]

    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)
    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }
    if calib is not None:
        example["calib"] = calib
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        anchors_dict = anchor_cache["anchors_dict"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    anchors_mask = None
    if anchor_area_threshold >= 0:
        # slow with high resolution. recommend disable this forever.
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    if not training:
        return example
    # voxel_labels = box_np_ops.assign_label_to_voxel(gt_boxes, coordinates,
    #                                                 voxel_size, coors_range)
    """
    example.update({
        'gt_boxes': gt_boxes.astype(out_dtype),
        'num_gt': np.array([gt_boxes.shape[0]]),
        # 'voxel_labels': voxel_labels,
    })
    """
    if create_targets:
        targets_dict = target_assigner.assign_v2(
            anchors_dict,
            gt_dict["gt_boxes"],
            anchors_mask,
            gt_classes=gt_dict["gt_classes"],
            gt_names=gt_dict["gt_names"])

        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            'reg_weights': targets_dict['bbox_outside_weights'],
        })
    return example
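
For reference, the global augmentations used in the training branch (prep.random_flip, prep.global_rotation, prep.global_scaling_v2) all apply one transform jointly to boxes and points; a minimal sketch of a global z-axis rotation under that assumption (illustrative only, not the prep module's exact code or sign convention):

import numpy as np

def global_rotation_sketch(gt_boxes, points, rotation=np.pi / 4):
    # Rotate points and boxes around the z axis by one random angle.
    angle = np.random.uniform(-rotation, rotation)
    c, s = np.cos(angle), np.sin(angle)
    rot = np.array([[c, -s], [s, c]], dtype=points.dtype)
    points[:, :2] = points[:, :2] @ rot.T
    gt_boxes[:, :2] = gt_boxes[:, :2] @ rot.T
    gt_boxes[:, 6] += angle  # update the heading angle accordingly
    return gt_boxes, points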
Example #10
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    class_names=['Car'],
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    reduce_valid_area=False,
                    remove_unknown=False,
                    gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                    gt_loc_noise_std=[1.0, 1.0, 1.0],
                    global_rotation_noise=[-np.pi / 4, np.pi / 4],
                    global_scaling_noise=[0.95, 1.05],
                    global_loc_noise_std=(0.2, 0.2, 0.2),
                    global_random_rot_range=[0.78, 2.35],
                    generate_bev=False,
                    without_reflectivity=False,
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    add_rgb_to_points=False,
                    lidar_input=False,
                    unlabeled_db_sampler=None,
                    out_size_factor=2,
                    min_gt_point_dict=None,
                    bev_only=False,
                    use_group_id=False,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths 
    exists.
    """
    # This part reads the data for one frame
    points = input_dict["points"] # velodyne_reduced, array(N*4)
    if training:
        gt_boxes = input_dict["gt_boxes"] # 真值框,位置,尺寸,绝对转角,N*1,一个真值框一行
        gt_names = input_dict["gt_names"]
        difficulty = input_dict["difficulty"]
        group_ids = None
        if use_group_id and "group_ids" in input_dict: # False
            group_ids = input_dict["group_ids"]
    rect = input_dict["rect"]
    Trv2c = input_dict["Trv2c"]
    P2 = input_dict["P2"]
    unlabeled_training = unlabeled_db_sampler is not None
    image_idx = input_dict["image_idx"]

    if reference_detections is not None: # None
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        # frustums = np.linalg.inv(R) @ frustums.T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
        masks = points_in_convex_polygon_3d_jit(points, surfaces)
        points = points[masks.any(-1)]

    if remove_outside_points and not lidar_input: # False
        image_shape = input_dict["image_shape"]
        points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2,
                                                  image_shape)
    if remove_environment is True and training: # False
        selected = kitti.keep_arrays_by_name(gt_names, class_names)
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        difficulty = difficulty[selected]
        if group_ids is not None: # None
            group_ids = group_ids[selected]
        points = prep.remove_points_outside_boxes(points, gt_boxes)

    if training:
        # First drop the DontCare entries from the ground truth
        selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"]) # drop DontCare
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        difficulty = difficulty[selected]
        if group_ids is not None: # None
            group_ids = group_ids[selected]

        gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c) # convert gt boxes from camera to lidar coordinates [xyz_lidar, w, l, h, r], one object per row
        if remove_unknown: # False
            remove_mask = difficulty == -1
            """
            gt_boxes_remove = gt_boxes[remove_mask]
            gt_boxes_remove[:, 3:6] += 0.25
            points = prep.remove_points_in_boxes(points, gt_boxes_remove)
            """
            keep_mask = np.logical_not(remove_mask)
            gt_boxes = gt_boxes[keep_mask]
            gt_names = gt_names[keep_mask]
            difficulty = difficulty[keep_mask]
            if group_ids is not None: # None
                group_ids = group_ids[keep_mask]
        gt_boxes_mask = np.array( # boolean flags marking target-class objects, one entry per object
            [n in class_names for n in gt_names], dtype=np.bool_)
        # Augment the DontCare-free ground truth with database sampling below
        if db_sampler is not None: # not None
            # Database-sampler method; returns sampled ground-truth data per class used to augment this frame, including non-target classes that appear in the ground truth
            sampled_dict = db_sampler.sample_all(
                root_path,
                gt_boxes,
                gt_names,
                num_point_features,
                random_crop,
                gt_group_ids=group_ids, # None
                rect=rect,
                Trv2c=Trv2c,
                P2=P2)

            if sampled_dict is not None: # merge the original data with the sampled augmentation below
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                # gt_names = gt_names[gt_boxes_mask].tolist()
                gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
                # gt_names += [s["name"] for s in sampled]
                gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0) # flags marking target-class gt boxes
                if group_ids is not None: # None
                    sampled_group_ids = sampled_dict["group_ids"]
                    group_ids = np.concatenate([group_ids, sampled_group_ids])

                if remove_points_after_sample: # False; remove original points inside the sampled boxes
                    points = prep.remove_points_in_boxes(
                        points, sampled_gt_boxes)

                points = np.concatenate([sampled_points, points], axis=0) # merge original and sampled point clouds
        # unlabeled_mask = np.zeros((gt_boxes.shape[0], ), dtype=np.bool_)
        if without_reflectivity: # False
            used_point_axes = list(range(num_point_features))
            used_point_axes.pop(3)
            points = points[:, used_point_axes]
        pc_range = voxel_generator.point_cloud_range
        if bev_only:  # set z and h to limits, False
            gt_boxes[:, 2] = pc_range[2]
            gt_boxes[:, 5] = pc_range[5] - pc_range[2]
        prep.noise_per_object_v3_( # perturb each object independently, i.e. modify each gt box and the points inside it
            gt_boxes, # gt boxes, including the sampled augmentation
            points, # point cloud, including the sampled augmentation
            gt_boxes_mask, # target-class flags (original and sampled boxes alike)
            rotation_perturb=gt_rotation_noise, # rotation range, uniform distribution
            center_noise_std=gt_loc_noise_std, # std of the Gaussian location noise
            global_random_rot_range=global_random_rot_range, # global rotation range [0.0, 0.0]
            group_ids=group_ids, # None
            num_try=100)
        # should remove unrelated objects after noise per object
        gt_boxes = gt_boxes[gt_boxes_mask] # keep only target-class gt boxes after the per-object noise
        gt_names = gt_names[gt_boxes_mask]
        if group_ids is not None: # False
            group_ids = group_ids[gt_boxes_mask]
        gt_classes = np.array( # (gt_num)
            [class_names.index(n) + 1 for n in gt_names], dtype=np.int32)

        gt_boxes, points = prep.random_flip(gt_boxes, points) # random flip
        gt_boxes, points = prep.global_rotation( # global rotation
            gt_boxes, points, rotation=global_rotation_noise)
        gt_boxes, points = prep.global_scaling_v2(gt_boxes, points, # global scaling
                                                  *global_scaling_noise)

        # Global translation noise
        gt_boxes, points = prep.global_translate(gt_boxes, points, global_loc_noise_std)

        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range) # drop gt boxes outside the BEV range
        gt_boxes = gt_boxes[mask]
        gt_classes = gt_classes[mask]
        if group_ids is not None: # None
            group_ids = group_ids[mask]

        # limit rad to [-pi, pi]
        gt_boxes[:, 6] = box_np_ops.limit_period( # wrap gt yaw angles into the target range
            gt_boxes[:, 6], offset=0.5, period=2 * np.pi)

    if shuffle_points: # True; shuffle the whole point cloud
        # shuffle is a little slow.
        np.random.shuffle(points)

    voxel_size = voxel_generator.voxel_size # [0.16, 0.16, 4]
    pc_range = voxel_generator.point_cloud_range # [0, -39.68, -3, 69.12, 39.68, 1]
    grid_size = voxel_generator.grid_size # [432, 496, 1] x,y,z

    # Generate voxels
    """
    Returns:
        voxels: [num_voxels, 100, 4] per-voxel point features, indexed by voxel
        coordinates: [num_voxels, 3] grid coordinates of each voxel
        num_points: (num_voxels,) number of points in each voxel
    """
    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)

    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }
    example.update({
        'rect': rect,
        'Trv2c': Trv2c,
        'P2': P2,
    })
    # if not lidar_input:
    feature_map_size = grid_size[:2] // out_size_factor # [216 248]
    feature_map_size = [*feature_map_size, 1][::-1] # [1,248,216]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    # print("debug", anchors.shape, matched_thresholds.shape)
    # anchors_bv = anchors_bv.reshape([-1, 4])
    anchors_mask = None
    if anchor_area_threshold >= 0: # True
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask( # 1 where a pillar is occupied, 0 otherwise; array [496, 432] in (y, x)
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0) # cumulative sum down each column
        dense_voxel_map = dense_voxel_map.cumsum(1) # cumulative sum along each row; the two cumsums form a 2D summed-area table
        anchors_area = box_np_ops.fused_get_anchors_area( # count non-empty pillars inside each BEV anchor
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold # keep anchors covering more than anchor_area_threshold non-empty pillars
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    if generate_bev: # False
        bev_vxsize = voxel_size.copy()
        bev_vxsize[:2] /= 2
        bev_vxsize[2] *= 2
        bev_map = points_to_bev(points, bev_vxsize, pc_range,
                                without_reflectivity)
        example["bev_map"] = bev_map
    if not training:
        return example # the test split does not need training targets
    if create_targets: # True
        targets_dict = target_assigner.assign(
            anchors, # (248*216*2,7)
            gt_boxes, # (gt_num, 7)
            anchors_mask, # (248*216*2,)
            gt_classes=gt_classes, # (gt_num,)
            matched_thresholds=matched_thresholds, # (248*216*2,)
            unmatched_thresholds=unmatched_thresholds) # (248*216*2,)
        example.update({
            'labels': targets_dict['labels'], # (total_anchors,): gt class index (1, 2, ...) matched to each anchor; 0 if unmatched, -1 for don't-care
            'reg_targets': targets_dict['bbox_targets'], # (total_anchors, 7): encoded offsets of the matched gt box relative to its anchor; all zeros if unmatched
            'reg_weights': targets_dict['bbox_outside_weights'], # (total_anchors,): outside weights, 1 for anchors with a matched gt, 0 otherwise
        })
    return example
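A minimal sketch of the integral-image trick behind the anchors_mask block in Example #10: the double cumsum turns the pillar-occupancy grid into a 2D summed-area table, so the number of non-empty pillars inside any BEV rectangle can be read in constant time. The helpers occupancy_from_coords and count_in_rect are illustrative names only (not part of box_np_ops), and voxel coordinates are assumed to be in (z, y, x) order as produced with reverse_index=True.

import numpy as np

def occupancy_from_coords(coords, grid_shape_yx):
    # coords: (num_voxels, 3) in (z, y, x); mark each occupied (y, x) cell with 1
    occ = np.zeros(grid_shape_yx, dtype=np.float32)
    occ[coords[:, 1], coords[:, 2]] = 1.0
    return occ

def count_in_rect(sat, y0, x0, y1, x1):
    # sat = occ.cumsum(0).cumsum(1); rectangle is inclusive of (y0, x0)..(y1, x1)
    total = sat[y1, x1]
    if y0 > 0:
        total -= sat[y0 - 1, x1]
    if x0 > 0:
        total -= sat[y1, x0 - 1]
    if y0 > 0 and x0 > 0:
        total += sat[y0 - 1, x0 - 1]
    return total

occ = occupancy_from_coords(np.array([[0, 2, 3], [0, 5, 7], [0, 2, 4]]), (496, 432))
sat = occ.cumsum(0).cumsum(1)
assert count_in_rect(sat, 0, 0, 495, 431) == 3  # the whole grid holds 3 occupied pillars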
Example #11
0
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    class_names=['PEDESTRIAN'],
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    reduce_valid_area=False,
                    remove_unknown=False,
                    gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                    gt_loc_noise_std=[1.0, 1.0, 1.0],
                    global_rotation_noise=[-np.pi / 4, np.pi / 4],
                    global_scaling_noise=[0.95, 1.05],
                    global_loc_noise_std=(0.2, 0.2, 0.2),
                    global_random_rot_range=[0.78, 2.35],
                    generate_bev=False,
                    without_reflectivity=False,
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    add_rgb_to_points=False,
                    lidar_input=False,
                    unlabeled_db_sampler=None,
                    out_size_factor=2,
                    min_gt_point_dict=None,
                    bev_only=False,
                    use_group_id=False,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths 
    exist.
    """
    points = input_dict["points"]
    pc_range = voxel_generator.point_cloud_range

    pts_x, pts_y, pts_z = points[:, 0], points[:, 1], points[:, 2]
    range_flag = ((pts_x >= pc_range[0]) & (pts_x <= pc_range[3])
                  & (pts_y >= pc_range[1]) & (pts_y <= pc_range[4])
                  & (pts_z >= pc_range[2]) & (pts_z <= pc_range[5]))

    points = points[range_flag]

    if training:
        gt_boxes = input_dict["gt_boxes"]
        gt_names = input_dict["gt_names"]
        # group_ids: np.arange(num_gt, dtype=np.int32), where num_gt is the number of objects (of all categories) in the annotated lidar frame

        group_ids = None
        if use_group_id and "group_ids" in input_dict:
            group_ids = input_dict["group_ids"]

    #unlabeled_training = unlabeled_db_sampler is not None

    if training:

        gt_boxes_mask = np.array([n in class_names for n in gt_names],
                                 dtype=np.bool_)
        #print(gt_boxes_mask.shape,gt_boxes.shape,"before")

        prep.noise_per_object_v3_(
            gt_boxes,
            points,
            gt_boxes_mask,
            rotation_perturb=gt_rotation_noise,
            center_noise_std=gt_loc_noise_std,
            global_random_rot_range=global_random_rot_range,
            group_ids=group_ids,
            num_try=100)
        #print(gt_boxes_mask.shape,gt_boxes.shape,"after")

        # should remove unrelated objects after noise per object
        gt_boxes = gt_boxes[gt_boxes_mask]
        gt_names = gt_names[gt_boxes_mask]

        if group_ids is not None:
            group_ids = group_ids[gt_boxes_mask]

        gt_classes = np.array([class_names.index(n) + 1 for n in gt_names],
                              dtype=np.int32)

        #need to check the output
        gt_boxes, points = prep.random_flip(gt_boxes, points)
        gt_boxes, points = prep.global_rotation(gt_boxes,
                                                points,
                                                rotation=global_rotation_noise)
        gt_boxes, points = prep.global_scaling_v2(gt_boxes, points,
                                                  *global_scaling_noise)

        # Global translation
        gt_boxes, points = prep.global_translate(gt_boxes, points,
                                                 global_loc_noise_std)

        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range)
        gt_boxes = gt_boxes[mask]
        gt_classes = gt_classes[mask]
        if group_ids is not None:
            group_ids = group_ids[mask]

        # limit rad to [-pi, pi]
        gt_boxes[:, 6] = box_np_ops.limit_period(gt_boxes[:, 6],
                                                 offset=0.5,
                                                 period=2 * np.pi)
        #assert -np.pi/2 <= g <= np.pi/2

    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size

    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)

    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }

    # if not lidar_input:
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
    example["anchors"] = anchors

    anchors_mask = None
    if anchor_area_threshold >= 0:
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        example['anchors_mask'] = anchors_mask

    if generate_bev:
        bev_vxsize = voxel_size.copy()
        bev_vxsize[:2] /= 2
        bev_vxsize[2] *= 2
        bev_map = points_to_bev(points, bev_vxsize, pc_range,
                                without_reflectivity)
        example["bev_map"] = bev_map
    if not training:
        return example
    if create_targets:
        targets_dict = target_assigner.assign(
            anchors,
            gt_boxes,
            anchors_mask,
            gt_classes=gt_classes,
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds)
        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            'reg_weights': targets_dict['bbox_outside_weights'],
        })
    return example
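Both examples above wrap the ground-truth yaw into [-pi, pi] with box_np_ops.limit_period(gt_boxes[:, 6], offset=0.5, period=2 * np.pi). A self-contained version of that wrapping, written here only to illustrate the math (the library implementation may differ in detail):

import numpy as np

def limit_period_sketch(val, offset=0.5, period=2 * np.pi):
    # with offset=0.5 and period=2*pi this maps any angle into [-pi, pi)
    return val - np.floor(val / period + offset) * period

angles = np.array([3.5 * np.pi, -1.25 * np.pi, 0.3])
print(limit_period_sketch(angles))  # approx. [-1.5708  2.3562  0.3]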
Example #12
0
    def draw_anchors(self,
                     gt_boxes_lidar,
                     points=None,
                     image_idx=0,
                     gt_names=None):
        # print(gt_names)
        voxel_size = np.array(self._voxel_size, dtype=np.float32)
        # voxel_size = np.array([0.2, 0.2, 0.4], dtype=np.float32)
        coors_range = np.array(self._coors_range, dtype=np.float32)
        # coors_range = np.array([0, -40, -3, 70.4, 40, 1], dtype=np.float32)
        grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
        grid_size = np.round(grid_size).astype(np.int64)
        # print(grid_size)
        bv_range = coors_range[[0, 1, 3, 4]]
        anchor_generator = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[0.6, 1.76, 1.73],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[0.2, -39.8, -1.465],
            rotations=[0, 1.5707963267948966],
            match_threshold=0.5,
            unmatch_threshold=0.35,
        )
        anchor_generator1 = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[0.6, 0.8, 1.73],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[0.2, -39.8, -1.465],
            rotations=[0, 1.5707963267948966],
            match_threshold=0.5,
            unmatch_threshold=0.35,
        )
        anchor_generator2 = AnchorGeneratorStride(
            # sizes=[0.6, 0.8, 1.73, 0.6, 1.76, 1.73],
            sizes=[1.6, 3.9, 1.56],
            anchor_strides=[0.4, 0.4, 0.0],
            anchor_offsets=[0.2, -39.8, -1.55442884],
            rotations=[0, 1.5707963267948966],
            # rotations=[0],
            match_threshold=0.6,
            unmatch_threshold=0.45,
        )
        anchor_generators = [anchor_generator2]
        box_coder = GroundBox3dCoder()
        # similarity_calc = DistanceSimilarity(1.0)
        similarity_calc = NearestIouSimilarity()
        target_assigner = TargetAssigner(box_coder, anchor_generators,
                                         similarity_calc)
        # anchors = box_np_ops.create_anchors_v2(
        #     bv_range, grid_size[:2] // 2, sizes=anchor_dims)
        # matched_thresholds = [0.45, 0.45, 0.6]
        # unmatched_thresholds = [0.3, 0.3, 0.45]

        t = time.time()
        feature_map_size = grid_size[:2] // 2
        feature_map_size = [*feature_map_size, 1][::-1]
        # print(feature_map_size)
        # """
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        print(f"num_anchors_ {len(anchors)}")
        if points is not None:
            voxels, coors, num_points = points_to_voxel(
                points,
                self._voxel_size,
                # self._coors_range,
                coors_range,
                self._max_num_points,
                reverse_index=True,
                max_voxels=self._max_voxels)

            # print(np.min(coors, 0), np.max(coors, 0))
            dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
                coors, tuple(grid_size[::-1][1:]))
            dense_voxel_map = dense_voxel_map.cumsum(0)
            dense_voxel_map = dense_voxel_map.cumsum(1)
            anchors_mask = box_np_ops.fused_get_anchors_area(
                dense_voxel_map, anchors_bv, voxel_size, coors_range,
                grid_size) > 1
        # print(np.sum(anchors_mask), anchors_mask.shape)
        class_names = [
            'Car', "Pedestrian", "Cyclist", 'Van', 'Truck', "Tram", 'Misc',
            'Person_sitting'
        ]
        gt_classes = np.array([class_names.index(n) + 1 for n in gt_names],
                              dtype=np.int32)
        t = time.time()
        target_dict = target_assigner.assign(
            anchors,
            gt_boxes_lidar,
            anchors_mask,
            gt_classes=gt_classes,
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds)
        labels = target_dict["labels"]
        reg_targets = target_dict["bbox_targets"]
        reg_weights = target_dict["bbox_outside_weights"]
        # print(labels[labels > 0])
        # decoded_reg_targets = box_np_ops.second_box_decode(reg_targets, anchors)
        # print(decoded_reg_targets.reshape(-1, 7)[labels > 0])
        print("target time", (time.time() - t))
        print(f"num_pos={np.sum(labels > 0)}")
        colors = np.zeros([anchors.shape[0], 4])
        ignored_color = bbox_plot.gl_color(GLColor.Gray, 0.5)
        pos_color = bbox_plot.gl_color(GLColor.Cyan, 0.5)

        colors[labels == -1] = ignored_color
        colors[labels > 0] = pos_color
        cared_anchors_mask = np.logical_and(labels != 0, anchors_mask)
        colors = colors[cared_anchors_mask]
        anchors_not_neg = box_np_ops.rbbox3d_to_corners(
            anchors)[cared_anchors_mask]
        self.boxes3d("anchors", anchors_not_neg, colors=colors)
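Example #12, like the prep functions, reduces rotated BEV anchors to nearby axis-aligned boxes with box_np_ops.rbbox2d_to_near_bbox before the area test. A rough sketch of that idea (an assumption about the behaviour, not the library code): snap the yaw to the nearest axis, swapping w and l when the box is closer to ±pi/2, then return [xmin, ymin, xmax, ymax].

import numpy as np

def rbbox2d_to_near_bbox_sketch(rbboxes):
    # rbboxes: (N, 5) as [x, y, w, l, yaw]
    rots = rbboxes[:, 4]
    # map the yaw into [-pi/2, pi/2) and take its magnitude
    rots = np.abs(rots - np.floor(rots / np.pi + 0.5) * np.pi)
    swap = (rots > np.pi / 4)[:, np.newaxis]  # closer to +-90 degrees: swap w and l
    wl = np.where(swap, rbboxes[:, [3, 2]], rbboxes[:, [2, 3]])
    centers = rbboxes[:, :2]
    return np.concatenate([centers - wl / 2, centers + wl / 2], axis=1)

# a 2 x 4 box rotated by 90 degrees becomes a 4 x 2 axis-aligned box
print(rbbox2d_to_near_bbox_sketch(np.array([[0., 0., 2., 4., np.pi / 2]])))  # [[-2. -1.  2.  1.]]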
Example #13
0
def prep_pointcloud2(input_dict,
                     root_path,
                     voxel_generator,
                     target_assigner,
                     db_sampler=None,
                     max_voxels=20000,
                     class_names=['Car'],
                     remove_outside_points=False,
                     training=True,
                     create_targets=True,
                     shuffle_points=False,
                     reduce_valid_area=False,
                     remove_unknown=False,
                     gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                     gt_loc_noise_std=[1.0, 1.0, 1.0],
                     global_rotation_noise=[-np.pi / 4, np.pi / 4],
                     global_scaling_noise=[0.95, 1.05],
                     global_loc_noise_std=(0.2, 0.2, 0.2),
                     global_random_rot_range=[0.78, 2.35],
                     generate_bev=False,
                     without_reflectivity=False,
                     num_point_features=4,
                     anchor_area_threshold=1,
                     gt_points_drop=0.0,
                     gt_drop_max_keep=10,
                     remove_points_after_sample=True,
                     anchor_cache=None,
                     remove_environment=False,
                     random_crop=False,
                     reference_detections=None,
                     add_rgb_to_points=False,
                     lidar_input=False,
                     unlabeled_db_sampler=None,
                     out_size_factor=2,
                     min_gt_point_dict=None,
                     bev_only=False,
                     use_group_id=False,
                     out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths 
    exist.
    """

    points = input_dict["points"]
    rect = input_dict["rect"]
    Trv2c = input_dict["Trv2c"]
    P2 = input_dict["P2"]
    unlabeled_training = unlabeled_db_sampler is not None
    image_idx = input_dict["image_idx"]

    if reference_detections is not None:
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        # frustums = np.linalg.inv(R) @ frustums.T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
        masks = points_in_convex_polygon_3d_jit(points, surfaces)
        points = points[masks.any(-1)]

    now = rospy.get_rostime()
    print('before shuffle TIME:' + str(now.to_sec()))

    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]

    now = rospy.get_rostime()
    print('before voxel generate:' + str(now.to_sec()))

    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)

    now = rospy.get_rostime()
    print('before encoding TIME:' + str(now.to_sec()))

    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }

    now = rospy.get_rostime()
    print('before encoding2 TIME:' + str(now.to_sec()))

    example.update({
        'rect': rect,
        'Trv2c': Trv2c,
        'P2': P2,
    })

    now = rospy.get_rostime()
    print('before anchor_cache TIME:' + str(now.to_sec()))

    # if not lidar_input:
    feature_map_size = grid_size[:2] // out_size_factor

    now = rospy.get_rostime()
    print('before anchor_fm TIME:' + str(now.to_sec()))

    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    # print("debug", anchors.shape, matched_thresholds.shape)
    # anchors_bv = anchors_bv.reshape([-1, 4])
    anchors_mask = None

    now = rospy.get_rostime()
    print('before create_anchor_mask TIME:' + str(now.to_sec()))

    # costs time

    if anchor_area_threshold >= 0:
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask

    now = rospy.get_rostime()
    print('before create_bev TIME:' + str(now.to_sec()))

    if generate_bev:
        bev_vxsize = voxel_size.copy()
        bev_vxsize[:2] /= 2
        bev_vxsize[2] *= 2
        bev_map = points_to_bev(points, bev_vxsize, pc_range,
                                without_reflectivity)
        example["bev_map"] = bev_map

    now = rospy.get_rostime()
    print('after create bev TIME:' + str(now.to_sec()))
    if not training:
        return example
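Example #13 annotates the point-cloud range as [0, -40, -3, 70.4, 40, 1] and the BEV grid as [352, 400]. A quick check of how grid_size follows from the range and the voxel size (a voxel size of [0.2, 0.2, 0.4] is assumed here, since that is what reproduces those numbers):

import numpy as np

pc_range = np.array([0, -40, -3, 70.4, 40, 1], dtype=np.float32)
voxel_size = np.array([0.2, 0.2, 0.4], dtype=np.float32)
grid_size = np.round((pc_range[3:] - pc_range[:3]) / voxel_size).astype(np.int64)
print(grid_size)  # [352 400  10]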
Example #14
0
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    max_sweeps=10,
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    remove_unknown=False,
                    gt_rotation_noise=(-np.pi / 3, np.pi / 3),
                    gt_loc_noise_std=(1.0, 1.0, 1.0),
                    global_rotation_noise=(-np.pi / 4, np.pi / 4),
                    global_scaling_noise=(0.95, 1.05),
                    global_random_rot_range=(0.78, 2.35),
                    global_translate_noise_std=(0, 0, 0),
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    out_size_factor=2,
                    use_group_id=False,
                    multi_gpu=False,
                    min_points_in_gt=-1,
                    random_flip_x=True,
                    random_flip_y=True,
                    sample_importance=1.0,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths
    exist.

    input_dict format: dataset.get_sensor_data format

    """
    t = time.time()
    class_names = target_assigner.classes
    points = input_dict["lidar"]["points"]
    indices = input_dict["lidar"]["indices"]
    origins = input_dict["lidar"]["origins"]
    if training:
        anno_dict = input_dict["lidar"]["annotations"]
        gt_dict = {
            "gt_boxes": anno_dict["boxes"],
            "gt_names": anno_dict["names"],
            "gt_importance": np.ones([anno_dict["boxes"].shape[0]],
                                     dtype=anno_dict["boxes"].dtype),
        }
        if "difficulty" not in anno_dict:
            difficulty = np.zeros([anno_dict["boxes"].shape[0]],
                                  dtype=np.int32)
            gt_dict["difficulty"] = difficulty
        else:
            gt_dict["difficulty"] = anno_dict["difficulty"]
        if use_group_id and "group_ids" in anno_dict:
            group_ids = anno_dict["group_ids"]
            gt_dict["group_ids"] = group_ids
    calib = None
    if "calib" in input_dict:
        calib = input_dict["calib"]

    # # Disable these two since we do not do this for NuScenes
    # if reference_detections is not None:
    #     assert calib is not None and "image" in input_dict
    #     C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
    #     frustums = box_np_ops.get_frustum_v2(reference_detections, C)
    #     frustums -= T
    #     frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
    #     frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
    #     surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
    #     masks = points_in_convex_polygon_3d_jit(points, surfaces)
    #     points = points[masks.any(-1)]
    # if remove_outside_points:
    #     assert calib is not None
    #     image_shape = input_dict["image"]["image_shape"]
    #     points = box_np_ops.remove_outside_points(
    #         points, calib["rect"], calib["Trv2c"], calib["P2"], image_shape)

    # # Very interesting attempt
    # # I have tried the same and found it doesn't really work
    # if remove_environment is True and training:
    #     selected = kitti.keep_arrays_by_name(gt_names, target_assigner.classes)
    #     _dict_select(gt_dict, selected)
    #     masks = box_np_ops.points_in_rbbox(points, gt_dict["gt_boxes"])
    #     points = points[masks.any(-1)]

    metrics = {}

    point_indices_to_remove = None
    if training:
        """
        boxes_lidar = gt_dict["gt_boxes"]
        bev_map = simplevis.nuscene_vis(points, boxes_lidar)
        cv2.imshow('pre-noise', bev_map)
        """
        selected = kitti.drop_arrays_by_name(gt_dict["gt_names"], ["Denture"])
        _dict_select(gt_dict, selected)
        if remove_unknown:
            remove_mask = gt_dict["difficulty"] == -1
            """
            gt_boxes_remove = gt_boxes[remove_mask]
            gt_boxes_remove[:, 3:6] += 0.25
            points = prep.remove_points_in_boxes(points, gt_boxes_remove)
            """
            keep_mask = np.logical_not(remove_mask)
            _dict_select(gt_dict, keep_mask)
        gt_dict.pop("difficulty")

        # This part is interesting - we will need to do the same
        if min_points_in_gt > 0:
            # points_count_rbbox takes 10ms with 10 sweeps nuscenes data
            point_counts = box_np_ops.points_count_rbbox(
                points, gt_dict["gt_boxes"])
            mask = point_counts >= min_points_in_gt
            _dict_select(gt_dict, mask)

        gt_boxes_mask = np.array(
            [n in class_names for n in gt_dict["gt_names"]], dtype=np.bool_)

        if db_sampler is not None:
            group_ids = None
            if "group_ids" in gt_dict:
                group_ids = gt_dict["group_ids"]

            sampled_dict = db_sampler.sample_all(root_path,
                                                 gt_dict["gt_boxes"],
                                                 gt_dict["gt_names"],
                                                 num_point_features,
                                                 random_crop,
                                                 gt_group_ids=group_ids,
                                                 calib=calib)

            if sampled_dict is not None:
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                gt_dict["gt_names"] = np.concatenate(
                    [gt_dict["gt_names"], sampled_gt_names], axis=0)
                gt_dict["gt_boxes"] = np.concatenate(
                    [gt_dict["gt_boxes"], sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0)
                sampled_gt_importance = np.full([sampled_gt_boxes.shape[0]],
                                                sample_importance,
                                                dtype=sampled_gt_boxes.dtype)
                gt_dict["gt_importance"] = np.concatenate(
                    [gt_dict["gt_importance"], sampled_gt_importance])

                if group_ids is not None:
                    sampled_group_ids = sampled_dict["group_ids"]
                    gt_dict["group_ids"] = np.concatenate(
                        [gt_dict["group_ids"], sampled_group_ids])

                # # Commented out because we have a new way of removing points
                # if remove_points_after_sample:
                #     masks = box_np_ops.points_in_rbbox(points, sampled_gt_boxes)
                #     point_indices_to_remove = np.flatnonzero(masks.any(-1))
                #     # # Delay this process so we can use the full point cloud
                #     # # when we do the ray stopping algorithm
                #     # points = points[np.logical_not(masks.any(-1))]

                # # Paste objects behind so that we don't have to update indices
                # points = np.concatenate([sampled_points, points], axis=0)
                points = np.concatenate([points, sampled_points], axis=0)

        pc_range = voxel_generator.point_cloud_range
        group_ids = None
        if "group_ids" in gt_dict:
            group_ids = gt_dict["group_ids"]

        # # Disable this one for now (not used in PointPillars anyways)
        # prep.noise_per_object_v3_(
        #     gt_dict["gt_boxes"],
        #     points,
        #     gt_boxes_mask,
        #     rotation_perturb=gt_rotation_noise,
        #     center_noise_std=gt_loc_noise_std,
        #     global_random_rot_range=global_random_rot_range,
        #     group_ids=group_ids,
        #     num_try=100)

        # should remove unrelated objects after noise per object
        # for k, v in gt_dict.items():
        #     print(k, v.shape)
        _dict_select(gt_dict, gt_boxes_mask)
        gt_classes = np.array(
            [class_names.index(n) + 1 for n in gt_dict["gt_names"]],
            dtype=np.int32)
        gt_dict["gt_classes"] = gt_classes
        gt_dict["gt_boxes"], points, origins = prep.random_flip(
            gt_dict["gt_boxes"], points, origins, 0.5, random_flip_x,
            random_flip_y)
        gt_dict["gt_boxes"], points, origins = prep.global_rotation_v2(
            gt_dict["gt_boxes"], points, origins, *global_rotation_noise)
        gt_dict["gt_boxes"], points, origins = prep.global_scaling_v2(
            gt_dict["gt_boxes"], points, origins, *global_scaling_noise)
        prep.global_translate_(gt_dict["gt_boxes"], points, origins,
                               global_translate_noise_std)
        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range_by_center(
            gt_dict["gt_boxes"], bv_range)
        _dict_select(gt_dict, mask)

        # limit rad to [-pi, pi]
        gt_dict["gt_boxes"][:, 6] = box_np_ops.limit_period(
            gt_dict["gt_boxes"][:, 6], offset=0.5, period=2 * np.pi)

        # boxes_lidar = gt_dict["gt_boxes"]
        # bev_map = simplevis.nuscene_vis(points, boxes_lidar)
        # cv2.imshow('post-noise', bev_map)
        # cv2.waitKey(0)

    # # Disable this for now (not used in PointPillars anyways)
    # if shuffle_points:
    #     # shuffle is a little slow.
    #     np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size

    # organize points into lists based on timestamps
    time_stamps = points[indices[:-1], -1]  # counting on the fact that we do not miss points from any intermediate time_stamps
    time_stamps = (time_stamps[:-1] + time_stamps[1:]) / 2
    time_stamps = [-1000.0] + time_stamps.tolist() + [1000.0]  # add boundaries
    time_stamps = np.array(time_stamps)

    # # LL_OCCUPIED, LL_FREE = 0.85, -0.4
    # lo_occupied = np.log(0.7 / (1 - 0.7))
    # lo_free = np.log(0.4 / (1 - 0.4))

    # check whether there are additional points (from database sampling)
    num_original = indices[-1]
    if len(points) > num_original:
        # split data into two half (indexed and un-indexed)
        original_points, sampled_points = points[:num_original], points[
            num_original:]
        # compute occupancy and masks
        # visibility, original_mask, sampled_mask = mapping.compute_visibility_and_masks(
        #     original_points, sampled_points, origins, time_stamps, pc_range, min(voxel_size)
        # )
        logodds, original_mask, sampled_mask = mapping.compute_logodds_and_masks(
            original_points,
            sampled_points,
            origins,
            time_stamps,
            pc_range,
            min(voxel_size)  # , lo_occupied, lo_free
        )
        # apply visible mask
        points = np.concatenate(
            (original_points[original_mask], sampled_points[sampled_mask]))
    else:
        # visibility = mapping.compute_visibility(
        #     points, origins, time_stamps, pc_range, min(voxel_size)
        # )
        logodds = mapping.compute_logodds(
            points,
            origins,
            time_stamps,
            pc_range,
            min(voxel_size)  #, lo_occupied, lo_free
        )

    # T = len(time_stamps)-1
    # visibility = visibility.reshape(T, -1)
    # if T < (1 + max_sweeps):
    #     visibility = np.pad(visibility, ((0, (1+max_sweeps)-T), (0,0)), 'edge')

    # with open(f'./utils/mapping/examples/{time.time()}.pkl', 'wb') as f:
    #     ##
    #     pickle.dump(original_points, f)
    #     pickle.dump(sampled_points, f)
    #     pickle.dump(origins, f)
    #     pickle.dump(time_stamps, f)
    #     pickle.dump(pc_range, f)
    #     pickle.dump(voxel_size, f)
    #     ##
    #     pickle.dump(occupancy, f)
    #     pickle.dump(original_mask, f)
    #     pickle.dump(sampled_mask, f)

    if training:
        if min_points_in_gt > 0:
            # points_count_rbbox takes 10ms with 10 sweeps nuscenes data
            point_counts = box_np_ops.points_count_rbbox(
                points, gt_dict["gt_boxes"])
            mask = point_counts >= min_points_in_gt
            _dict_select(gt_dict, mask)

    # [352, 400]
    t1 = time.time()
    if not multi_gpu:
        res = voxel_generator.generate(points, max_voxels)
        voxels = res["voxels"]
        coordinates = res["coordinates"]
        num_points = res["num_points_per_voxel"]
        num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
    else:
        res = voxel_generator.generate_multi_gpu(points, max_voxels)
        voxels = res["voxels"]
        coordinates = res["coordinates"]
        num_points = res["num_points_per_voxel"]
        num_voxels = np.array([res["voxel_num"]], dtype=np.int64)
    metrics["voxel_gene_time"] = time.time() - t1
    example = {
        'voxels': voxels,
        # 'visibility': visibility,
        'logodds': logodds,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": num_voxels,
        "metrics": metrics,
    }
    if calib is not None:
        example["calib"] = calib
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    # print(f'feature_map_size in prep_pointcloud(): {feature_map_size}')
    if anchor_cache is not None:
        # print('having anchor cache')
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        anchors_dict = anchor_cache["anchors_dict"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]

    else:
        # print('NOT having anchor cache')
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, target_assigner.box_ndim])
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:,
                                                             [0, 1, 3, 4, 6]])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
    # print(f'anchors.shape: {anchors.shape}')

    example["anchors"] = anchors
    anchors_mask = None
    if anchor_area_threshold >= 0:
        # slow with high resolution; recommended to keep this disabled.
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    # print("prep time", time.time() - t)
    metrics["prep_time"] = time.time() - t
    if not training:
        return example
    example["gt_names"] = gt_dict["gt_names"]
    # voxel_labels = box_np_ops.assign_label_to_voxel(gt_boxes, coordinates,
    #                                                 voxel_size, coors_range)
    if create_targets:
        t1 = time.time()
        targets_dict = target_assigner.assign(
            anchors,
            anchors_dict,
            gt_dict["gt_boxes"],
            anchors_mask,
            gt_classes=gt_dict["gt_classes"],
            gt_names=gt_dict["gt_names"],
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds,
            importance=gt_dict["gt_importance"])
        """
        boxes_lidar = gt_dict["gt_boxes"]
        bev_map = simplevis.nuscene_vis(points, boxes_lidar, gt_dict["gt_names"])
        assigned_anchors = anchors[targets_dict['labels'] > 0]
        ignored_anchors = anchors[targets_dict['labels'] == -1]
        bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], ignored_anchors, [128, 128, 128], 2)
        bev_map = simplevis.draw_box_in_bev(bev_map, [-50, -50, 3, 50, 50, 1], assigned_anchors, [255, 0, 0])
        cv2.imshow('anchors', bev_map)
        cv2.waitKey(0)

        boxes_lidar = gt_dict["gt_boxes"]
        pp_map = np.zeros(grid_size[:2], dtype=np.float32)
        voxels_max = np.max(voxels[:, :, 2], axis=1, keepdims=False)
        voxels_min = np.min(voxels[:, :, 2], axis=1, keepdims=False)
        voxels_height = voxels_max - voxels_min
        voxels_height = np.minimum(voxels_height, 4)
        # sns.distplot(voxels_height)
        # plt.show()
        pp_map[coordinates[:, 1], coordinates[:, 2]] = voxels_height / 4
        pp_map = (pp_map * 255).astype(np.uint8)
        pp_map = cv2.cvtColor(pp_map, cv2.COLOR_GRAY2RGB)
        pp_map = simplevis.draw_box_in_bev(pp_map, [-50, -50, 3, 50, 50, 1], boxes_lidar, [128, 0, 128], 1)
        cv2.imshow('heights', pp_map)
        cv2.waitKey(0)
        """
        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            # 'reg_weights': targets_dict['bbox_outside_weights'],
            'importance': targets_dict['importance'],
        })
    return example
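_dict_select is used throughout Example #14 but not defined in the snippet. A plausible definition (an assumption, not necessarily the project's exact helper) indexes every array in the dict with the same boolean mask or index array, recursing into nested dicts:

import numpy as np

def _dict_select(dict_, inds):
    # in-place selection: keep only the rows picked by `inds` in every entry
    for k, v in dict_.items():
        if isinstance(v, dict):
            _dict_select(v, inds)
        else:
            dict_[k] = v[inds]

gt = {"gt_boxes": np.zeros((4, 7)), "gt_names": np.array(["car", "ped", "car", "cyc"])}
_dict_select(gt, np.array([True, False, True, False]))
print(gt["gt_names"])  # ['car' 'car']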
Example #15
0
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=70000,
                    class_names=['Car'],
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    reduce_valid_area=False,
                    remove_unknown=False,
                    gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                    gt_loc_noise_std=[1.0, 1.0, 1.0],
                    global_rotation_noise=[-np.pi / 4, np.pi / 4],
                    global_scaling_noise=[0.95, 1.05],
                    global_loc_noise_std=(0.2, 0.2, 0.2),
                    global_random_rot_range=[0.78, 2.35],
                    generate_bev=False,
                    without_reflectivity=False,
                    num_point_features=4,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    add_rgb_to_points=False,
                    lidar_input=False,
                    unlabeled_db_sampler=None,
                    out_size_factor=2,
                    min_gt_point_dict=None,
                    bev_only=False,
                    use_group_id=False,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths
    exist.
    """
    points = input_dict["lidar"]["points"]
    if training:
        gt_boxes = input_dict['lidar']['annotations']["gt_boxes"]
        gt_names = input_dict['lidar']['annotations']["gt_names"]
        # difficulty = input_dict["difficulty"]
        group_ids = None
        if use_group_id and "group_ids" in input_dict:
            group_ids = input_dict["group_ids"]
    # rect = input_dict["rect"]
    # Trv2c = input_dict["Trv2c"]
    # P2 = input_dict["P2"]
    unlabeled_training = unlabeled_db_sampler is not None

    calib = None
    # print(gt_dict)
    # print("+++++++++++++++1111111+++++++++++++++")
    if "calib" in input_dict:
        calib = input_dict["calib"]

    if remove_environment is True and training:
        selected = kitti.keep_arrays_by_name(gt_names, class_names)
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        # difficulty = difficulty[selected]
        if group_ids is not None:
            group_ids = group_ids[selected]
        points = prep.remove_points_outside_boxes(points, gt_boxes)
    if training:
        # print(gt_names)
        selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"])
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        # difficulty = difficulty[selected]
        if group_ids is not None:
            group_ids = group_ids[selected]

        # if remove_unknown:
        #     remove_mask = difficulty == -1
        #     """
        #     gt_boxes_remove = gt_boxes[remove_mask]
        #     gt_boxes_remove[:, 3:6] += 0.25
        #     points = prep.remove_points_in_boxes(points, gt_boxes_remove)
        #     """
        #     keep_mask = np.logical_not(remove_mask)
        #     gt_boxes = gt_boxes[keep_mask]
        #     gt_names = gt_names[keep_mask]
        #     difficulty = difficulty[keep_mask]
        #     if group_ids is not None:
        #         group_ids = group_ids[keep_mask]
        gt_boxes_mask = np.array(
            [n in class_names for n in gt_names], dtype=np.bool_)
        if db_sampler is not None:
            sampled_dict = db_sampler.sample_all(
                root_path,
                gt_boxes,
                gt_names,
                num_point_features,
                random_crop,
                gt_group_ids=group_ids
            )

            if sampled_dict is not None:
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                # gt_names = gt_names[gt_boxes_mask].tolist()
                gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
                # gt_names += [s["name"] for s in sampled]
                gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0)
                if group_ids is not None:
                    sampled_group_ids = sampled_dict["group_ids"]
                    group_ids = np.concatenate([group_ids, sampled_group_ids])

                if remove_points_after_sample:
                    points = prep.remove_points_in_boxes(
                        points, sampled_gt_boxes)

                points = np.concatenate([sampled_points, points], axis=0)
        # unlabeled_mask = np.zeros((gt_boxes.shape[0], ), dtype=np.bool_)
        if without_reflectivity:
            used_point_axes = list(range(num_point_features))
            used_point_axes.pop(3)
            points = points[:, used_point_axes]
        pc_range = voxel_generator.point_cloud_range
        if bev_only:  # set z and h to limits
            gt_boxes[:, 2] = pc_range[2]
            gt_boxes[:, 5] = pc_range[5] - pc_range[2]
        prep.noise_per_object_v3_(
            gt_boxes,
            points,
            gt_boxes_mask,
            rotation_perturb=gt_rotation_noise,
            center_noise_std=gt_loc_noise_std,
            global_random_rot_range=global_random_rot_range,
            group_ids=group_ids,
            num_try=100)
        # should remove unrelated objects after noise per object
        gt_boxes = gt_boxes[gt_boxes_mask]
        gt_names = gt_names[gt_boxes_mask]
        if group_ids is not None:
            group_ids = group_ids[gt_boxes_mask]
        gt_classes = np.array(
            [class_names.index(n) + 1 for n in gt_names], dtype=np.int32)

        gt_boxes, points = prep.random_flip(gt_boxes, points)
        gt_boxes, points = prep.global_rotation(
            gt_boxes, points, rotation=global_rotation_noise)
        gt_boxes, points = prep.global_scaling_v2(gt_boxes, points,
                                                  *global_scaling_noise)

        # Global translation
        gt_boxes, points = prep.global_translate(gt_boxes, points, global_loc_noise_std)

        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range)
        gt_boxes = gt_boxes[mask]
        gt_classes = gt_classes[mask]
        if group_ids is not None:
            group_ids = group_ids[mask]

        # limit rad to [-pi, pi]
        gt_boxes[:, 6] = box_np_ops.limit_period(
            gt_boxes[:, 6], offset=0.5, period=2 * np.pi)

    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]

    # max_voxels: maximum number of voxels
    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)

    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64),
    }

    # if not lidar_input:
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    # print("debug", anchors.shape, matched_thresholds.shape)
    # anchors_bv = anchors_bv.reshape([-1, 4])
    anchors_mask = None
    if anchor_area_threshold >= 0:
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    if generate_bev:
        bev_vxsize = voxel_size.copy()
        bev_vxsize[:2] /= 2
        bev_vxsize[2] *= 2
        bev_map = points_to_bev(points, bev_vxsize, pc_range,
                                without_reflectivity)
        example["bev_map"] = bev_map
    if not training:
        return example
    if create_targets:
        targets_dict = target_assigner.assign(
            anchors,
            gt_boxes,
            anchors_mask,
            gt_classes=gt_classes,
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds)
        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            'reg_weights': targets_dict['bbox_outside_weights'],
        })
    example["points"] = input_dict["lidar"]["points"]
    example['gt_boxes'] = input_dict['lidar']['annotations']["gt_boxes"]
    example['gt_names'] = input_dict['lidar']['annotations']["gt_names"]
    return example
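Every prep_pointcloud variant above accepts an anchor_cache so the anchors are not regenerated for each frame. A sketch of building one, with key names taken from the lookups in the snippets (the import path follows the second.pytorch layout; Example #14 additionally caches an anchors_dict from target_assigner.generate_anchors_dict and reshapes with target_assigner.box_ndim instead of a hard-coded 7):

from second.core import box_np_ops

def build_anchor_cache(target_assigner, feature_map_size):
    # generate anchors once and cache everything the prep functions look up
    ret = target_assigner.generate_anchors(feature_map_size)
    anchors = ret["anchors"].reshape([-1, 7])
    return {
        "anchors": anchors,
        "anchors_bv": box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]]),
        "matched_thresholds": ret["matched_thresholds"],
        "unmatched_thresholds": ret["unmatched_thresholds"],
    }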