Code example #1
    def remove_occluded_points(self, bg_points, gt_boxes, fg_points_list,
                               sampled_boxes, sampled_points_list):
        """Resolve occlusions among gt and sampled boxes: in spherical
        coordinates, drop points of other boxes that fall inside an
        occluding box's projected hull, carve occluded background points
        out behind kept sampled boxes, and merge the surviving box
        points back into the background cloud."""
        bg_points_spherical = box_np_ops.convert_to_spherical_coord(bg_points)
        boxes = np.concatenate((gt_boxes, sampled_boxes), axis=0)
        boxes_points = fg_points_list + sampled_points_list
        boxes_points_spherical = box_np_ops.convert_to_spherical_coord_list(
            boxes_points)
        ascending_idx = sorted(range(len(boxes)), key=lambda x: boxes[x, 0])
        locs = boxes[:, 0:3]
        dims = boxes[:, 3:6]
        angles = boxes[:, 6]
        camera_box_origin = [0.5, 0.5, 0]
        box_corners = box_np_ops.center_to_corner_box3d(locs,
                                                        dims,
                                                        angles,
                                                        camera_box_origin,
                                                        axis=2)
        box_corners_spherical = box_np_ops.convert_to_spherical_coord(
            box_corners)

        scale_factor = 100
        box_corners_spherical *= scale_factor
        boxes_points_spherical = [
            x * scale_factor for x in boxes_points_spherical
        ]
        bg_points_spherical *= scale_factor

        exclude_idx = []
        for idx in ascending_idx[:-1]:
            exclude_idx.append(idx)
            for i, box_points in enumerate(boxes_points_spherical):
                if i in exclude_idx:
                    continue
                # remove_mask = in_hull(box_points[:, :2], ConvexHull(box_corners_spherical[idx][:, :2], qhull_options='QJ').points)
                remove_mask = in_hull(box_points[:, :2],
                                      box_corners_spherical[idx][:, :2])
                boxes_points_spherical[i] = box_points[np.logical_not(
                    remove_mask)]

        remained_boxes_idx = np.zeros((boxes.shape[0]), dtype=bool)
        for i, box_points in reversed(list(enumerate(boxes_points_spherical))):
            if i < len(fg_points_list):
                if box_points.shape[0] > 8:
                    remained_boxes_idx[i] = True
                    bg_points_spherical = np.concatenate(
                        [bg_points_spherical, box_points], axis=0)
            else:
                if box_points.shape[0] > 8:
                    remained_boxes_idx[i] = True
                    # remove_mask = in_hull(bg_points_spherical[:, :2], ConvexHull(box_points[:, :2], qhull_options='QJ').points)
                    remove_mask = in_hull(bg_points_spherical[:, :2],
                                          box_points[:, :2])
                    bg_points_spherical = bg_points_spherical[np.logical_not(
                        remove_mask)]
                    bg_points_spherical = np.concatenate(
                        [bg_points_spherical, box_points], axis=0)

        bg_points_spherical /= scale_factor
        points = box_np_ops.convert_to_cartesian_coord(bg_points_spherical)
        return points, remained_boxes_idx
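The `in_hull` helper used above is not shown in this snippet. A minimal sketch of a compatible implementation, assuming the common Delaunay-based point-in-hull test (the actual project may implement it differently):

import numpy as np
from scipy.spatial import Delaunay

def in_hull(p, hull):
    # Return a boolean mask marking which points in p (here [N, 2]) fall
    # inside the convex hull spanned by the points in hull.
    # find_simplex returns -1 for points outside the triangulation.
    return Delaunay(hull).find_simplex(p) >= 0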
Code example #2
def compute_3dbox_corners(box):
    centers = box[:3]
    dims = box[3:6]
    angles = np.array([box[6]])
    corners_3d = box_np_ops.center_to_corner_box3d(centers.reshape([1, 3]),
                                                   dims.reshape([1, 3]),
                                                   angles,
                                                   origin=(0.5, 0.5, 0.5),
                                                   axis=2)
    result = np.squeeze(corners_3d)  # [8, 3]
    return np.transpose(result)  # [3, 8]
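A minimal usage sketch; the [x, y, z, l, w, h, yaw] box layout and the import path are assumptions inferred from the call above:

import numpy as np
from second.core import box_np_ops  # assumed import path

# one box: center (1, 2, 0.5), dims (4, 2, 1.5), yaw 0.3 rad
box = np.array([1.0, 2.0, 0.5, 4.0, 2.0, 1.5, 0.3])
corners = compute_3dbox_corners(box)
print(corners.shape)  # (3, 8): one column per corner after the transpose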
Code example #3
def box3d_t_2d(box3d, P2):
    from second.core.box_np_ops import center_to_corner_box3d, project_to_image
    locs = box3d[:, :3]
    dims = box3d[:, 3:6]
    angles = box3d[:, 6]
    camera_box_origin = [0.5, 1.0, 0.5]
    box_corners_3d = center_to_corner_box3d(
        locs, dims, angles, camera_box_origin, axis=1)
    box_corners_in_image = project_to_image(box_corners_3d, P2)

    minxy = np.min(box_corners_in_image, axis=1)
    maxxy = np.max(box_corners_in_image, axis=1)
    box_2d_preds = np.concatenate([minxy, maxxy], axis=1)
    return box_2d_preds
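A usage sketch with a placeholder projection matrix; the P2 values are illustrative, not from a real KITTI calibration file:

import numpy as np

# one camera-frame box: [x, y, z, l, h, w, ry] per the slicing above
box3d = np.array([[1.0, 1.5, 10.0, 3.9, 1.6, 1.7, 0.1]])
P2 = np.array([[721.5, 0.0, 609.6, 44.9],
               [0.0, 721.5, 172.9, 0.2],
               [0.0, 0.0, 1.0, 0.003]])
bbox_2d = box3d_t_2d(box3d, P2)  # [N, 4]: xmin, ymin, xmax, ymax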
Code example #4
File: common.py Project: emdata-ailab/LPCC-Net
def box3d_to_bbox(box3d, rect, Trv2c, P2):
    box3d_lidar = box3d.copy()
    box3d_lidar[:, 2] -= box3d_lidar[:, 5] / 2
    box3d_camera = box_lidar_to_camera(box3d_lidar, rect, Trv2c)
    box_corners = center_to_corner_box3d(box3d_camera[:, :3],
                                         box3d_camera[:, 3:6],
                                         box3d_camera[:, 6], [0.5, 1.0, 0.5],
                                         axis=1)
    box_corners_in_image = project_to_image(box_corners, P2)
    # box_corners_in_image: [N, 8, 2]
    minxy = np.min(box_corners_in_image, axis=1)
    maxxy = np.max(box_corners_in_image, axis=1)
    bbox = np.concatenate([minxy, maxxy], axis=1)
    return bbox
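Note the two origin conventions running through these examples: camera-frame calls pass [0.5, 1.0, 0.5] with axis=1 (KITTI camera boxes are anchored at the bottom-face center and rotate about the vertical y axis), while lidar-frame calls pass [0.5, 0.5, 0] with axis=2 (z up, box anchored at the bottom face). A side-by-side sketch, assuming the second.core.box_np_ops import path:

import numpy as np
from second.core.box_np_ops import center_to_corner_box3d

box = np.array([[0.0, 0.0, 0.0, 4.0, 1.6, 2.0, 0.0]])

# camera frame: y is the vertical axis, rotation_y about it
cam_corners = center_to_corner_box3d(box[:, :3], box[:, 3:6], box[:, 6],
                                     [0.5, 1.0, 0.5], axis=1)
# lidar frame: z is the vertical axis, yaw about it
lidar_corners = center_to_corner_box3d(box[:, :3], box[:, 3:6], box[:, 6],
                                       [0.5, 0.5, 0], axis=2)
print(cam_corners.shape, lidar_corners.shape)  # (1, 8, 3) each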
Code example #5
File: f110_viewer.py Project: zhuhaijun753/f110-avp
    def draw_detection(self, detection_anno, label_color=GLColor.Blue):
        dt_box_color = self.w_config.get("DTBoxColor")[:3]
        dt_box_color = (*dt_box_color, self.w_config.get("DTBoxAlpha"))

        dt_box_lidar = np.array(
            [detection_anno["box3d_lidar"].detach().cpu().numpy()])[0]
        scores = np.array([detection_anno["scores"].detach().cpu().numpy()])[0]

        # filter by score
        keep_list = np.where(scores > 0.2)[0]
        dt_box_lidar = dt_box_lidar[keep_list, :]
        scores = scores[keep_list]

        dt_boxes_corners = box_np_ops.center_to_corner_box3d(
            dt_box_lidar[:, :3],
            dt_box_lidar[:, 3:6],
            dt_box_lidar[:, 6],
            origin=[0.5, 0.5, 0],
            axis=2)

        # filter bbox by its center
        centers = (dt_boxes_corners[:, 0, :] + dt_boxes_corners[:, 6, :]) / 2
        keep_list = np.where((centers[:, 0] < self.points_range[0]) & (centers[:, 0] > self.points_range[3]) & \
                             (centers[:, 1] < self.points_range[1]) & (centers[:, 1] > self.points_range[4]) & \
                             (centers[:, 2] < self.points_range[2]) & (centers[:, 2] > self.points_range[5]))[0]
        dt_boxes_corners = dt_boxes_corners[keep_list, :, :]
        dt_box_lidar = dt_box_lidar[keep_list, :]
        scores = scores[keep_list]

        num_dt = dt_box_lidar.shape[0]
        self.info('num_dt', num_dt)

        if num_dt != 0:
            for ind in range(num_dt):
                self.info('scores', scores[ind])
                self.info('dt_box_lidar', dt_box_lidar[ind])
            dt_box_color = np.tile(
                np.array(dt_box_color)[np.newaxis, ...], [num_dt, 1])
            scores_rank = scores / scores[0]
            # if self.w_config.get("DTScoreAsAlpha") and scores is not None:
            # dt_box_color = np.concatenate([dt_box_color[:, :3], scores[..., np.newaxis]], axis=1)
            dt_box_color = np.concatenate(
                [dt_box_color[:, :3], scores_rank[..., np.newaxis]], axis=1)
            # dt_box_color = np.concatenate([dt_box_color[:, :3], np.ones((scores[..., np.newaxis].shape))], axis=1)
            self.w_pc_viewer.boxes3d("dt_boxes", dt_boxes_corners,
                                     dt_box_color,
                                     self.w_config.get("DTBoxLineWidth"), 1.0)
Code example #6
File: visualize.py Project: surabhi96/pointpillars
def kitti_anno_to_corners(info, annos=None):
    rect = info['calib/R0_rect']
    P2 = info['calib/P2']
    Tr_velo_to_cam = info['calib/Tr_velo_to_cam']
    if annos is None:
        annos = info['annos']
    dims = annos['dimensions']
    loc = annos['location']
    rots = annos['rotation_y']
    scores = None
    if 'score' in annos:
        scores = annos['score']
    boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1)
    boxes_lidar = box_np_ops.box_camera_to_lidar(boxes_camera, rect,
                                                 Tr_velo_to_cam)
    boxes_corners = box_np_ops.center_to_corner_box3d(boxes_lidar[:, :3],
                                                      boxes_lidar[:, 3:6],
                                                      boxes_lidar[:, 6],
                                                      origin=[0.5, 0.5, 0],
                                                      axis=2)
    return boxes_corners, scores, boxes_lidar
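A usage sketch with hypothetical placeholder inputs; identity matrices stand in for real KITTI calibration, and the dimension/location values are illustrative:

import numpy as np

info = {
    'calib/R0_rect': np.eye(4),
    'calib/P2': np.eye(4),
    'calib/Tr_velo_to_cam': np.eye(4),
    'annos': {
        'dimensions': np.array([[3.9, 1.6, 1.7]]),
        'location': np.array([[1.0, 1.5, 10.0]]),
        'rotation_y': np.array([0.1]),
    },
}
corners, scores, boxes_lidar = kitti_anno_to_corners(info)
# corners: [1, 8, 3] in the lidar frame; scores is None (no 'score' key)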
Code example #7
def noise_per_object_v2_(gt_boxes,
                         points=None,
                         valid_mask=None,
                         rotation_perturb=np.pi / 4,
                         center_noise_std=1.0,
                         global_random_rot_range=np.pi / 4,
                         num_try=100):
    """random rotate or remove each groundtrutn independently.
    use kitti viewer to test this function points_transform_

    Args:
        gt_boxes: [N, 7], gt box in lidar.points_transform_
        points: [M, 4], point cloud in lidar.
    """
    num_boxes = gt_boxes.shape[0]
    if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
        rotation_perturb = [-rotation_perturb, rotation_perturb]
    if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
        global_random_rot_range = [
            -global_random_rot_range, global_random_rot_range
        ]

    if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
        center_noise_std = [
            center_noise_std, center_noise_std, center_noise_std
        ]
    if valid_mask is None:
        valid_mask = np.ones((num_boxes, ), dtype=np.bool_)
    center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
    loc_noises = np.random.normal(scale=center_noise_std,
                                  size=[num_boxes, num_try, 3])
    # loc_noises = np.random.uniform(
    #     -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
    rot_noises = np.random.uniform(rotation_perturb[0],
                                   rotation_perturb[1],
                                   size=[num_boxes, num_try])
    gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
    grot_lowers = global_random_rot_range[0] - gt_grots
    grot_uppers = global_random_rot_range[1] - gt_grots
    global_rot_noises = np.random.uniform(grot_lowers[..., np.newaxis],
                                          grot_uppers[..., np.newaxis],
                                          size=[num_boxes, num_try])

    origin = [0.5, 0.5, 0]
    gt_box_corners = box_np_ops.center_to_corner_box3d(gt_boxes[:, :3],
                                                       gt_boxes[:, 3:6],
                                                       gt_boxes[:, 6],
                                                       origin=origin,
                                                       axis=2)
    if np.abs(global_random_rot_range[0] - global_random_rot_range[1]) < 1e-3:
        selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]],
                                       valid_mask, loc_noises, rot_noises)
    else:
        selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]],
                                           valid_mask, loc_noises, rot_noises,
                                           global_rot_noises)
    loc_transforms = _select_transform(loc_noises, selected_noise)
    rot_transforms = _select_transform(rot_noises, selected_noise)
    if points is not None:
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
        point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
        points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms,
                          rot_transforms, valid_mask)

    box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
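A usage sketch of the augmentation entry point; both arrays are modified in place, as the trailing underscore suggests. The box and point values are illustrative:

import numpy as np

gt_boxes = np.array([[10.0, 5.0, -1.0, 3.9, 1.6, 1.7, 0.2]], dtype=np.float32)
points = np.random.uniform(-20, 20, size=(1000, 4)).astype(np.float32)

noise_per_object_v2_(gt_boxes, points,
                     rotation_perturb=np.pi / 9,
                     center_noise_std=0.25,
                     global_random_rot_range=0.0,  # degenerate range: global rotation disabled
                     num_try=50)
# gt_boxes and points now carry the selected per-object perturbations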
Code example #8
    def convert_detection_to_kitti_annos(self, detection):
        class_names = self._class_names
        det_image_idxes = [det["metadata"]["image_idx"] for det in detection]
        gt_image_idxes = [
            info["image"]["image_idx"] for info in self._kitti_infos
        ]
        annos = []
        for i in range(len(detection)):
            det_idx = det_image_idxes[i]
            det = detection[i]
            # info = self._kitti_infos[gt_image_idxes.index(det_idx)]
            info = self._kitti_infos[i]
            calib = info["calib"]
            rect = calib["R0_rect"]
            Trv2c = calib["Tr_velo_to_cam"]
            P2 = calib["P2"]
            final_box_preds = det["box3d_lidar"].detach().cpu().numpy()
            label_preds = det["label_preds"].detach().cpu().numpy()
            scores = det["scores"].detach().cpu().numpy()
            if final_box_preds.shape[0] != 0:
                final_box_preds[:, 2] -= final_box_preds[:, 5] / 2
                box3d_camera = box_np_ops.box_lidar_to_camera(
                    final_box_preds, rect, Trv2c)
                locs = box3d_camera[:, :3]
                dims = box3d_camera[:, 3:6]
                angles = box3d_camera[:, 6]
                camera_box_origin = [0.5, 1.0, 0.5]
                box_corners = box_np_ops.center_to_corner_box3d(
                    locs, dims, angles, camera_box_origin, axis=1)
                box_corners_in_image = box_np_ops.project_to_image(
                    box_corners, P2)
                # box_corners_in_image: [N, 8, 2]
                minxy = np.min(box_corners_in_image, axis=1)
                maxxy = np.max(box_corners_in_image, axis=1)
                bbox = np.concatenate([minxy, maxxy], axis=1)
            anno = kitti.get_start_result_anno()
            num_example = 0
            box3d_lidar = final_box_preds
            for j in range(box3d_lidar.shape[0]):
                image_shape = info["image"]["image_shape"]
                if bbox[j, 0] > image_shape[1] or bbox[j, 1] > image_shape[0]:
                    continue
                if bbox[j, 2] < 0 or bbox[j, 3] < 0:
                    continue
                bbox[j, 2:] = np.minimum(bbox[j, 2:], image_shape[::-1])
                bbox[j, :2] = np.maximum(bbox[j, :2], [0, 0])
                anno["bbox"].append(bbox[j])
                # convert center format to kitti format
                # box3d_lidar[j, 2] -= box3d_lidar[j, 5] / 2
                anno["alpha"].append(
                    -np.arctan2(-box3d_lidar[j, 1], box3d_lidar[j, 0]) +
                    box3d_camera[j, 6])
                anno["dimensions"].append(box3d_camera[j, 3:6])
                anno["location"].append(box3d_camera[j, :3])
                anno["rotation_y"].append(box3d_camera[j, 6])

                anno["name"].append(class_names[int(label_preds[j])])
                anno["truncated"].append(0.0)
                anno["occluded"].append(0)
                anno["score"].append(scores[j])

                num_example += 1
            if num_example != 0:
                anno = {n: np.stack(v) for n, v in anno.items()}
                annos.append(anno)
            else:
                annos.append(kitti.empty_result_anno())
            num_example = annos[-1]["name"].shape[0]
            annos[-1]["metadata"] = det["metadata"]
        return annos
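The alpha entry above is KITTI's observation angle: the camera-frame yaw minus the azimuth of the box center as seen from the sensor. A numeric restatement of the formula used in the loop, with illustrative values:

import numpy as np

x, y = 10.0, 5.0  # lidar-frame box center (x forward, y left)
rotation_y = 0.3  # camera-frame yaw, as in box3d_camera[:, 6]
alpha = -np.arctan2(-y, x) + rotation_y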
Code example #9
def noise_per_object_v3_(gt_boxes,
                         points=None,
                         valid_mask=None,
                         rotation_perturb=np.pi / 4,
                         center_noise_std=1.0,
                         global_random_rot_range=np.pi / 4,
                         num_try=100,
                         group_ids=None):
    """random rotate or remove each groundtruth independently.
    use kitti viewer to test this function points_transform_

    Args:
        gt_boxes: [N, 7], gt box in lidar.points_transform_
        points: [M, 4], point cloud in lidar.
    """
    num_boxes = gt_boxes.shape[0]
    if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
        rotation_perturb = [-rotation_perturb, rotation_perturb]
    if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
        global_random_rot_range = [
            -global_random_rot_range, global_random_rot_range
        ]
    enable_grot = np.abs(global_random_rot_range[0] -
                         global_random_rot_range[1]) >= 1e-3  # False when the range is degenerate
    if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
        center_noise_std = [
            center_noise_std, center_noise_std, center_noise_std
        ]
    if valid_mask is None:
        valid_mask = np.ones((num_boxes, ), dtype=np.bool_)
    center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
    loc_noises = np.random.normal(  # Gaussian location noise: num_try samples per object, one per axis
        scale=center_noise_std,
        size=[num_boxes, num_try, 3])
    # loc_noises = np.random.uniform(
    #     -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
    rot_noises = np.random.uniform(rotation_perturb[0],
                                   rotation_perturb[1],
                                   size=[num_boxes, num_try])  # uniform within the range; num_try rotation perturbations per object
    gt_grots = np.arctan2(gt_boxes[:, 0],
                          gt_boxes[:, 1])  # absolute heading of each box center from its lidar x/y coordinates
    grot_lowers = global_random_rot_range[0] - gt_grots
    grot_uppers = global_random_rot_range[1] - gt_grots
    global_rot_noises = np.random.uniform(  # absolute-heading noise after adding the global range; with a [0.0, 0.0] range each object's heading is unchanged
        grot_lowers[..., np.newaxis],
        grot_uppers[..., np.newaxis],
        size=[num_boxes, num_try])
    if group_ids is not None:
        if enable_grot:
            set_group_noise_same_v2_(loc_noises, rot_noises, global_rot_noises,
                                     group_ids)
        else:
            set_group_noise_same_(loc_noises, rot_noises, group_ids)
        group_centers, group_id_num_dict = get_group_center(
            gt_boxes[:, :3], group_ids)
        if enable_grot:
            group_transform_v2_(loc_noises, rot_noises, gt_boxes[:, :3],
                                gt_boxes[:, 6], group_centers,
                                global_rot_noises, valid_mask)
        else:
            group_transform_(loc_noises, rot_noises, gt_boxes[:, :3],
                             gt_boxes[:, 6], group_centers, valid_mask)
        group_nums = np.array(list(group_id_num_dict.values()), dtype=np.int64)

    origin = [0.5, 0.5, 0]  # box origin convention in lidar coordinates (bottom-face center)
    # convert gt boxes to their 8 corners, [N, 8, 3] (front-left-bottom, front-left-top,
    # rear-left-top, rear-left-bottom, front-right-bottom, front-right-top,
    # rear-right-top, rear-right-bottom)
    gt_box_corners = box_np_ops.center_to_corner_box3d(gt_boxes[:, :3],
                                                       gt_boxes[:, 3:6],
                                                       gt_boxes[:, 6],
                                                       origin=origin,
                                                       axis=2)
    if group_ids is not None:  # None here, so this branch is skipped
        if not enable_grot:
            selected_noise = noise_per_box_group(gt_boxes[:, [0, 1, 3, 4, 6]],
                                                 valid_mask, loc_noises,
                                                 rot_noises, group_nums)
        else:
            selected_noise = noise_per_box_group_v2_(
                gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises,
                rot_noises, group_nums, global_rot_noises)
    else:
        if not enable_grot:  # True here, since the global range is degenerate
            selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]],
                                           valid_mask, loc_noises,
                                           rot_noises)  # for each gt box, the index of a collision-free location/rotation perturbation
        else:
            selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]],
                                               valid_mask, loc_noises,
                                               rot_noises, global_rot_noises)
    loc_transforms = _select_transform(loc_noises, selected_noise)  # pick the selected perturbation per box
    rot_transforms = _select_transform(rot_noises, selected_noise)
    surfaces = box_np_ops.corner_to_surfaces_3d_jit(
        gt_box_corners)  # [N, 6, 4, 3]: 6 faces, 4 points per face
    if points is not None:
        point_masks = points_in_convex_polygon_3d_jit(
            points[:, :3], surfaces)  # which points of this frame fall inside each box
        points_transform_(
            points,
            gt_boxes[:, :3],
            point_masks,
            loc_transforms,  # move the points inside each perturbed gt box
            rot_transforms,
            valid_mask)

    box3d_transform_(gt_boxes, loc_transforms, rot_transforms,
                     valid_mask)  # apply the same transforms to the gt boxes themselves
Code example #10
    # #####*****  Load the lane BMP  *****#####
    bmp_path = r'/data/station_2_lane.bmp'
    img_cv2 = cv2.imread(bmp_path)
    imgg = cv2.cvtColor(img_cv2, cv2.COLOR_BGR2RGB)
    bmp_img = cache_bmp(imgg)

    for i in range(100):
        box_file = box_adr + str(my_count) + '.npy'
        lidar_file = lidar_adr + str(my_count) + '.npy'
        my_count += 1
        box = np.load(box_file)
        point = np.load(lidar_file)

        corners = box_np_ops.center_to_corner_box3d(box[:, :3],
                                                    box[:, 3:6],
                                                    box[:, 6],
                                                    origin=[0.5, 0.5, 0.5],
                                                    axis=2)
        start_time = time.time()
        # new_box = fix_cluster(box, bmp_img)
        new_box = fix_cluster(box)
        print('the running time is : ', (time.time() - start_time) * 10)
        new_box[:, 1] += 1
        new_corners = box_np_ops.center_to_corner_box3d(new_box[:, :3],
                                                        new_box[:, 3:6],
                                                        new_box[:, 6],
                                                        origin=[0.5, 0.5, 0.5],
                                                        axis=2)
        line_box = np.array([[0, 1], [1, 2], [0, 3], [2, 3], [4, 5], [4, 7],
                             [5, 6], [6, 7], [0, 4], [1, 5], [2, 6], [3, 7]])
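The snippet ends with line_box, the 12 corner-index pairs that trace a box's edges. A sketch of how such an edge list is typically consumed, here with Open3D's LineSet (the original drawing code is not shown, so the Open3D usage is an assumption):

import numpy as np
import open3d as o3d

def make_box_lineset(corners_8x3, line_box, color=(1.0, 0.0, 0.0)):
    # corners_8x3: one [8, 3] corner set from center_to_corner_box3d
    line_set = o3d.geometry.LineSet(
        points=o3d.utility.Vector3dVector(corners_8x3),
        lines=o3d.utility.Vector2iVector(line_box))
    line_set.colors = o3d.utility.Vector3dVector(
        np.tile(color, (len(line_box), 1)))
    return line_set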
Code example #11
def PointPillarsInference(inference_ctx, points, points_range, scale_up,
                          shift_x, shift_z):
    if points.shape[0] < 100:
        return None
    inputs = inference_ctx.get_inference_input_dict(points)
    with inference_ctx.ctx():
        predictions_dicts = inference_ctx.inference(inputs)
    # print(predictions_dicts)
    detection_anno = predictions_dicts[0]
    if detection_anno["box3d_lidar"] is None:
        return None
    dt_box_lidar = np.array(
        [detection_anno["box3d_lidar"].detach().cpu().numpy()])[0]
    scores = np.array([detection_anno["scores"].detach().cpu().numpy()])[0]
    # print(scores)

    # filter by score
    keep_list = np.where(scores > 0.4)[0]
    dt_box_lidar = dt_box_lidar[keep_list, :]
    scores = scores[keep_list]
    dt_box_lidar[:, :6] /= scale_up
    dt_box_lidar[:, 1] += shift_x
    dt_box_lidar[:, 2] += 0.12  # fixed z offset

    dt_box_lidar_big = dt_box_lidar.copy()
    # dt_box_lidar_big[:, 4] *= 1.1 # length
    dt_box_lidar_big[:, 3] *= 1.1  # width
    # dt_box_lidar[:, 4] *= 1.1 # length
    dt_box_lidar[:, 3] *= 1.2  # width

    dt_boxes_corners = box_np_ops.center_to_corner_box3d(dt_box_lidar[:, :3],
                                                         dt_box_lidar[:, 3:6],
                                                         dt_box_lidar[:, 6],
                                                         origin=[0.5, 0.5, 0],
                                                         axis=2)

    dt_boxes_corners_big = box_np_ops.center_to_corner_box3d(
        dt_box_lidar_big[:, :3],
        dt_box_lidar_big[:, 3:6],
        dt_box_lidar_big[:, 6],
        origin=[0.5, 0.5, 0],
        axis=2)

    # filter bbox by its center
    centers = dt_box_lidar[:, :3]
    keep_list = np.where((centers[:, 0] < points_range[0]) & (centers[:, 0] > points_range[3]) & \
                            (centers[:, 1] < points_range[1]) & (centers[:, 1] > points_range[4]))[0]
    dt_boxes_corners = dt_boxes_corners[keep_list, :, :]
    dt_boxes_corners_big = dt_boxes_corners_big[keep_list, :]
    # dt_box_lidar = dt_box_lidar[keep_list]
    scores = scores[keep_list]

    num_dt = dt_boxes_corners.shape[0]

    if num_dt == 0:
        print('miss')
        return None
    boxes_select = np.zeros((18, 3))
    boxes_select[0:8, :] = dt_boxes_corners[0, :, :]  # tight bbox
    boxes_select[8:16, :] = dt_boxes_corners_big[0, :, :]  # big bbox
    boxes_select[16, :] = dt_box_lidar[keep_list[0], :3]  # center position
    boxes_select[17, 0] = dt_box_lidar[keep_list[0], 6]  # orientation
    print('scores: ', scores, 'angles: ', dt_box_lidar[keep_list, 6])
    # print(dt_box_lidar[keep_list[0], 6])

    return boxes_select
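The return value packs everything into a single [18, 3] array. A sketch of the matching decoder a consumer would need (the function name is hypothetical; the layout is the one defined above):

def unpack_boxes_select(boxes_select):
    tight_corners = boxes_select[0:8, :]   # [8, 3] tight bbox corners
    big_corners = boxes_select[8:16, :]    # [8, 3] enlarged bbox corners
    center = boxes_select[16, :]           # box center position
    yaw = boxes_select[17, 0]              # orientation angle
    return tight_corners, big_corners, center, yaw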
Code example #12
def main():
    np.set_printoptions(threshold=np.inf)
    # pc_file = '/home/lucerna/MEGA/project/AVP/2427439726050.npy'
    # pc = np.transpose(np.load(pc_file))

    context_cloud = zmq.Context()
    socket_cloud = context_cloud.socket(zmq.SUB)
    socket_cloud.setsockopt(zmq.SUBSCRIBE, b"")
    socket_cloud.setsockopt(zmq.RCVHWM, 1)
    socket_cloud.connect("tcp://localhost:5556")
    print("Collecting point clouds...")

    context_result = zmq.Context()
    socket_result = context_result.socket(zmq.SUB)
    socket_result.setsockopt(zmq.SUBSCRIBE, b"")
    socket_result.setsockopt(zmq.RCVHWM, 1)
    socket_result.connect("tcp://localhost:5560")
    print("Collecting inference...")

    context_odom = zmq.Context()
    socket_odom = context_odom.socket(zmq.SUB)
    socket_odom.setsockopt(zmq.SUBSCRIBE, b"")
    socket_odom.setsockopt(zmq.RCVHWM, 1)
    print("Collecting odom info...")
    socket_odom.connect("tcp://192.168.1.2:5559")

    context_box = zmq.Context()
    socket_box = context_box.socket(zmq.PUB)
    socket_box.setsockopt(zmq.SNDHWM, 1)
    socket_box.bind("tcp://*:5557")
    print('Sending bbox')

    context_grid = zmq.Context()
    socket_grid = context_grid.socket(zmq.PUB)
    socket_grid.setsockopt(zmq.SNDHWM, 1)
    socket_grid.bind("tcp://*:5558")
    print('Sending occupancy grid')

    x_clip = np.array([0, 1.5])
    y_clip = np.array([-1.5, 1.5])
    z_clip = 0.14
    grid_res = 100
    object_res = 50

    # create occupancy grid
    dim_x = int((x_clip[1] - x_clip[0]) * grid_res)
    dim_y = int((y_clip[1] - y_clip[0]) * grid_res)
    dim_x_object = int((x_clip[1] - x_clip[0]) * object_res)
    dim_y_object = int((y_clip[1] - y_clip[0]) * object_res)
    object_matrix = np.zeros([dim_x_object, dim_y_object])
    object_matrix_inflated = object_matrix.copy()
    car_matrix = np.zeros([dim_x_object, dim_y_object])
    car_matrix_object = np.zeros([dim_x_object, dim_y_object])
    car_peri_coords_x = []
    car_peri_coords_y = []
    pc_grid = np.zeros((1, 2))
    pc_in = None
    inference_in = None
    odom_info = None

    last_orientation = 0
    last_position = np.zeros((1, 3))
    last_time = 0
    flip_count = 0
    detection_count = 1
    calibration_count = 10
    filter_flag = 0
    first_time = 1
    point_update = 0

    mu_now = np.zeros((4, ))
    mu_pre = np.zeros((4, ))
    z_now = np.zeros((4, ))
    z_pre = np.zeros((4, ))
    odom_now = np.zeros((4, ))
    odom_pre = np.zeros((4, ))

    while True:
        if socket_cloud.poll(timeout=5) != 0:
            # add objects in the grid
            point_update = 1
            pc_in = np.transpose(recv_array(socket_cloud))
            pc = pc_in[np.where((pc_in[:, 1] > y_clip[0])
                                & (pc_in[:, 1] < y_clip[1]))]
            pc = pc[np.where(pc[:, 0] < x_clip[1])]
            pc[:, 2] += -z_clip
            pc = pc[np.where((pc[:, 2] > 0))]
            pc = pc[np.where((pc[:, 3] > 150))]
            pc_grid = pc[:, 0:2]
            pc_grid[:, 0] = (np.floor(pc_grid[:, 0] * object_res))
            pc_grid[:, 1] = (np.floor(pc_grid[:, 1] * object_res) +
                             dim_y_object / 2)
            pc_grid = pc_grid.astype(int)

            if pc_grid.shape[0] > 1:
                object_matrix = np.zeros([dim_x_object, dim_y_object])
                pc_grid, counts = np.unique(pc_grid,
                                            return_counts=True,
                                            axis=0)
                pc_grid = pc_grid[np.where(counts > grid_res / object_res)]
                object_matrix[pc_grid[:, 0], pc_grid[:, 1]] = 1
                object_matrix_inflated = object_matrix.copy()

        if socket_result.poll(timeout=5) != 0:
            inference_in = recv_array(socket_result)
            if inference_in[1, 2] == 1:
                first_time = 0
                dt_box_lidar = inference_in[0, :].copy()
                if len(dt_box_lidar.shape) == 1:
                    dt_box_lidar = np.expand_dims(dt_box_lidar, axis=0)
                # dt_box_lidar[:, 3] *= 1.2 # width
                # dt_box_lidar[:, 4] *= 1.1 # length
                dt_box_lidar[:, 3] = 0.28  # width
                dt_box_lidar[:, 4] = 0.57  # length

                # if we encounter a loss of track, recalibrate
                if inference_in[1, 0] - last_time > 3:
                    last_orientation = 0
                    flip_count = 0
                    detection_count = 1
                    print('Calibrating...')

                if detection_count == calibration_count:
                    print('Calibrated')
                    detection_count += 1
                elif detection_count < calibration_count and inference_in[
                        1, 1] > 0.6:
                    # print(flip_count, detection_count)
                    detection_count += 1

                if last_time != 0:
                    delta_time = inference_in[1, 0] - last_time

                    # dealing with orientation fluctuation
                    if np.abs(np.pi -
                              np.abs(last_orientation -
                                     dt_box_lidar[:, 6])) < 0.6 and delta_time:
                        if detection_count < calibration_count and inference_in[
                                1, 1] > 0.6:
                            flip_count += 1
                        if not (detection_count == calibration_count
                                and flip_count / detection_count >= 0.5):
                            dt_box_lidar[:, 6] -= np.pi

                    delta_angular_speed = np.abs(
                        last_orientation - dt_box_lidar[:, 6]) / delta_time
                    delta_linear_speed = np.abs(
                        last_position - dt_box_lidar[:, :3]) / delta_time
                    # print('delta_time', delta_time, 'delta_angular_speed', delta_angular_speed, 'delta_linear_speed', delta_linear_speed)

                    # filter jitter
                    if delta_time < 0.2 and (
                            delta_angular_speed > 2
                            or np.any(delta_linear_speed > 2)
                    ) and detection_count > calibration_count:
                        # print(delta_angular_speed, delta_linear_speed)
                        print('filtered')
                        filter_flag = 1

                if filter_flag == 0:
                    # record current detection
                    last_orientation = dt_box_lidar[:, 6]
                    last_time = inference_in[1, 0]
                    last_position = dt_box_lidar[:, :3]
                elif filter_flag == 1:
                    # print('filtered')
                    dt_box_lidar[:, 6] = last_orientation
                    last_time = inference_in[1, 0]
                    dt_box_lidar[:, :3] = last_position
                    filter_flag = 0

                z_now[0] = dt_box_lidar[:, 0]
                z_now[1] = dt_box_lidar[:, 1]
                z_now[2] = dt_box_lidar[:, 6]
                z_now[3] = inference_in[1, 0]

        if socket_odom.poll(
                timeout=1) != 0 and detection_count > calibration_count:
            odom_info = recv_array(socket_odom)
            odom_now[3] = odom_info[2]
            linear_speed_correction = 0.67
            angular_speed_correction = 0.9
            delta_time_odom = odom_now[3] - odom_pre[3]
            short_time = 0
            if delta_time_odom < 0.01:
                short_time = delta_time_odom
            if delta_time_odom > 0.01 and z_now[2] != 0:
                delta_time_odom += short_time
                odom_now = convert_odom(
                    odom_info[0] * linear_speed_correction,
                    odom_info[1] * angular_speed_correction, z_now[2],
                    odom_info[2])

            # predict step
            if odom_now[3] > odom_pre[3]:
                if delta_time_odom > 1000:
                    delta_time_odom = 0.03
                mu_now[0:3] = delta_time_odom * odom_now[0:3] + mu_pre[0:3]
            odom_pre = odom_now.copy()

        # update step
        if z_now[3] > z_pre[3]:
            K = 0.8
            mu_now[0:3] = mu_now[0:3] + K * (z_now[0:3] - mu_now[0:3])
            z_pre = z_now.copy()

        mu_pre = mu_now
        if first_time == 0 and pc_grid.shape[0] > 1:
            dt_box_lidar[:, 0:2] = mu_now[0:2]
            dt_box_lidar[:, 6] = mu_now[2]
            # get the corner coords from the bbox
            dt_boxes_corners = box_np_ops.center_to_corner_box3d(
                dt_box_lidar[:, :3],
                dt_box_lidar[:, 3:6],
                dt_box_lidar[:, 6],
                origin=[0.5, 0.5, 0],
                axis=2)
            bbox_array = np.zeros((10, 3))
            bbox_array[0:8, :] = dt_boxes_corners
            bbox_array[8, :] = dt_box_lidar[0, :3]  # center position
            bbox_array[9, 0] = dt_box_lidar[0, 6]  # orientation
            send_array(socket_box, bbox_array)

            # add car detection in the grid
            rect_x = np.zeros((4, ))
            rect_y = np.zeros((4, ))
            rect_x[0] = find_coords(bbox_array[0, 0], object_res)
            rect_y[0] = find_coords(bbox_array[0, 1], object_res,
                                    dim_y_object / 2)
            rect_x[1] = find_coords(bbox_array[4, 0], object_res)
            rect_y[1] = find_coords(bbox_array[4, 1], object_res,
                                    dim_y_object / 2)
            rect_x[2] = find_coords(bbox_array[6, 0], object_res)
            rect_y[2] = find_coords(bbox_array[6, 1], object_res,
                                    dim_y_object / 2)
            rect_x[3] = find_coords(bbox_array[2, 0], object_res)
            rect_y[3] = find_coords(bbox_array[2, 1], object_res,
                                    dim_y_object / 2)
            car_coords_x, car_coords_y = np.array(
                polygon(rect_x, rect_y, shape=(dim_x_object, dim_y_object)))
            car_matrix = np.zeros([dim_x_object, dim_y_object])
            car_matrix[car_coords_x, car_coords_y] = 1

            rect_x[0] = find_coords(bbox_array[0, 0], grid_res)
            rect_y[0] = find_coords(bbox_array[0, 1], grid_res, dim_y / 2)
            rect_x[1] = find_coords(bbox_array[4, 0], grid_res)
            rect_y[1] = find_coords(bbox_array[4, 1], grid_res, dim_y / 2)
            rect_x[2] = find_coords(bbox_array[6, 0], grid_res)
            rect_y[2] = find_coords(bbox_array[6, 1], grid_res, dim_y / 2)
            rect_x[3] = find_coords(bbox_array[2, 0], grid_res)
            rect_y[3] = find_coords(bbox_array[2, 1], grid_res, dim_y / 2)
            car_peri_coords_x, car_peri_coords_y = np.array(
                polygon_perimeter(rect_x,
                                  rect_y,
                                  shape=(dim_x, dim_y),
                                  clip=True))

            # if an occupied cell's neighbor lies inside the car, that cell belongs to the car too
            car_matrix_object = np.zeros([dim_x_object, dim_y_object])
            pc_grid = np.transpose(np.array(np.where(object_matrix == 1)))
            for ind in range(pc_grid.shape[0]):
                if car_matrix[pc_grid[ind, 0], pc_grid[ind, 1]] == 1:
                    car_matrix_object[pc_grid[ind, 0], pc_grid[ind, 1]] = 1
                else:
                    find_neighbor = 0
                    neighbors = find_neighbours(pc_grid[ind, 0],
                                                pc_grid[ind, 1],
                                                dim_x_object,
                                                dim_y_object,
                                                option=0)
                    for neighbor in neighbors:
                        if object_matrix[neighbor[0], neighbor[1]] == 1:
                            find_neighbor = 1
                        if car_matrix[neighbor[0], neighbor[1]] == 1:
                            car_matrix_object[pc_grid[ind, 0], pc_grid[ind,
                                                                       1]] = 1
                            find_neighbor = 1
                            continue
                    if find_neighbor == 0:
                        # remove isolated points
                        object_matrix_inflated[pc_grid[ind, 0], pc_grid[ind,
                                                                        1]] = 0

            # use connected components to find the car
            matrix_labels, num = label(object_matrix_inflated,
                                       connectivity=2,
                                       return_num=True)
            find_flag = 0
            for num_ind in range(num + 1):
                label_xy = np.array(np.where(matrix_labels == num_ind))
                if num_ind != 0:
                    for ind in range(label_xy.shape[1]):
                        if car_matrix_object[label_xy[0, ind],
                                             label_xy[1, ind]] == 1:
                            car_matrix_object[label_xy[0, :],
                                              label_xy[1, :]] = 1
                            find_flag = 1
                        if find_flag == 1:
                            break
                        # print(label_xy.shape[1], ind)

            pc_grid = np.transpose(np.array(np.where(car_matrix_object == 1)))
            object_matrix_inflated[pc_grid[:, 0], pc_grid[:, 1]] = 0

            # inflate the rest of the object matrix
            # if point_update == 1:
            #     pc_grid = np.transpose(np.array(np.where(object_matrix_inflated == 1)))
            #     for ind in range(pc_grid.shape[0]):
            #         neighbors = find_neighbours(pc_grid[ind, 0], pc_grid[ind, 1], dim_x_object, dim_y_object, option = 1)
            #         for neighbor in neighbors:
            #             object_matrix_inflated[neighbor[0], neighbor[1]] = 1
            #     pc_grid = np.transpose(np.array(np.where(object_matrix_inflated == 1)))
            #     point_update = 0

        car_matrix_object_big = rescale(car_matrix_object,
                                        grid_res / object_res,
                                        anti_aliasing=False)
        object_matrix_big = rescale(object_matrix_inflated,
                                    grid_res / object_res,
                                    anti_aliasing=False)

        occupancy_grid = OccupancyGrid(dim_x, dim_y)
        occupancy_grid.matrix[np.where(car_matrix_object_big > 0)] = 2
        occupancy_grid.matrix[car_peri_coords_x,
                              car_peri_coords_y] = 3  # perimeter line
        occupancy_grid.matrix[np.where(object_matrix_big > 0)] = 1
        occupancy_grid.update_image()
        send_array(socket_grid, occupancy_grid.image)