def draw_boxes_3D(boxes, color):
    """Draw the projected 3D wireframe of each box onto the global image.

    Relies on module/closure-level ``img``, ``P2`` and ``BOX_CONNECTIONS``;
    each box must provide ``get_corners()`` returning 3D corner points.
    """
    for b in boxes:
        # (2, n_corners) integer pixel coordinates of the projected corners.
        pts2d = project(P2, b.get_corners()).astype(np.int32)
        for i, j in BOX_CONNECTIONS:
            p_from = (pts2d[0, i], pts2d[1, i])
            p_to = (pts2d[0, j], pts2d[1, j])
            cv2.line(img, p_from, p_to, color, 1)
# Example #2
 def get_world_pts(pt_cloud,
                   bev_width,
                   bev_length,
                   image_downsampling_factor,
                   P2,
                   parts=4):
     """Map every BEV grid cell to its nearest LiDAR point.

     Args:
         pt_cloud: point cloud, (3, N) or (N, 3); transposed to (3, N).
         bev_width, bev_length: BEV grid dimensions in cells.
         image_downsampling_factor: divisor applied to projected pixel coords.
         P2: camera projection matrix passed to ``project``.
         parts: number of chunks for the kNN query (memory trade-off).

     Returns:
         (nearest_projected, geo_feature): image coordinates of each cell's
         nearest point, shape (bev_length, bev_width, 2), and the 3D offset
         nearest - cell_center, shape (bev_length, bev_width, 3).
     """
     # https://www.sicara.ai/blog/2017-07-05-fast-custom-knn-sklearn-cython
     if pt_cloud.shape[0] != 3:
         pt_cloud = pt_cloud.T
     # World coordinates of every BEV cell, row-major (one per (i, j)).
     # NOTE(review): 80/70 presumably the metric extent passed to bev2world —
     # confirm against its signature.
     world_pts = [
         bev2world(j, i, bev_width, bev_length, 80, 70)
         for i in range(bev_length)
         for j in range(bev_width)
     ]
     n = len(world_pts)
     all_inds = []
     # Query in chunks to bound memory. Bug fix: the upper bound is
     # (i + 1) * n // parts, so the remainder cells are covered when n is
     # not divisible by parts (the old fixed-size chunks dropped them and
     # the final reshape then failed).
     for i in range(parts):
         cur_part = np.array(world_pts[i * n // parts:(i + 1) * n //
                                       parts]).T
         _, inds = knn.knn(cur_part.astype(np.float32),
                           pt_cloud.astype(np.float32), 1)
         inds = np.squeeze(inds) - 1  # knn returns 1-based indices
         all_inds = all_inds + inds.tolist()
     world_pts = np.array(world_pts).T
     nearest = pt_cloud[:, all_inds]
     geo_feature = nearest - world_pts
     nearest_projected = project(P2, nearest).astype(
         np.int32).T // image_downsampling_factor
     return nearest_projected.reshape(
         (bev_length, bev_width, 2)), geo_feature.reshape(
             (bev_length, bev_width, 3))
# Example #3
 def draw_boxes_3D(boxes, color_dict):
     """Overlay each box's projected 3D wireframe on the global image.

     The line colour is looked up by box class in ``color_dict``, falling
     back to ``DEFAULT_COLOR`` for unknown classes.
     """
     for b in boxes:
         pts = transforms_3D.project(P2, get_corners_3D(b)).astype(np.int32)
         clr = color_dict.get(b.cls, DEFAULT_COLOR)
         for i, j in BOX_CONNECTIONS_3D:
             cv2.line(img, (pts[0, i], pts[1, i]), (pts[0, j], pts[1, j]),
                      clr, 1)
 def compute_contfuse_mapping(self, t, world_pts, nearest, bev_width,
                              bev_length, image_downsampling_factor):
     """Build the BEV-to-image mapping and geometric features for frame t.

     Returns (mapping, geo): ``mapping`` holds, for each BEV cell, the
     downsampled image coordinates of its nearest LiDAR point in
     (row, col) order, clipped to the image bounds; ``geo`` is the 3D
     offset nearest - cell_center, shape (bev_length, bev_width, 3).
     """
     _, _, P2 = self.get_calib(t)
     geo = (nearest - world_pts).reshape((bev_length, bev_width, 3))
     projected = project(P2, nearest).astype(np.int32).T
     mapping = (projected // image_downsampling_factor).reshape(
         (bev_length, bev_width, 2))
     # Swap (x, y) -> (row, col); fancy-index RHS copies, so this is a swap.
     mapping[:, :, (0, 1)] = mapping[:, :, (1, 0)]
     # Per-factor clip bounds for the downsampled image.
     # NOTE(review): values match 375 // f and 1242 // f except 620 vs 621
     # at f == 2 — kept exactly as in the original; confirm intended.
     clip_bounds = {2: (187, 620), 4: (93, 310), 8: (46, 155)}
     if image_downsampling_factor in clip_bounds:
         max_row, max_col = clip_bounds[image_downsampling_factor]
         mapping[:, :, 0] = np.clip(mapping[:, :, 0], 0, max_row)
         mapping[:, :, 1] = np.clip(mapping[:, :, 1], 0, max_col)
     return mapping, geo
def get_contfuse_data(pt_cloud,
                      bev_width,
                      bev_length,
                      image_downsampling_factor,
                      P2,
                      parts=4):
    """Compute the ContFuse BEV-to-image mapping and geometric features.

    Args:
        pt_cloud: LiDAR point cloud passed to ``get_nearest_neighbour``.
        bev_width, bev_length: BEV grid dimensions in cells.
        image_downsampling_factor: divisor applied to projected pixel coords.
        P2: camera projection matrix passed to ``project``.
        parts: number of chunks for the nearest-neighbour query.

    Returns:
        dict with 'mapping' (bev_length, bev_width, 2) image coordinates
        and 'geo_feature' (bev_length, bev_width, 3) nearest-point offsets.
    """
    # Bug fix: forward the caller's ``parts`` (it was hard-coded to 4,
    # silently ignoring the parameter). Debug prints removed.
    world_pts, nearest = get_nearest_neighbour(pt_cloud,
                                               bev_width,
                                               bev_length,
                                               image_downsampling_factor,
                                               P2,
                                               parts=parts)
    geo_feature = nearest - world_pts
    nearest_projected = project(P2, nearest).astype(
        np.int32).T // image_downsampling_factor
    return {
        'mapping': nearest_projected.reshape((bev_length, bev_width, 2)),
        'geo_feature': geo_feature.reshape((bev_length, bev_width, 3))
    }
def fov_filter(pts, P, img_size, decorations=None):
    """Keep only the points whose projection lies inside the image.

    Args:
        pts: (3, N) points; filtered along the second axis.
        P: projection matrix passed to ``project``.
        img_size: (height, width) of the image.
        decorations: optional (D, N) per-point attributes filtered with pts.

    Returns:
        The filtered points, or ``(pts, decorations)`` when decorations
        are supplied.
    """
    pts_projected = project(P, pts)
    mask = ((pts_projected[0] >= 0) & (pts_projected[0] <= img_size[1]) &
            (pts_projected[1] >= 0) & (pts_projected[1] <= img_size[0]))
    pts = pts[:, mask]
    # Bug fix: the original one-liner parsed as
    # `return (pts if decorations is None else pts), decorations[:, mask]`,
    # so it always built a tuple and indexed None when decorations was None.
    if decorations is None:
        return pts
    return pts, decorations[:, mask]
    def get_range_view(self,
                       img=None,
                       pts=None,
                       ref=None,
                       P2=None,
                       gt_boxes=None,
                       pred_boxes=None,
                       out_type=None):
        """Render a camera-view image with points coloured by a channel.

        Args:
            img: optional base image (cloned); a 375x1242x1 zero canvas
                is used when omitted.
            pts: optional point cloud, (3, N) or (N, 3).
            ref: optional per-point reflectance, (1, N) or (N, 1); only
                read when out_type == 'intensity'.
            P2: projection matrix used for points and 3D boxes.
            gt_boxes: optional list of Box2D or Box3D to draw in GT_COLOR.
            pred_boxes: currently unused; kept for interface compatibility.
            out_type: one of 'depth', 'intensity', 'height'.

        Returns:
            The rendered image, or None for an unknown out_type.
        """
        if out_type not in ['depth', 'intensity', 'height']:
            return None

        # Normalize orientations: pts -> (3, N), ref -> (1, N).
        # Bug fix: both default to None, so guard before touching .shape
        # (the original crashed when called without pts/ref).
        if pts is not None and pts.shape[0] != 3:
            pts = pts.T
        if ref is not None and ref.shape[0] != 1:
            ref = ref.T

        if img is not None:
            img = np.copy(img)  # Clone so the caller's image is untouched.
        else:
            img = np.zeros((375, 1242, 1))

        def draw_boxes_2D(boxes, color):
            # Axis-aligned rectangles from (x1, y1, x2, y2).
            for box in boxes:
                cv2.rectangle(img, (int(box.x1), int(box.y1)),
                              (int(box.x2), int(box.y2)), color, 1)

        def draw_boxes_3D(boxes, color):
            # Projected 3D wireframes via BOX_CONNECTIONS edge list.
            for box in boxes:
                corners = project(P2, box.get_corners()).astype(np.int32)
                for start, end in BOX_CONNECTIONS:
                    x1, y1 = corners[:, start]
                    x2, y2 = corners[:, end]
                    cv2.line(img, (x1, y1), (x2, y2), color, 1)

        if gt_boxes is not None and len(gt_boxes) > 0:
            if isinstance(gt_boxes[0], Box2D):
                draw_boxes_2D(gt_boxes, GT_COLOR)
            elif isinstance(gt_boxes[0], Box3D):
                draw_boxes_3D(gt_boxes, GT_COLOR)

        if pts is not None:
            pts_projected = project(P2, pts).astype(np.int32).T
            for i in range(pts_projected.shape[0]):
                if out_type == 'depth':
                    # Depth normalised by an assumed 70 m range.
                    clr = pts.T[i][2] / 70.
                elif out_type == 'intensity':
                    clr = ref.T[i][0]
                else:  # 'height' (only remaining valid out_type)
                    clr = 1. - (pts.T[i][1] + 1.) / 4.
                cv2.circle(img, (pts_projected[i][0], pts_projected[i][1]), 4,
                           float(clr), -1)

        return img