Exemplo n.º 1
0
def test_box3D_csv():
    """Write a small list of 3D boxes to a CSV file in the output directory."""
    # Sample boxes: (psi_rad, x, y, z, length, width, height)
    box3D_list = [
        box3D_object.Box3DObject(0.1, 2.3, 6.3, 0.0, 2.0, 3.0, 1.0),
        box3D_object.Box3DObject(0.32, 2.3, 7.3, 0.0, 2.0, 3.0, 1.0),
        box3D_object.Box3DObject(-1.3, 5.3, 2.3, 0.0, 2.0, 3.0, 1.0),
    ]

    # Create output dir if not created
    os.makedirs(OUTPUT_DIR_PATH, exist_ok=True)

    # Save test copy
    csv_path = os.path.join(OUTPUT_DIR_PATH, 'test_box3D.csv')
    box3D_object.Box3DObject.to_csv(csv_path, box3D_list)
Exemplo n.º 2
0
def test_box3dD(show_image = False):
    """Project a single 3D box onto the Brest test image.

    Args:
        show_image: If True, show the annotated image and wait for a key press
    """
    # Paths to the camera calibration and the test image:
    cam_model_path = 'traj_ext/camera_calib/calib_file/brest/brest_area1_street_cfg.yml'
    image_path = 'traj_ext/camera_calib/calib_file/brest/brest_area1_street.jpg'

    # Create the test box: (psi_rad, x, y, z, length, width, height)
    box3D = box3D_object.Box3DObject(0.3, 2.3, -4.3, 0.0, 4.0, 2.0, 1.0)

    # Open image and camera model
    cam_model = cameramodel.CameraModel.read_from_yml(cam_model_path)
    image = cv2.imread(image_path)

    # Display on image
    box3D.display_on_image(image, cam_model)

    if show_image:
        cv2.imshow('Test 3D box', image)
        # Block until any key is pressed (the original 'q' check was a no-op)
        cv2.waitKey(0)
Exemplo n.º 3
0
def compute_cost_mono(opti_params, im_size, cam_model, mask, param_fix):
    """Overlap cost between a detection mask and a candidate 3D box's mask.

    Args:
        opti_params: Optimized parameters [psi_rad, x, y]
        im_size: Image size
        cam_model: Camera model used for projection
        mask: Reference (detection) mask
        param_fix: Fixed parameters [z, l, w, h]

    Returns:
        Overlap score between the detection mask and the box mask
    """
    # Unpack the optimized yaw and ground position
    psi_rad, x, y = opti_params[0], opti_params[1], opti_params[2]

    # Unpack the fixed height offset and box dimensions
    z, l, w, h = param_fix[0], param_fix[1], param_fix[2], param_fix[3]

    # Build the candidate box and rasterize it into an image mask
    candidate_box = box3D_object.Box3DObject(psi_rad, x, y, z, l, w, h)
    candidate_mask = candidate_box.create_mask(cam_model, im_size)

    # Score how well the candidate mask matches the detection mask
    overlap_score, _, _ = overlap_mask(mask, candidate_mask)

    return overlap_score
    def display_conflict_point_on_image(self, time_ms, cam_model, image_current_conflict, traj_list, TTC_label = True, \
                                        is_hd_map = False):
        """Draw conflict points, connecting lines, 3D boxes and a TTC label.

        Args:
            time_ms: Timestamp in ms used to look up the trajectory points
            cam_model: Camera model used to project ground points to pixels
            image_current_conflict: Image to annotate (may be None)
            traj_list: Trajectories searched for the two conflicting track ids
            TTC_label: If True, write the TTC value as text on the image
            is_hd_map: If True, offset the TTC label for the HD-map view

        Returns:
            The annotated image (returned unchanged when the image is None or
            the TTC indicates the vehicles have already collided)
        """

        # Only draw when the image exists and TTC > 0.01: a TTC of 0.01 means
        # the vehicles already collided, so no prediction drawing is needed
        if not (image_current_conflict is None) and (self._TTC > 0.01):

            # In order: draw conflict points, write the TTC, draw the lines
            # Conflict points: convert ground-plane coordinates to image pixels
            pt_conflict_A = cam_model.project_points(
                np.array([(self._point_A_x, self._point_A_y, 0.0)]))
            pt_conflict_A = (int(pt_conflict_A[0]), int(pt_conflict_A[1]))
            pt_conflict_B = cam_model.project_points(
                np.array([(self._point_B_x, self._point_B_y, 0.0)]))
            pt_conflict_B = (int(pt_conflict_B[0]), int(pt_conflict_B[1]))
            # Conflict points are drawn as large filled red circles
            image_current_conflict = cv2.circle(image_current_conflict,
                                                pt_conflict_A, 4, (0, 0, 255),
                                                -1)
            image_current_conflict = cv2.circle(image_current_conflict,
                                                pt_conflict_B, 4, (0, 0, 255),
                                                -1)

            # Lines between the two vehicles and the conflict points:
            # fetch both trajectories and their points at the current time
            traj_point_A = TrajPoint(0, 0, 0, 0, 0, 0)
            traj_point_B = TrajPoint(0, 0, 0, 0, 0, 0)
            length_A, width_A, height_A = 0, 0, 0
            length_B, width_B, height_B = 0, 0, 0
            for traj in traj_list:

                if traj.get_id() == self._track_id_A:
                    traj_point_A = traj.get_point_at_timestamp(time_ms)
                    length_A, width_A, height_A = traj.get_size()

                if traj.get_id() == self._track_id_B:
                    traj_point_B = traj.get_point_at_timestamp(time_ms)
                    length_B, width_B, height_B = traj.get_size()

            pt_pix_A = cam_model.project_points(
                np.array([(traj_point_A.x, traj_point_A.y, 0.0)]))
            pt_pix_A = (int(pt_pix_A[0]), int(pt_pix_A[1]))
            pt_pix_B = cam_model.project_points(
                np.array([(traj_point_B.x, traj_point_B.y, 0.0)]))
            pt_pix_B = (int(pt_pix_B[0]), int(pt_pix_B[1]))
            # Vehicle-to-conflict-point links: solid red lines, 1 px wide
            image_current_conflict = cv2.line(image_current_conflict, pt_pix_A,
                                              pt_conflict_A, (0, 0, 255), 1)
            image_current_conflict = cv2.line(image_current_conflict, pt_pix_B,
                                              pt_conflict_B, (0, 0, 255), 1)
            # The two involved vehicles are linked by a solid blue line
            image_current_conflict = cv2.line(image_current_conflict, pt_pix_A,
                                              pt_pix_B, (255, 0, 0), 2)

            # Draw the 3D bounding boxes in the HD-map view image

            # NOTE(review): consider guarding this with is_hd_map, i.e.
            # if is_hd_map: draw the 3D boxes (as in the original visualizer)
            #if is_hd_map:
            # Draw the 3D boxes of vehicles A and B on the HD map
            box3D_A = box3D_object.Box3DObject(traj_point_A.psi_rad, \
                                               self._point_A_x, \
                                               self._point_A_y, \
                                               0.0, \
                                               length_A, \
                                               width_A, \
                                               height_A)
            box3D_B = box3D_object.Box3DObject(traj_point_B.psi_rad, \
                                               self._point_B_x, \
                                               self._point_B_y, \
                                               0.0, \
                                               length_B, \
                                               width_B, \
                                               height_B)
            # Display 3D box on image (drawn in red)
            image_current_conflict = box3D_A.display_on_image(
                image_current_conflict, cam_model, (0, 0, 255))
            image_current_conflict = box3D_B.display_on_image(
                image_current_conflict, cam_model, (0, 0, 255))
            # The box corner points are projected; the projections are drawn

            # Label the TTC time and conflict type (blue text, thickness 2)
            if TTC_label:

                if is_hd_map:
                    pt_text = cam_model.project_points(
                        np.array([(self._point_A_x + 5, self._point_A_y, 0.0)
                                  ]))
                else:
                    pt_text = cam_model.project_points(
                        np.array([(self._point_A_x, self._point_A_y - 5, 0.0)
                                  ]))
                pt_text = (int(pt_text[0]), int(pt_text[1]))

                image_current_conflict = cv2.putText(image_current_conflict, 'TTC:'+ str(round(float(self._TTC), 2)) + 's',\
                                                     pt_text, cv2.FONT_HERSHEY_PLAIN, 2.5, (255, 0, 0), 2)

        return image_current_conflict
Exemplo n.º 5
0
def find_3Dbox(mask, roi, cam_model, im_size, box_size_lwh):
    """Find the 3D box corresponding to a detection mask.

    Fits yaw and ground position of a fixed-size 3D box by maximizing the
    overlap between the box's projected mask and the detection mask.

    Args:
        mask (TYPE): Bool array of the size of the image (detection mask)
        roi (TYPE): Corner coordinates of the ROI (2D bounding box) of the
            detected object, ordered [y_1, x_1, y_2, x_2]
        cam_model (TYPE): Camera model
        im_size (TYPE): Size of the image (tuple)
        box_size_lwh (TYPE): Box size: length, width, height in meters

    Returns:
        TYPE: box3D object fitted to the mask
    """
    # Get ROI coordinates (roi stores rows first: [y_1, x_1, y_2, x_2])
    x_1 = int(roi[1])
    y_1 = int(roi[0])
    x_2 = int(roi[3])
    y_2 = int(roi[2])

    # Rough initial 3D position of the object:
    # re-project the center of the ROI on the ground plane
    pt_image = (int((x_1 + x_2) / 2), int((y_1 + y_2) / 2))
    pos_FNED_init = cam_model.projection_ground(0, pt_image)
    pos_FNED_init.shape = (1, 3)

    # Initial 3D box parameters:
    # x, y: position in F_NED of the center of the bottom side - meters
    x = float(pos_FNED_init[0, 0])
    y = float(pos_FNED_init[0, 1])
    # z: assumed on the ground plane
    z = float(0.0)
    # Box dimensions in meters, fixed (e.g. defined by the class ID)
    l = float(box_size_lwh[0])
    w = float(box_size_lwh[1])
    h = float(box_size_lwh[2])

    # Fixed parameters are identical across all optimization runs
    param_fix = [z, l, w, h]

    # Run the optimization with different initial yaw guesses.
    # This avoids getting stuck in a local optimum — the optimizer has a
    # hard time finding a good orientation from a single starting yaw.
    param_min = None
    for psi_deg in range(0, 180, 60):

        psi_rad = np.deg2rad(psi_deg)

        # We only optimize yaw, position X, position Y
        p_init = [psi_rad, x, y]

        # Run optimizer: good methods: COBYLA, Powell
        param = opt.minimize(compute_cost_mono,
                             p_init,
                             method='Powell',
                             args=(im_size, cam_model, mask, param_fix),
                             options={
                                 'maxfev': 1000,
                                 'disp': True
                             })

        # Keep the best result among the runs with different initial guesses
        if param_min is None or param.fun < param_min.fun:
            param_min = param

    param = param_min

    # Retrieve the optimized 3D box parameters
    psi_rad = round(param.x[0], 4)
    x = round(param.x[1], 4)
    y = round(param.x[2], 4)

    z = round(param_fix[0], 4)
    l = round(param_fix[1], 4)
    w = round(param_fix[2], 4)
    h = round(param_fix[3], 4)

    # Construct the fitted 3D box
    return box3D_object.Box3DObject(psi_rad, x, y, z, l, w, h)
Exemplo n.º 6
0
    # Construct points
    psi_rad = 0.0
    x = -0.7
    y = 0.8
    z = 0
    l = 4.5
    w = 1.8
    h = -1.6

    # keep looping until the 'q' key is pressed
    while True:

        im_current_1 = copy.copy(im_1)
        im_current_2 = copy.copy(im_2)

        box3D = box3D_object.Box3DObject(psi_rad, x, y, z, l, w, h)

        # pt = np.array([(x,  y, z)])
        # (pt_img, jacobian) = cv2.projectPoints(pt, rot_CF1_F, trans_CF1_F, cam_matrix_1, dist_coeffs_1)
        # pt_img = (int(pt_img[0][0][0]), int(pt_img[0][0][1]));
        # cv2.circle(im_current, pt_img, 5, (0,255, 255), -1)

        box3D.display_on_image(im_current_1, cam_model_1)
        box3D.display_on_image(im_current_2, cam_model_2)

        cv2.imshow("Camera 1", im_current_1)
        cv2.imshow("Camera 2", im_current_2)

        print("Box Params:")
        print("x = {};".format(x))
        print("y = {};".format(y))
    def display_on_image(self,
                         time_ms,
                         image,
                         cam_model,
                         only_complete=False,
                         color_text=(0, 0, 255),
                         no_label=False,
                         custom_text=None,
                         complete_marker=False,
                         velocity_label=False):
        """Display the trajectory state at time_ms on the image.

        Draws the 3D box, a center dot and a text label for this trajectory,
        and builds the matching 2D detection object from the projected box.

        Args:
            time_ms (TYPE): Time in ms at which to sample the trajectory
            image (TYPE): Image to draw on (may be None)
            cam_model (TYPE): Camera model used to project ground points
            only_complete (bool, optional): Only draw complete trajectories
            color_text (tuple, optional): Text color for the label
            no_label (bool, optional): If True, skip drawing the text label
            custom_text (None, optional): Text overriding the default id label
            complete_marker (bool, optional): Circle incomplete trajectories
            velocity_label (bool, optional): Include speed in the label

        Returns:
            Tuple (image, det_track): the annotated image and the DetObject
            built from the projected 3D box (det_track is None when nothing
            was drawn)
        """

        # Make sure the image is not null
        det_track = None
        if not (image is None):

            # If only_complete = True, then the trajectory has to be complete
            if (not only_complete) or self.complete:

                # Get trajectory point for specific time_ms
                traj_point = self.get_point_at_timestamp(time_ms)

                if traj_point:

                    # Create 3D Box at the trajectory point, on the ground (z=0)
                    length, width, height = self.get_size()
                    box3D = box3D_object.Box3DObject(traj_point.psi_rad,\
                                                     traj_point.x,\
                                                     traj_point.y,\
                                                     0.0,\
                                                     length,\
                                                     width,\
                                                     height)

                    # Display 3D box on image
                    image = box3D.display_on_image(image,
                                                   cam_model,
                                                   color=self.get_color())

                    # Create a 2D box from the 3D box
                    det_2Dbox = box3D.get_projected_2Dbox(cam_model)
                    det_track = DetObject(self.get_id(), self._agent_type,
                                          det_2Dbox, 0.99, self.get_id())

                    # Reproject the center point on the satellite image
                    pt_pix = cam_model.project_points(
                        np.array([(traj_point.x, traj_point.y, 0.0)]))
                    pt_pix = (int(pt_pix[0]), int(pt_pix[1]))
                    image = cv2.circle(image, pt_pix, 3, self.get_color(), -1)

                    # Add velocity annotation to the track:
                    if velocity_label:
                        v = np.sqrt(traj_point.vx * traj_point.vx +
                                    traj_point.vy * traj_point.vy)
                        text = "id:%i v:%.2fm/s" % (self.get_id(), v)

                    else:
                        # Add ID annotation to the track:
                        text = "id:%i" % (self.get_id())

                    if not (custom_text is None):
                        text = custom_text

                    if not no_label:
                        image = cv2.putText(image, text, pt_pix,
                                            cv2.FONT_HERSHEY_COMPLEX, 0.6,
                                            color_text, 1)

                    # Highlight incomplete trajectories with a red ring
                    if complete_marker:
                        if not self.complete:
                            image = cv2.circle(image, pt_pix, 20, (0, 0, 255),
                                               3)

        return image, det_track
    def display_on_image_02time_ms(self,
                                   time_ms,
                                   image,
                                   cam_model,
                                   only_complete=False,
                                   color_text=(0, 0, 255),
                                   no_label=False,
                                   custom_text=None,
                                   complete_marker=False,
                                   velocity_label=False):
        """Display the trajectory history from time 0 up to time_ms.

        Every past point is drawn as a small dot; the current point (at
        exactly time_ms) additionally gets the 3D box, label and markers,
        mirroring display_on_image.

        Args:
            time_ms (TYPE): Current time in ms; history is drawn from 0 to here
            image (TYPE): Image to draw on (may be None)
            cam_model (TYPE): Camera model used to project ground points
            only_complete (bool, optional): Only draw complete trajectories
            color_text (tuple, optional): Text color for the label
            no_label (bool, optional): If True, skip drawing the text label
            custom_text (None, optional): Text overriding the default id label
            complete_marker (bool, optional): Circle incomplete trajectories
            velocity_label (bool, optional): Include speed in the label

        Returns:
            Tuple (image, det_track): the annotated image and the DetObject
            built for the current point (None if nothing was drawn)
        """
        det_track = None
        if not (image is None):

            # If only_complete = True, then the trajectory has to be complete
            if (not only_complete) or self.complete:
                # Draw the trajectory points from the start up to now.
                # NOTE(review): iterates every single millisecond — assumes
                # points are retrievable per-ms; may be slow for long horizons
                for i in range(0, time_ms + 1):
                    # Get trajectory point for time i (ms)
                    traj_point = self.get_point_at_timestamp(i)

                    if traj_point:  # Only if a point exists at this time

                        if (time_ms == i):  # Only the current point gets a 3D box
                            # Create 3D Box
                            length, width, height = self.get_size()
                            box3D = box3D_object.Box3DObject(traj_point.psi_rad, \
                                                             traj_point.x, \
                                                             traj_point.y, \
                                                             0.0, \
                                                             length, \
                                                             width, \
                                                             height)

                            # Display 3D box on image
                            image = box3D.display_on_image(
                                image, cam_model, color=self.get_color())

                            # Create a 2D box from the 3D box
                            det_2Dbox = box3D.get_projected_2Dbox(cam_model)
                            det_track = DetObject(self.get_id(),
                                                  self._agent_type, det_2Dbox,
                                                  0.99, self.get_id())

                            # Reproject on the satellite image
                            pt_pix = cam_model.project_points(
                                np.array([(traj_point.x, traj_point.y, 0.0)]))
                            pt_pix = (int(pt_pix[0]), int(pt_pix[1]))
                            image = cv2.circle(image, pt_pix, 3,
                                               self.get_color(), -1)

                            # Add velocity annotation to the track:
                            if velocity_label:
                                v = np.sqrt(traj_point.vx * traj_point.vx +
                                            traj_point.vy * traj_point.vy)
                                text = "id:%i v:%.2fm/s" % (self.get_id(), v)

                            else:
                                # Add ID annotation to the track:
                                text = "id:%i" % (self.get_id())

                            if not (custom_text is None):
                                text = custom_text

                            if not no_label:
                                image = cv2.putText(image, text, pt_pix,
                                                    cv2.FONT_HERSHEY_COMPLEX,
                                                    0.6, color_text, 1)

                            # Highlight incomplete trajectories with a red ring
                            if complete_marker:
                                if not self.complete:
                                    image = cv2.circle(image, pt_pix, 20,
                                                       (0, 0, 255), 3)

                        else:  # Past points: draw a small dot at the position
                            # Reproject on the satellite image
                            pt_pix = cam_model.project_points(
                                np.array([(traj_point.x, traj_point.y, 0.0)]))
                            pt_pix = (int(pt_pix[0]), int(pt_pix[1]))
                            image = cv2.circle(image, pt_pix, 2,
                                               self.get_color(), 1)

        return image, det_track