Code example #1
def check_reprojection(img, marker_pos, marker_img_pos, rvec, tvec, cam_mtx,
                       dist_coeffs):
    """Project marker positions from 3D to the image plane and compare with user indicated positions."""
    marker_img_pos_proj, _ = cv2.projectPoints(marker_pos, rvec, tvec, cam_mtx,
                                               dist_coeffs)
    marker_img_pos_proj = marker_img_pos_proj.squeeze()

    # Check accuracy by reprojecting calibration markers
    error = np.sqrt(np.mean(np.sum(
        (marker_img_pos - marker_img_pos_proj) ** 2, axis=1)))
    print(f"RMS error: {error:.3f} px")

    for p in marker_img_pos:
        draw_marker(*p, img, color=(0, 0, 255))
    for p in marker_img_pos_proj:
        draw_marker(*p, img, color=(0, 255, 0))

    # Draw world axis
    cv2.drawFrameAxes(img, cam_mtx, dist_coeffs, rvec, tvec, 1)

    win_name = 'Reprojection'
    cv2.namedWindow(win_name, cv2.WINDOW_GUI_NORMAL)
    cv2.resizeWindow(win_name, 960, 540)
    cv2.imshow(win_name, img)
    print("Press any key to continue.")
    cv2.waitKey(0)
    cv2.destroyAllWindows()
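A minimal driver for the helper above, as a sketch: `cv2.solvePnP` supplies the pose that is then checked by reprojection. The marker coordinates, pixel positions, intrinsics, and image path below are made-up placeholders, and the project's `draw_marker` helper is assumed to be in scope.

import cv2
import numpy as np

# Hypothetical calibration data: four coplanar markers (world units) and
# the pixel positions the user indicated for them.
marker_pos = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], dtype=np.float32)
marker_img_pos = np.array([[320, 400], [620, 410], [600, 180], [330, 170]], dtype=np.float32)
cam_mtx = np.array([[800, 0, 480], [0, 800, 270], [0, 0, 1]], dtype=np.float32)
dist_coeffs = np.zeros(5)

ok, rvec, tvec = cv2.solvePnP(marker_pos, marker_img_pos, cam_mtx, dist_coeffs)
img = cv2.imread('scene.jpg')  # placeholder path
check_reprojection(img, marker_pos, marker_img_pos, rvec, tvec, cam_mtx, dist_coeffs)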
Code example #2
File: cal_extr_track_node.py Project: poine/smocap
    def draw_debug_bgr(self, cam, img_cam=None):
        cnt_color, cntmax_color, thickness = (255,0,0), (0, 255,0), 3
        if self.img is None:
            return np.zeros((cam.h, cam.w, 3), dtype=np.uint8)
        else:
            if self.display_mode == 1: # input
                debug_img = self.img.copy()
            elif self.display_mode == 2: # aruco 
                debug_img = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)
                #debug_img = cv2.cvtColor(self.threshold1, cv2.COLOR_GRAY2BGR)
                #cv2.drawContours(debug_img, self.cnt_max, -1, cntmax_color, 3)
                #cv2.rectangle(debug_img, tuple(self.tl), tuple(self.br), (0,0,255), 1)

                if self.corners is not None:
                    aruco.drawDetectedMarkers(image=debug_img, corners=self.corners)
                if self.rvec is not None:
                    cv2.drawFrameAxes(debug_img, cam.K, cam.D, self.rvec, self.tvec, 0.1)
                self._draw_path(cam, debug_img)
                 
            elif self.display_mode == 3: # bird eye
                if self.bird_eye is None :
                    debug_img = self.img.copy()
                else:
                    chroma_blue, chroma_green = (187, 71, 0), (64, 177, 0)
                    debug_img = self.bird_eye.undist_unwarp_img(cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR), cam, fill_bg=chroma_green)
                    ui = cv_be.UnwarpedImage()
                    ui.draw_grid(debug_img, self.bird_eye, cam, 0.1)
                    ui.draw_path(debug_img, self.bird_eye, cam, self.track_path.points)

        self.draw_timing(debug_img)
        return debug_img
Code example #3
 def _updateFeatures(self, gate_centre, rvec, tvec, contour, bounding_box):
     '''Updates all the features on the frame.'''
     if gate_centre is not None:
         # Plot the centre of the bounding box (on frame, using centre coords,
         # radius, color, filled)
         # cv.circle(self.frame, gate_centre, 5, (255, 255, 255), -1)
         # Draw contours/box on frame, biggest one, all of them, color, thickness
         cv.drawContours(self.frame, contour, -1, (0, 255, 0), 6)
         # cv.drawContours(self.frame, [bounding_box], -1, (0, 255, 255), 2)
         # Draw the axes on frame using the calibration data and PnP results
         cv.drawFrameAxes(self.frame, config.CAMERA_MATRIX,
                          config.DISTORTION, rvec, tvec, 25)
Code example #4
def draw_axes(image, right_camera):
    """

    Draw axes on the chosen camera background

    :param image:
    :param right_camera:
    :return: nothing
    """
    extrinsic_camera_matrix, extrinsic_distortion_vector, extrinsic_rotation_vector, extrinsic_translation_vector \
        = get_extrinsic_parameters(right_camera)

    cv.drawFrameAxes(image, extrinsic_camera_matrix, extrinsic_distortion_vector, extrinsic_rotation_vector,
                     extrinsic_translation_vector, 1)
Code example #5
def plot_marker_axes(image_path, vectors, factor):
    """
    Used to plot multiple individual markers
    """
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
    dist_coeffs = vectors[str(factor)]['dist_coeffs']
    cam_matrix = vectors[str(factor)]['cam_matrix']
    marker_size = 0.025  # 25mm
    between_markers = 0.005  # 5mm
    length_of_axis = marker_size / 2

    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict)
    frame_markers = aruco.drawDetectedMarkers(img.copy(), corners, ids)
    rvecs, tvecs, objPoints = aruco.estimatePoseSingleMarkers(
        corners, marker_size, cam_matrix, dist_coeffs)
    for i in range(len(tvecs)):
        frame_markers = cv2.drawFrameAxes(frame_markers,
                                          cam_matrix,
                                          dist_coeffs,
                                          rvecs[i],
                                          tvecs[i],
                                          length_of_axis,
                                          thickness=2)
    plt.figure(figsize=(12, 6))
    plt.imshow(frame_markers, interpolation="nearest")
    return rvecs, tvecs
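Note: this example relies on the legacy ArUco API, which OpenCV 4.7 removed. A rough equivalent of the detection step under the newer object-oriented API (reusing the `gray` image above) is sketched below; `aruco.estimatePoseSingleMarkers` is likewise gone, and per-marker `cv2.solvePnP` with the marker's corner object points is the usual replacement.

aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)
detector = cv2.aruco.ArucoDetector(aruco_dict, cv2.aruco.DetectorParameters())
corners, ids, rejected = detector.detectMarkers(gray)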
Code example #6
File: fiducial_follow.py Project: zhangzifa/spot-sdk
    def get_position_lib(self, bbox, ints, source_name):
        """Compute transformation of 2d pixel coordinates to 3d camera coordinates."""
        camera = self.make_cam_mat(ints)
        best_bbox = (None, None, source_name)
        closest_dist = 1000
        for i in range(len(bbox)):
            obj_points, img_points = self.bbox_to_img_obj_pts(bbox[i])
            if self._cam_to_ext_guess[source_name][0]:
                # initialize the position estimate with the previous extrinsics solution
                # then iteratively solve for new position
                old_rvec, old_tvec = self._cam_to_ext_guess[source_name][1]
                _, rvec, tvec = cv2.solvePnP(obj_points, img_points, camera,
                                             np.zeros(
                                                 (5, 1)), old_rvec, old_tvec,
                                             True, cv2.SOLVEPNP_ITERATIVE)
            else:
                # Determine current extrinsic solution for the tag
                _, rvec, tvec = cv2.solvePnP(obj_points, img_points, camera,
                                             np.zeros((5, 1)))

            # Save extrinsics results to help speed up subsequent attempts to locate the bounding box
            self._cam_to_ext_guess[source_name] = (True, (rvec, tvec))

            if self._debug:
                self.back_prop_error(obj_points, img_points, rvec, tvec,
                                     camera)
                cv2.drawFrameAxes(self._image[source_name], camera,
                                  np.zeros((5, 1)), rvec, tvec, 100)
                if not self._display_images:
                    cv2.imshow('frame', self._image[source_name])
                    cv2.waitKey()
                    cv2.destroyAllWindows()

            dist = (float(tvec[0][0])**2 + float(tvec[1][0])**2 +
                    float(tvec[2][0])**2)**(.5) / 1000.0
            if dist < closest_dist:
                closest_dist = dist
                best_bbox = (tvec, rvec, source_name)

        # Flag indicating whether the best AprilTag has been found/located
        self._tag_not_located = best_bbox[0] is None and best_bbox[1] is None

        return best_bbox
Code example #7
def draw_reprojection(color, objPoints, imgPoints, cameraMatrix, distCoeffs, patternSize, squaresize, folder, i):
    """For a single image, reproject the points and draw the axes."""
    # Check the camera calibration by reprojection:
    # Show the axes
    ret, rvecs, tvecs = cv.solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs)
    img = cv.drawFrameAxes(color.copy(), cameraMatrix, distCoeffs, rvecs, tvecs, 3*squaresize, 5)
    cv.imwrite('{}reprojection_axes_{}.jpg'.format(folder, i), img)  # Z is blue, Y green, X red
    pts, jac = cv.projectPoints(objPoints, rvecs, tvecs, cameraMatrix, distCoeffs)
    img = cv.drawChessboardCorners(color.copy(), patternSize, pts, True)
    cv.imwrite('{}reprojection_points_{}.jpg'.format(folder, i), img)
    return img
Code example #8
def find_extrinsic_parameters(img, obj_points, img_points, camera_matrix, dist_coeffs, show=True, save=False,
                              prefix=''):
    if save:
        if prefix == '':
            return print('Please provide a prefix')
        np.savetxt('matrices/points/calibration_image_points/' + prefix + '_img_points', img_points)
        np.savetxt('matrices/points/calibration_object_points/' + prefix + '_obj_points', obj_points)

    obj_points = np.array(obj_points, 'float32')
    img_points = np.array(img_points, 'float32')
    size = img.shape[:2]

    # CALIB_FIX_INTRINSIC (a stereoCalibrate flag) and CALIB_CB_NORMALIZE_IMAGE
    # (a findChessboardCorners flag) do not apply to calibrateCamera; seeding
    # the optimisation with the known intrinsics is what is wanted here.
    cali_flag = cv2.CALIB_USE_INTRINSIC_GUESS
    retval, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera([obj_points], [img_points], size,
                                                                           camera_matrix,
                                                                           dist_coeffs, flags=cali_flag)

    (rotation_matrix, _) = cv2.Rodrigues(rvecs[0])

    if show:
        mean_error = 0
        # draw the axes
        img = cv2.drawFrameAxes(img, camera_matrix, dist_coeffs, rvecs[0], tvecs[0], 5)

        # redraw the source points and compute the mean reprojection error
        img_points2, jacobian = cv2.projectPoints(obj_points, rvecs[0], tvecs[0], camera_matrix, dist_coeffs)
        for px in img_points2:
            cv2.circle(img, (int(px[0][0]), int(px[0][1])), 10, (255, 255, 255), 5)
        cv2.namedWindow("errors", cv2.WINDOW_NORMAL)
        cv2.imshow('errors', img)
        cv2.waitKey(0)
        for i in range(0, len(img_points2)):
            mean_error += cv2.norm(img_points[i], img_points2[i][0], cv2.NORM_L2) / len(img_points2)
        print('mean error', mean_error)
    if save:
        np.savetxt('matrices/camera_matrices/extrinsic/' + prefix + '_camera_matrix', camera_matrix)
        np.savetxt('matrices/vectors/distortion/extrinsic/' + prefix + '_distortion_vector', dist_coeffs)
        np.savetxt('matrices/vectors/rotation/' + prefix + '_rotation_vector', rvecs[0])
        np.savetxt('matrices/vectors/translation/' + prefix + '_translation_vector', tvecs[0])
        np.savetxt('matrices/rotation_matrix/' + prefix + '_rotation_matrix', rotation_matrix)

    return camera_matrix, dist_coeffs, rotation_matrix, rvecs[0], tvecs[0]
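Since the intrinsics fed into this function are already known, `cv2.solvePnP` is arguably the more direct route to just the extrinsics; a minimal sketch reusing the variable names above:

ok, rvec, tvec = cv2.solvePnP(obj_points, img_points, camera_matrix, dist_coeffs)
rotation_matrix, _ = cv2.Rodrigues(rvec)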
Code example #9
 def draw_axes(self, img, R, t):
     img = cv2.drawFrameAxes(img, self.camera_matrix, self.dist_coefs, R, t, 30)
Code example #10
    def draw(self,
             image,
             color,
             thickness=1,
             view_matrix=None,
             camera_matrix=None,
             dist_coeffs=None):
        """Draws the track"""
        if self.is_confirmed():
            track_color = (0, 200, 0, 0)
            text_color = (50, 50, 50)
        else:
            if self.is_occluded():
                track_color = (0, 0, 200, 0.3)
                text_color = (250, 250, 250)
            else:
                track_color = (200, 0, 0, 0.3)
                text_color = (250, 250, 250)

        if self.is_confirmed():
            if self.is_located():
                if view_matrix is not None and \
                    camera_matrix is not None and \
                        dist_coeffs is not None:
                    sensor_pose = Vector6D().from_transform(
                        np.dot(np.linalg.inv(view_matrix),
                               self.pose.transform()))
                    rot = sensor_pose.rotation().to_array()
                    depth = sensor_pose.position().z
                    # for opencv convention
                    R = euler_matrix(rot[0][0], rot[1][0], rot[2][0], "rxyz")
                    rvec = cv2.Rodrigues(R[:3, :3])[0]
                    cv2.putText(image, "{0:.3}m".format(depth),
                                (self.bbox.xmin + 5, self.bbox.ymin - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, .6, (255, 255, 255),
                                2)
                    cv2.drawFrameAxes(image, camera_matrix, dist_coeffs, rvec,
                                      sensor_pose.position().to_array(), 0.1)
            cv2.rectangle(image, (self.bbox.xmin, self.bbox.ymax - 26),
                          (self.bbox.xmax, self.bbox.ymax), (200, 200, 200),
                          -1)
            # if self.mask is not None:
            #     mask = np.full((image.shape[0], image.shape[1], 1), 255)
            #     xmin = int(self.bbox.xmin)
            #     ymin = int(self.bbox.ymin)
            #     xmax = int(self.bbox.xmax)
            #     ymax = int(self.bbox.ymax)
            #     mask[ymin:ymax, xmin:xmax] = self.mask
            #     contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            #     cv2.drawContours(image, contours, 0, track_color, thickness)

            self.bbox.draw(image, track_color, 2)
            self.bbox.draw(image, text_color, 1)

            cv2.putText(image, "{}".format(self.id[:6]),
                        (self.bbox.xmax - 60, self.bbox.ymax - 8),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, text_color, 1)
            cv2.putText(image, self.label,
                        (self.bbox.xmin + 5, self.bbox.ymax - 8),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, text_color, 1)
            if "facial_landmarks" in self.features:
                self.features["facial_landmarks"].draw(image, track_color,
                                                       thickness)
        else:
            self.bbox.draw(image, track_color, 1)
Code example #11
File: scene_node.py Project: LAAS-HRI/uwds3
    def draw(self, image, color, thickness=1, view_pose=None, camera=None):
        """Draws the track
        """

        if self.is_confirmed() or self.is_occluded():
            track_color = (0, 200, 0)
            text_color = (50, 50, 50)
            mask_color = (0, 100, 0)
            if self.is_static():
                track_color = (0, 200, 0)
                text_color = (50, 50, 50)
                mask_color = (0, 100, 0)
        else:
            if self.is_lost():
                track_color = (0, 0, 200)
                text_color = (250, 250, 250)
                mask_color = (0, 0, 100)
            else:
                track_color = (200, 0, 0)
                text_color = (250, 250, 250)
                mask_color = (100, 0, 0)

        if self.is_confirmed():
            if self.is_located():
                if view_pose is not None and camera is not None:
                    view_matrix = view_pose.transform()
                    camera_matrix = camera.camera_matrix()
                    dist_coeffs = camera.dist_coeffs
                    sensor_pose = Vector6D().from_transform(np.dot(np.linalg.inv(view_matrix), self.pose.transform()))
                    rot = sensor_pose.rotation().to_array()
                    depth = sensor_pose.position().z
                    # for opencv convention
                    R = euler_matrix(rot[0][0], rot[1][0], rot[2][0], "rxyz")
                    rvec = cv2.Rodrigues(R[:3, :3])[0]
                    cv2.putText(image, "{:0.3}m".format(depth),
                                (self.bbox.xmin+5, self.bbox.ymin-5),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                .6,
                                (255, 255, 255),
                                2)
                    cv2.drawFrameAxes(image, camera_matrix, dist_coeffs,
                                      rvec,
                                      sensor_pose.position().to_array(), 0.1)
                else:
                    depth = self.bbox.depth
                    cv2.putText(image, "{0:.2}m".format(depth),
                                (self.bbox.xmin+5, self.bbox.ymin-5),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                .6,
                                (255, 255, 255),
                                2)
            if self.mask is not None:
                contours, hierarchy = cv2.findContours(self.mask.astype("uint8"), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                roi = image[int(self.bbox.ymin):int(self.bbox.ymax), int(self.bbox.xmin):int(self.bbox.xmax)]
                overlay = roi.copy()
                cv2.drawContours(overlay, contours, -1, mask_color, 2, cv2.LINE_8, hierarchy, 100)
                alpha = 0.6
                roi = cv2.addWeighted(overlay, alpha, roi, 1 - alpha, 0, roi)
            cv2.rectangle(image, (self.bbox.xmin, self.bbox.ymax-26),
                                 (self.bbox.xmax, self.bbox.ymax),
                                 (200, 200, 200), -1)

            self.bbox.draw(image, track_color, 2)
            self.bbox.draw(image, text_color, 1)

            cv2.putText(image,
                        "{}".format(self.id[:6]),
                        (self.bbox.xmax-75, self.bbox.ymax-8),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.6,
                        text_color,
                        1)

            if "facial_landmarks" in self.features:
                self.features["facial_landmarks"].draw(image,
                                                       track_color,
                                                       thickness)
        else:
            self.bbox.draw(image, track_color, 1)
Code example #12
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv.CAP_PROP_FRAME_HEIGHT, imsize[1])

rot, trans = None, None
while cv.waitKey(1) != 27:
    img = cap.read()[1]

    # detection with aruco
    if rot is None:
        corners, ids = cv.aruco.detectMarkers(img, adict)[:2]

        if ids is not None:
            rvecs, tvecs = cv.aruco.estimatePoseSingleMarkers(corners, marker_len, K, None)[:2]
            rot, trans = rvecs[0].ravel(), tvecs[0].ravel()

    # tracking and refinement with rapid
    if rot is not None:
        for i in range(5):  # multiple iterations
            ratio, rot, trans = cv.rapid.rapid(img, 40, line_len, obj_points, tris, K, rot, trans)
            if ratio < 0.8:
                # bad quality, force re-detect
                rot, trans = None, None
                break

    # drawing
    cv.putText(img, "detecting" if rot is None else "tracking", (0, 20), cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255))
    if rot is not None:
        cv.drawFrameAxes(img, K, None, rot, trans, marker_len)
    cv.imshow("tracking", img)
Code example #13
 def draw_axes(self, img, r, t):
     cv2.drawFrameAxes(img, self.camera_matrix, self.dist_coefficients, r,
                       t, 30)
Code example #14
def plot_gridboard_axes(image_path, vectors, factor, gridboards=None):
    """
    Used to plot multiple gridboards, with one large set of axes per gridboard
    """
    dist_coeffs = vectors[str(factor)]['dist_coeffs']
    cam_matrix = vectors[str(factor)]['cam_matrix']
    nx = 2
    ny = 2
    marker_size = 0.025  # 25mm
    between_markers = 0.005  # 5mm
    markers_per_board = nx * ny
    length_of_axis = (marker_size + between_markers) * nx - between_markers
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)
    offset = 0
    if not gridboards:
        gridboards = [
            aruco.GridBoard_create(nx,
                                   ny,
                                   marker_size,
                                   between_markers,
                                   aruco_dict,
                                   firstMarker=offset)
        ]
    print(image_path)
    frame = cv2.imread(image_path)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict)

    all_rvecs = []
    all_tvecs = []
    frame_with_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)

    #     print(f"ids={ids}")
    for i, board in enumerate(gridboards):
        # Go through all the corners / ids found in the image and keep only the
        # ones relevant to the current board. (np.delete returns a new array
        # rather than deleting in place, so build the subsets explicitly.)
        board_ids = [val[0] for val in board.ids]
        keep = [j for j, item in enumerate(ids) if item[0] in board_ids]
        ids_subset = ids[keep]
        corners_subset = [corners[j] for j in keep]
        # Reset the vectors
        rvec = np.ones(shape=(3, 1), dtype=float)
        tvec = np.ones(shape=(3, 1), dtype=float)
        # Estimate the Board's pose
        ret, rvec, tvec = aruco.estimatePoseBoard(corners_subset, ids_subset,
                                                  board, cam_matrix,
                                                  dist_coeffs, rvec, tvec)

        if ret == 0:  # if no gridboard was found
            print("Failed to get BoardPose")
            rvecs, tvecs, objPoints = aruco.estimatePoseSingleMarkers(
                corners, marker_size, cam_matrix, dist_coeffs)
            for k in range(len(tvecs)):  # k, not i, to avoid shadowing the board index
                frame_with_markers = cv2.drawFrameAxes(frame_with_markers,
                                                       cam_matrix,
                                                       dist_coeffs,
                                                       rvecs[k],
                                                       tvecs[k],
                                                       length_of_axis,
                                                       thickness=2)

        else:
            frame_with_markers = cv2.drawFrameAxes(frame_with_markers,
                                                   cam_matrix,
                                                   dist_coeffs,
                                                   rvec,
                                                   tvec,
                                                   length_of_axis,
                                                   thickness=2)
            all_rvecs.append(rvec)
            all_tvecs.append(tvec)
    plt.figure(figsize=(12, 6))
    plt.imshow(frame_with_markers, interpolation="nearest")
    return all_rvecs, all_tvecs
Code example #15
    # (The snippet begins mid-call; presumably the detected contours are drawn
    # onto a debug image, along these lines.)
    contours_img = cv2.drawContours(image.copy(),
                                    contours,
                                    -1, (0, 255, 0),
                                    thickness=3)
    # cv2.imshow("contours", contours_img)

    center = np.array([
        [0, 0, 0],
    ], dtype=np.float32)

    if rvecs is not None:
        imagePoints, jacobian = cv2.projectPoints(
            center, rvecs, tvecs,
            vision_pipeline.calibration_info.camera_matrix,
            vision_pipeline.calibration_info.dist_coeffs)
        imagePoints = imagePoints.reshape(-1, 2)
        image = cv2.drawFrameAxes(
            image, vision_pipeline.calibration_info.camera_matrix,
            vision_pipeline.calibration_info.dist_coeffs, rvecs, tvecs, 1)

        corner_img = cv2.circle(image,
                                tuple(imagePoints[0].astype(np.int32)),
                                3, (66, 244, 113),
                                thickness=3)

        #if dist > 10:
        cv2.imshow("corner_img", corner_img)

        # print(euler_angles)
        # print("dist: {0:0.2f} | angle (degrees): {1:0.2f}".format(dist, euler_angles[1]*180/math.pi))

    cv2.waitKey(1000 // 30)
Code example #16
    def observation_callback(self, rgb_image_msg):
        if self.camera_info is not None:

            bgr_image = self.bridge.imgmsg_to_cv2(rgb_image_msg)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            viz_frame = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            timer1 = cv2.getTickCount()

            detections = []
            if self.frame_count % self.n_frame == 0:
                detections = self.face_detector.detect(rgb_image)
            if self.frame_count % self.n_frame == 1:
                detections = self.detector.detect(rgb_image)
            self.frame_count += 1

            human_detections = [
                d for d in detections if d.class_label in self.body_parts
            ]
            object_detections = [
                d for d in detections if d.class_label not in self.body_parts
            ]

            object_tracks = self.object_tracker.update(rgb_image,
                                                       object_detections)
            human_tracks = self.human_tracker.update(rgb_image,
                                                     human_detections,
                                                     self.camera_matrix,
                                                     self.dist_coeffs)

            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer1)
            detection_fps = "Detection and track fps : %0.4fhz" % fps
            #print(detection_fps)

            cv2.putText(viz_frame, detection_fps, (5, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
            for track in self.human_tracker.tracks:
                #if track.is_confirmed():
                tl_corner = (int(track.bbox.left()), int(track.bbox.top()))
                br_corner = (int(track.bbox.right()), int(track.bbox.bottom()))
                # if track.class_label == "face":
                #     shape = self.landmark_estimator.estimate(rgb_image, track)
                #     for (x, y) in shape:
                #         cv2.circle(viz_frame, (x, y), 1, (0, 255, 0), -1)
                if track.class_label == "face":
                    rot = track.rotation
                    trans = track.translation
                    #rot, trans = track.get_head_pose()
                    if rot is not None and trans is not None:
                        transform = geometry_msgs.msg.TransformStamped()
                        transform.header.stamp = rospy.Time.now()
                        transform.header.frame_id = self.camera_frame_id
                        transform.child_frame_id = "gaze"
                        transform.transform.translation.x = trans[0]
                        transform.transform.translation.y = trans[1]
                        transform.transform.translation.z = trans[2]
                        q_rot = quaternion_from_euler(rot[0], rot[1], rot[2],
                                                      "rxyz")
                        transform.transform.rotation.x = q_rot[0]
                        transform.transform.rotation.y = q_rot[1]
                        transform.transform.rotation.z = q_rot[2]
                        transform.transform.rotation.w = q_rot[3]
                        self.tf_broadcaster.sendTransform(transform)
                        if track.uuid not in self.internal_simulator:
                            self.internal_simulator.load_urdf(
                                track.uuid, "face.urdf", trans, q_rot)
                        self.internal_simulator.update_entity(
                            track.uuid, trans, q_rot)
                        self.internal_simulator.get_human_visibilities(
                            trans, q_rot)
                        cv2.drawFrameAxes(viz_frame, self.camera_matrix,
                                          self.dist_coeffs,
                                          np.array(rot).reshape((3, 1)),
                                          np.array(trans).reshape(3, 1), 0.03)

                cv2.putText(viz_frame, track.uuid[:6],
                            (tl_corner[0] + 5, tl_corner[1] + 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (240, 0, 0), 2)
                cv2.putText(viz_frame, track.class_label,
                            (tl_corner[0] + 5, tl_corner[1] + 45),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (240, 0, 0), 2)
                cv2.rectangle(viz_frame, tl_corner, br_corner, (255, 255, 0),
                              2)
            viz_img_msg = self.bridge.cv2_to_imgmsg(viz_frame)
            self.visualization_publisher.publish(viz_img_msg)
Code example #17
 def drawAxes(self, img, R, t):
     img = cv2.drawFrameAxes(img, self.cameraMatrix,
                             self.distCoeefs, R, t, 30)
Code example #18
File: PNP_solver.py Project: framaz/eye_control
 def draw_axes(self, img, R, t):
     img = np.array(img, dtype=np.float64)
     img = cv2.drawFrameAxes(img, self.camera_matrix, self.dist_coeefs, R,
                             t, 30)
     return img
Code example #19
 # Compute Depths
 objectPose.center_glassdoor_XYZ = objectPose.depthCompute(center_glassdoor)
 objectPose.center_knob_XYZ = objectPose.depthCompute(center_knob)            
 # To fix origin wrt rgb_optical_frame to knob center
 objectPose.T_washOrigin_CRF[0,3] = objectPose.center_knob_XYZ[0][0]
 objectPose.T_washOrigin_CRF[1,3] = objectPose.center_knob_XYZ[0][1]
 objectPose.T_washOrigin_CRF[2,3] = objectPose.center_knob_XYZ[0][2]
 
 # Apply transformation from depth_optical_frame to base_link
 objectPose.T_wrt_mobilebase = objectPose.applyTransformation('base','camera_rgb_optical_frame',objectPose.T_washOrigin_CRF)           
 # Transformation from base_link to optical frame in order to show axes
 objectPose.T_washOrigin_CRF = objectPose.applyTransformation('camera_rgb_optical_frame','base',objectPose.T_wrt_mobilebase)
 # Show coordinate frame from result of last transformation
 rot_vec,_ =  cv.Rodrigues(objectPose.T_washOrigin_CRF[:3,:3])
 tr_vec = objectPose.T_washOrigin_CRF[:3,3]
 cv.drawFrameAxes(rgbImg, camera_info_A, camera_info_D, rot_vec, tr_vec, 0.1)
 # Transformation from rgb_optical_frame to depth_optical_frame
 objectPose.T_washOrigin_Depth = objectPose.applyTransformation('camera_depth_optical_frame', 'camera_rgb_optical_frame', objectPose.T_washOrigin_CRF)
 # Move the origin from the WASH MACHINE ORIGIN at the knob center to the lower part of the washing machine - necessary for pcl_registration
 objectPose.T_knob_wrt_machine_corner[0,3] = 0 
 objectPose.T_knob_wrt_machine_corner[1,3] = 0
 objectPose.T_knob_wrt_machine_corner[2,3] = 0         
 
            
 objectPose.T_knob_wrt_machine_corner[:3,:3] = np.identity(3,dtype=np.float32)
 objectPose.T_knob_wrt_machine_corner[3,:] = [0,0,0,1]
 # Matrix multiplication
 objectPose.T_washOrigin_Depth = np.dot(objectPose.T_knob_wrt_machine_corner,objectPose.T_washOrigin_Depth)
 
 ##### Publishers ###########################################################
 # Structuring message for transformation matrix between wash machine origin and base link