Code example #1
def reconstruct_object_points_from_relative_camera_pose(
        image_points_1,
        image_points_2,
        camera_matrix,
        relative_rotation_vector,
        relative_translation_vector,
        rotation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        translation_vector_1=np.array([[0.0], [0.0], [0.0]]),
        distance_between_cameras=1.0):
    image_points_1 = np.asarray(image_points_1)
    image_points_2 = np.asarray(image_points_2)
    camera_matrix = np.asarray(camera_matrix)
    relative_rotation_vector = np.asarray(relative_rotation_vector)
    relative_translation_vector = np.asarray(relative_translation_vector)
    rotation_vector_1 = np.asarray(rotation_vector_1)
    translation_vector_1 = np.asarray(translation_vector_1)
    if image_points_1.size == 0 or image_points_2.size == 0:
        return np.zeros((0, 3))
    image_points_1 = image_points_1.reshape((-1, 2))
    image_points_2 = image_points_2.reshape((-1, 2))
    if image_points_1.shape != image_points_2.shape:
        raise ValueError(
            'Sets of image points do not appear to be the same shape')
    camera_matrix = camera_matrix.reshape((3, 3))
    relative_rotation_vector = relative_rotation_vector.reshape(3)
    relative_translation_vector = relative_translation_vector.reshape(3)
    rotation_vector_1 = rotation_vector_1.reshape(3)
    translation_vector_1 = translation_vector_1.reshape(3)
    rotation_vector_2, translation_vector_2 = cv.composeRT(
        rotation_vector_1, translation_vector_1, relative_rotation_vector,
        relative_translation_vector * distance_between_cameras)[:2]
    object_points = reconstruct_object_points_from_camera_poses(
        image_points_1, image_points_2, camera_matrix, rotation_vector_1,
        translation_vector_1, rotation_vector_2, translation_vector_2)
    return object_points
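The helper reconstruct_object_points_from_camera_poses called above is not shown in this example. A minimal sketch of what such a helper might look like, assuming both views share the same camera matrix and that triangulation is done with cv.triangulatePoints (an illustrative assumption, not this project's actual implementation):

import cv2 as cv
import numpy as np

def reconstruct_object_points_from_camera_poses(
        image_points_1, image_points_2, camera_matrix,
        rotation_vector_1, translation_vector_1,
        rotation_vector_2, translation_vector_2):
    # Build the 3x4 projection matrices P = K [R | t] for both camera poses
    rotation_matrix_1, _ = cv.Rodrigues(
        np.asarray(rotation_vector_1, dtype=np.float64).reshape(3, 1))
    rotation_matrix_2, _ = cv.Rodrigues(
        np.asarray(rotation_vector_2, dtype=np.float64).reshape(3, 1))
    translation_1 = np.asarray(translation_vector_1, dtype=np.float64).reshape(3, 1)
    translation_2 = np.asarray(translation_vector_2, dtype=np.float64).reshape(3, 1)
    projection_matrix_1 = camera_matrix @ np.hstack((rotation_matrix_1, translation_1))
    projection_matrix_2 = camera_matrix @ np.hstack((rotation_matrix_2, translation_2))
    # Triangulate to 4xN homogeneous coordinates, then normalize by w
    points_homogeneous = cv.triangulatePoints(
        projection_matrix_1, projection_matrix_2,
        np.asarray(image_points_1, dtype=np.float64).T,
        np.asarray(image_points_2, dtype=np.float64).T)
    return (points_homogeneous[:3] / points_homogeneous[3]).T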
Code example #2
def triangluation(match_inliers1, match_inliers2, R, T):

    # The first camera has no translation or rotation;
    # the second camera's pose is given by R, T
    global oldR
    global oldT

    Rt1 = np.hstack((oldR, oldT))

    # get into vector form
    oldR = cv2.Rodrigues(oldR)[0]
    R = cv2.Rodrigues(R)[0]

    # compose new R and T (composition of old ones)
    newR, newT, _, _, _, _, _, _, _, _ = cv2.composeRT(oldR, oldT, R, T)

    #transform back to matrix format
    newR = cv2.Rodrigues(newR)[0]
    #print(newR)
    print(newT)

    # Bring R and T together
    Rt2 = np.hstack((newR, newT))

    first_inliers = np.array(match_inliers1).reshape(-1, 3)[:, :2]
    second_inliers = np.array(match_inliers2).reshape(-1, 3)[:, :2]

    # This returns the triangulated real-world points as 4D homogeneous coordinates
    pts4D = cv2.triangulatePoints(Rt1, Rt2, first_inliers.T,
                                  second_inliers.T).T

    oldR = newR
    oldT = newT

    return pts4D
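Note that cv2.triangulatePoints returns homogeneous coordinates, so the pts4D array above still needs to be divided by its fourth component before the points can be used as 3D coordinates; a small standalone sketch of that step:

import numpy as np

# pts4D has shape (N, 4); divide x, y, z by the w component to get Euclidean points
pts4D = np.array([[1.0, 2.0, 8.0, 2.0],
                  [0.5, 1.0, 4.0, 1.0]])  # illustrative homogeneous points
pts3D = pts4D[:, :3] / pts4D[:, 3:4]
print(pts3D)  # both rows become [0.5, 1.0, 4.0]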
Code example #3
 def __sub__(self, other):
     """ To solve the marker to marker calulations for the tvec and
         rvec the vector calulation is for marker A and B with camrea C
         AB = AC - BC
         Thus by ower writing the sub part of this object the this calculation
         in python is just Transfer1 - Transfer2
         :param other: Is an other Transfer object
     """
     if type(other) is Transfer:
         log.info("__sub__")
         invTvec, invRvec = other._inversePerspective()
         orgTvec, orgRvec = self._inversePerspective()
         rvec = self.rvec.reshape((3, 1))
         tvec = self.tvec.reshape((3, 1))
         info = cv2.composeRT(rvec, tvec, invRvec[0], invTvec)
         compRvec: np.ndarray = info[0]
         compTvec: np.ndarray = info[1]
         compRvec = compRvec.reshape((1, 1, 3))
         compTvec = compTvec.reshape((1, 1, 3))
         log.info(f"compRvec=\n{compRvec}\ncompTvec=\n{compTvec}")
         # breakpoint()
         log.info("--Create a new transfer--")
         tf = Transfer(self.source, other.source, compTvec, compRvec)
         log.info(f"__sub__ return {tf}")
         return tf
Code example #4
File: err_exp.py  Project: ArhumGit/Camera_Parameters
def calc_rms_stereo(objectpoints, imgpoints_l, imgpoints_r, A1, D1, A2, D2, R,
                    T):
    tot_error = 0
    total_points = 0

    for i, objpoints in enumerate(objectpoints):
        # calculate world <-> cam1 transformation
        _, rvec_l, tvec_l, _ = cv2.solvePnPRansac(objpoints, imgpoints_l[i],
                                                  A1, D1)

        # compute reprojection error for cam1
        rp_l, _ = cv2.projectPoints(objpoints, rvec_l, tvec_l, A1, D1)
        tot_error += np.sum(np.square(np.float64(imgpoints_l[i] - rp_l)))
        total_points += len(objpoints)

        # calculate world <-> cam2 transformation
        rvec_r, tvec_r = cv2.composeRT(rvec_l, tvec_l,
                                       cv2.Rodrigues(R)[0], T)[:2]

        # compute reprojection error for cam2
        rp_r, _ = cv2.projectPoints(objpoints, rvec_r, tvec_r, A2, D2)
        tot_error += np.square(imgpoints_r[i] - rp_r).sum()
        total_points += len(objpoints)

    mean_error = np.sqrt(tot_error / total_points)

    return mean_error
Code example #5
def relatPos(rvec1, tvec1, rvec2, tvec2):
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
    irvec, itvec = inverse_vec(rvec2, tvec2)
    mtx = cv2.composeRT(rvec1, tvec1, irvec, itvec)
    composedRvec, composedTvec = mtx[0], mtx[1]
    composedRvec = composedRvec.reshape((3, 1))
    composedTvec = composedTvec.reshape((3, 1))
    return composedRvec, composedTvec
Code example #6
File: mvp.py  Project: byteofsoren/3dreconstruction
def relativePosition(rvec1, tvec1, rvec2, tvec2):
    """ Get relative position ofor rvec2, tvec2 copose te return rvec, tvec """
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
    # Invert the second marker
    invRvec, invTvec = inversePerspective(rvec2, tvec2)
    info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    composeRvec, composeTvec = info[0], info[1]
    composeRvec = composeRvec.reshape((3, 1))
    composeTvec = composeTvec.reshape((3, 1))
    return composeRvec, composeTvec
Code example #7
 def relativePosition(rvec1, tvec1, rvec2, tvec2):
     rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
     rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
     # Invert the second marker, the right one in the image
     invRvec, invTvec = inversePerspective(rvec2, tvec2)
     orgRvec, orgTvec = inversePerspective(invRvec, invTvec)
     # print("rvec: ", rvec2, "tvec: ", tvec2, "\n and \n", orgRvec, orgTvec)
     info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
     composedRvec, composedTvec = info[0], info[1]
     composedRvec = composedRvec.reshape((3, 1))
     composedTvec = composedTvec.reshape((3, 1))
     return composedRvec, composedTvec
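Several of these relativePosition variants (and the _inversePerspective method used in code example #3) rely on an inverse-pose helper that is not included in the snippets. A minimal sketch of such a helper, assuming the usual convention that rvec/tvec describe the transform x_cam = R·x + t, so the inverse pose is (R^T, -R^T t):

import cv2
import numpy as np

def inversePerspective(rvec, tvec):
    """Invert the rigid transform described by a Rodrigues vector and a translation."""
    R, _ = cv2.Rodrigues(rvec.reshape(3, 1))
    # Inverse rotation is R^T, inverse translation is -R^T t
    invTvec = -R.T @ tvec.reshape(3, 1)
    invRvec, _ = cv2.Rodrigues(R.T)
    return invRvec, invTvec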
Code example #8
def compose_transformations(rotation_vector_1, translation_vector_1,
                            rotation_vector_2, translation_vector_2):
    rotation_vector_1 = np.asarray(rotation_vector_1).reshape(3)
    translation_vector_1 = np.asarray(translation_vector_1).reshape(3)
    rotation_vector_2 = np.asarray(rotation_vector_2).reshape(3)
    translation_vector_2 = np.asarray(translation_vector_2).reshape(3)
    rotation_vector_composed, translation_vector_composed = cv.composeRT(
        rotation_vector_1, translation_vector_1, rotation_vector_2,
        translation_vector_2)[:2]
    rotation_vector_composed = np.squeeze(rotation_vector_composed)
    translation_vector_composed = np.squeeze(translation_vector_composed)
    return rotation_vector_composed, translation_vector_composed
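As a quick sanity check of what composeRT computes: the composed pose corresponds to applying the first transform and then the second, i.e. R = R2·R1 and t = R2·t1 + t2. A small usage sketch, assuming the compose_transformations function above is in scope together with OpenCV and NumPy:

import cv2 as cv
import numpy as np

r1, t1 = np.array([0.1, 0.0, 0.0]), np.array([1.0, 0.0, 0.0])
r2, t2 = np.array([0.0, 0.2, 0.0]), np.array([0.0, 1.0, 0.0])

r3, t3 = compose_transformations(r1, t1, r2, t2)

# Compare against the explicit matrix composition
R1, _ = cv.Rodrigues(r1.reshape(3, 1))
R2, _ = cv.Rodrigues(r2.reshape(3, 1))
assert np.allclose(cv.Rodrigues(r3.reshape(3, 1))[0], R2 @ R1)
assert np.allclose(t3.reshape(3, 1), R2 @ t1.reshape(3, 1) + t2.reshape(3, 1))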
Code example #9
    def find_extrinsic(self, landmarks, camera):
        _, rotation, translation = solvePnP(
            objectPoints=self.model_points,
            imagePoints=self._extract_model_landmarks(landmarks),
            cameraMatrix=camera.matrix,
            distCoeffs=camera.distortion,
            flags=SOLVEPNP_ITERATIVE)

        rotation, translation = composeRT(rotation, translation,
                                          camera.rotation.T,
                                          camera.translation.T)[:2]

        return rotation, translation
Code example #10
def get_thermal_cameras(cameras_top, R, T):
    thermal_cameras = []
    for top_or_bot, pic_index, visible_points, f, R1, T1, C, distort in cameras_top:
        res = cv2.composeRT(cv2.Rodrigues(R1)[0], T1, cv2.Rodrigues(R)[0], T)
        rvec3 = res[0]
        tvec3 = res[1]
        thermal_cameras.append((pic_index, rvec3, tvec3))
        R_thermal = cv2.Rodrigues(rvec3)[0]
        T_thermal = tvec3
        C_thermal = -np.dot(np.linalg.inv(R_thermal), T_thermal)
        d = C - np.squeeze(C_thermal)
        dd = np.sum(np.abs(d)**2, axis=-1)**(1. / 2)
    return thermal_cameras
Code example #11
def relativePosition(rvec1, tvec1, rvec2, tvec2):
    """ Get relative position for rvec2 & tvec2. Compose the returned rvec & tvec to use composeRT with rvec2 & tvec2 """
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))

    # Invert the second marker, the right one in the image
    invRvec, invTvec = inversePerspective(rvec2, tvec2)

    info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    composedRvec, composedTvec = info[0], info[1]

    composedRvec = composedRvec.reshape((3, 1))
    composedTvec = composedTvec.reshape((3, 1))
    return composedRvec, composedTvec
Code example #12
    def relative_position(self, rvec1, tvec1, rvec2, tvec2):
        # Gets (r/tvecs) relative to each other
        rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
        rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))

        # Invert the second marker, the right one in the image
        inv_rvec, inv_tvec = self.inverse_perspective(rvec2, tvec2)

        composed_data = cv2.composeRT(rvec1, tvec1, inv_rvec, inv_tvec)
        composed_rvec, composed_tvec = composed_data[0], composed_data[1]

        composed_rvec = composed_rvec.reshape((3, 1))
        composed_tvec = composed_tvec.reshape((3, 1))

        return composed_rvec, composed_tvec
Code example #13
def relativePosition(rvec1, tvec1, rvec2, tvec2):

    rvec1, tvec1 = np.expand_dims(rvec1.squeeze(), 1), np.expand_dims(tvec1.squeeze(), 1)
    rvec2, tvec2 = np.expand_dims(rvec2.squeeze(), 1), np.expand_dims(tvec2.squeeze(), 1)
    invRvec, invTvec = inversePerspective(rvec2, tvec2)

    orgRvec, orgTvec = inversePerspective(invRvec, invTvec)

    info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    composedRvec, composedTvec = info[0], info[1]

    composedRvec = composedRvec.reshape((3, 1))
    composedTvec = composedTvec.reshape((3, 1))

    return composedRvec, composedTvec
Code example #14
    def get_combined_dict(self, frame_data, relative_data):
        combined_dict = {}
        combined_body = {}
        for marker_id in frame_data["ids"]:
            if marker_id not in relative_data:
                continue
            combined_data = cv2.composeRT(
                relative_data[marker_id]["relative_rvec"],
                relative_data[marker_id]["relative_tvec"],
                frame_data["ids"][marker_id]["marker_rvecs"].T,
                frame_data["ids"][marker_id]["marker_tvecs"].T)
            combined_body["combined_rvec"], combined_body[
                "combined_tvec"] = combined_data[0], combined_data[1]
            combined_dict[marker_id] = combined_body.copy()

        return combined_dict
Code example #15
def relative_pose(rotation_vector_parent, translation_vector_parent,
                  rotation_vector_child, translation_vector_child):
    rotation_vector_parent, translation_vector_parent = rotation_vector_parent.reshape(
        (3, 1)), translation_vector_parent.reshape((3, 1))
    rotation_vector_child, translation_vector_child = rotation_vector_child.reshape(
        (3, 1)), translation_vector_child.reshape((3, 1))

    inverse_rotation_vector_child, inverse_translation_vector_child = inverse_pose(
        rotation_vector_child, translation_vector_child)

    composed_matrix = cv2.composeRT(rotation_vector_parent,
                                    translation_vector_parent,
                                    inverse_rotation_vector_child,
                                    inverse_translation_vector_child)
    composed_rotation_vector = composed_matrix[0]
    composed_translation_vector = composed_matrix[1]

    composed_rotation_vector = composed_rotation_vector.reshape((3, 1))
    composed_translation_vector = composed_translation_vector.reshape((3, 1))

    return composed_rotation_vector, composed_translation_vector
Code example #16
def get_global_rotation_translation(global_rt_matrix_L,
                                    rt_matrix_R,
                                    verbose=False):
    """Compute the [R|t] matrix for imgR/camR from the first img/cam

    :param global_rt_matrix_L: The global [R|t] matrices corresponding to the
        previous image
    :param rt_matrix_R: The [R|t] matrix corresponding to the next image, to
        be converted into the global equivalent
    :param verbose: as in pipeline()
    """
    # Decompose the [R|t] matrices into R, t
    r_mat_L = global_rt_matrix_L[:, :3]
    t_L = global_rt_matrix_L[:, 3]
    r_mat_R = rt_matrix_R[:, :3]
    t_R = rt_matrix_R[:, 3]

    # Convert the rotation matrices to rotation vectors
    r_vec_L, _ = cv2.Rodrigues(r_mat_L)
    r_vec_R, _ = cv2.Rodrigues(r_mat_R)

    # Compose the new R and t vectors with the latest global R and t vectors (r_mat_L, t_L)
    global_r_vec_R, global_t_R, _, _, _, _, _, _, _, _ = cv2.composeRT(
        r_vec_L, t_L, r_vec_R, t_R)

    # Convert the R vector back into a matrix
    global_r_mat_R, _ = cv2.Rodrigues(global_r_vec_R)

    # Create the global [R|t] matrix
    global_rt_R = np.hstack((global_r_mat_R, global_t_R))

    if verbose:
        # Print the global_rt_R matrix
        print("Global [R|t] matrix:")
        with np.printoptions(suppress=True):
            print(global_rt_R)

    # Return the global_rt_R
    return global_rt_R
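A hypothetical usage sketch (illustrative values only), chaining a relative [R|t] onto the identity pose of the first camera; it assumes cv2 and numpy are imported and that get_global_rotation_translation above is in scope:

import cv2
import numpy as np

# Global pose of the first camera: [I | 0]
global_rt_1 = np.hstack((np.eye(3), np.zeros((3, 1))))

# Relative pose of the next camera (made-up values for illustration)
r_rel, _ = cv2.Rodrigues(np.array([[0.0], [0.1], [0.0]]))
rt_rel = np.hstack((r_rel, np.array([[0.5], [0.0], [0.0]])))

global_rt_2 = get_global_rotation_translation(global_rt_1, rt_rel)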
Code example #17
def adjust_calibration_origin(world_rotation_vector, world_translation_vector,
                              relative_rotations, relative_translations):
    '''Adjusts orientations and locations based on world rotation and translation.

    If the camera setup is such that the desired world origin cannot be observed by all cameras,
    but you wish to have the coordinate frame be relative to the world origin (or any other origin),
    then the values can be updated with this function. This is particularly useful for sequential
    pose estimates or any generic stereo-calibration.

    Arguments:
        world_rotation_vector {np.array} -- The rotation vector for the reference camera
        world_translation_vector {np.array} -- The translation vector for the reference camera
        relative_rotations {list of 'np.array's} -- List of rotations in the original coordinate frame
        relative_translations {list of 'np.array's} -- List of translations in the original coordinate frame

    Output:
        adjusted_rotation_vectors {list of np.array} -- rotations in space of the world
        adjusted_translation_vectors {list of np.array} -- locations in space of the world
    '''
    adjusted_rotation_vectors = []
    adjusted_translation_vectors = []

    # Format rotation for composeRT
    if world_rotation_vector.shape == (3, 3):
        world_rotation_vector = cv2.Rodrigues(world_rotation_vector)[0]

    for rel_rot, rel_trans in zip(relative_rotations, relative_translations):
        sec_r_vec = rel_rot
        # Format rotation for composeRT
        if sec_r_vec.shape == (3, 3):
            sec_r_vec = cv2.Rodrigues(sec_r_vec)[0]

        adjusted_orientation, adjusted_location = cv2.composeRT(
            world_rotation_vector, world_translation_vector, sec_r_vec, rel_trans)[:2]

        adjusted_rotation_vectors.append(adjusted_orientation)
        adjusted_translation_vectors.append(adjusted_location)

    return adjusted_rotation_vectors, adjusted_translation_vectors
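A hypothetical call (values are illustrative), showing the (3, 1) shapes the function expects; it assumes cv2 and numpy are imported and that adjust_calibration_origin above is in scope:

import numpy as np

world_rvec = np.array([[0.0], [0.1], [0.0]])
world_tvec = np.array([[0.2], [0.0], [1.5]])
relative_rotations = [np.zeros((3, 1)), np.array([[0.0], [0.3], [0.0]])]
relative_translations = [np.zeros((3, 1)), np.array([[0.1], [0.0], [0.0]])]

adjusted_rvecs, adjusted_tvecs = adjust_calibration_origin(
    world_rvec, world_tvec, relative_rotations, relative_translations)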
Code example #18
def track(matrix_coefficients, distortion_coefficients):
    global image_width
    global image_height
    """ Real time ArUco marker tracking.  """
    needleComposeRvec, needleComposeTvec = None, None  # Composed for needle
    ultraSoundComposeRvec, ultraSoundComposeTvec = None, None  # Composed for ultrasound
    savedNeedleRvec, savedNeedleTvec = None, None  # Pure Composed
    savedUltraSoundRvec, savedUltraSoundTvec = None, None  # Pure Composed
    TcomposedRvecNeedle, TcomposedTvecNeedle = None, None
    TcomposedRvecUltrasound, TcomposedTvecUltrasound = None, None

    # behaviour selects the calibration mode:
    #   0 = no calibration / plain simulation
    #   1 = needle calibration
    #   2 = ultrasound calibration
    # Press 'x' to cycle between them.
    behaviour = 0
    make_480p()

    while True:
        isCalibrationMarkerDetected = False
        isNeedleDetected = False
        isUltraSoundDetected = False
        ret, frame = cap.read()
        # operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change grayscale
        aruco_dict = aruco.Dictionary_get(
            aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
        parameters = aruco.DetectorParameters_create(
        )  # Marker detection parameters

        # lists of ids and the corners belonging to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(
            gray,
            aruco_dict,
            parameters=parameters,
            cameraMatrix=matrix_coefficients,
            distCoeff=distortion_coefficients)

        if behaviour == 0:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, 'No Calibration', (10, 40), font, 0.7,
                        (180, 250, 199), 2, cv2.LINE_AA)
        elif behaviour == 1:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, 'Needle calibration', (10, 40), font, 0.7,
                        (180, 250, 199), 2, cv2.LINE_AA)
        else:
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, 'Ultrasound calibration', (10, 40), font, 0.7,
                        (180, 250, 199), 2, cv2.LINE_AA)
            pass

        if np.all(ids is not None):  # If there are markers found by detector
            zipped = zip(ids, corners)
            ids, corners = zip(*(sorted(zipped)))
            axisForFourPoints = np.float32([[-0.025, -0.025, 0],
                                            [-0.025, 0.025, 0],
                                            [0.025, -0.025, 0],
                                            [0.025, 0.025, 0]
                                            ]).reshape(-1,
                                                       3)  # axis for a plane
            axisForTwoPoints = np.float32([[0.01, 0.01, 0],
                                           [-0.01, 0.01,
                                            0]]).reshape(-1,
                                                         3)  # axis for a line
            for i in range(0, len(ids)):  # Iterate in markers
                # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                    corners[i], 0.02, matrix_coefficients,
                    distortion_coefficients)

                if ids[i] == calibrationMarkerID:
                    calibrationRvec = rvec
                    calibrationTvec = tvec
                    isCalibrationMarkerDetected = True
                    calibrationMarkerCorners = corners[i]
                elif ids[i] == needleMarkerID:
                    needleRvec = rvec
                    needleTvec = tvec
                    isNeedleDetected = True
                    needleCorners = corners[i]
                elif ids[i] == ultraSoundMarkerID:
                    ultraSoundRvec = rvec
                    ultraSoundTvec = tvec
                    isUltraSoundDetected = True
                    ultrasoundCorners = corners[i]

                (rvec -
                 tvec).any()  # get rid of that nasty numpy value array error
                # aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw Axis
                aruco.drawDetectedMarkers(
                    frame, corners)  # Draw A square around the markers

            if isNeedleDetected and needleComposeRvec is not None and needleComposeTvec is not None:
                info = cv2.composeRT(needleComposeRvec, needleComposeTvec,
                                     needleRvec.T, needleTvec.T)
                TcomposedRvecNeedle, TcomposedTvecNeedle = info[0], info[1]
                imgpts, jac = cv2.projectPoints(axisForTwoPoints,
                                                TcomposedRvecNeedle,
                                                TcomposedTvecNeedle,
                                                matrix_coefficients,
                                                distortion_coefficients)
                frame = draw(frame, imgpts, (200, 200, 220))

            if isUltraSoundDetected and ultraSoundComposeRvec is not None and ultraSoundComposeTvec is not None:
                info = cv2.composeRT(ultraSoundComposeRvec,
                                     ultraSoundComposeTvec, ultraSoundRvec.T,
                                     ultraSoundTvec.T)
                TcomposedRvecUltrasound, TcomposedTvecUltrasound = info[
                    0], info[1]
                imgpts, jac = cv2.projectPoints(axisForTwoPoints,
                                                TcomposedRvecUltrasound,
                                                TcomposedTvecUltrasound,
                                                matrix_coefficients,
                                                distortion_coefficients)
                frame = draw(frame, imgpts, (60, 200, 50))



            if isNeedleDetected and needleComposeRvec is not None and needleComposeTvec is not None and \
                isUltraSoundDetected and ultraSoundComposeRvec is not None and ultraSoundComposeTvec is not None:
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(
                    frame, 'X difference:' +
                    str(TcomposedTvecNeedle[0] - TcomposedTvecUltrasound[0]),
                    (10, 70), font, 0.7, (0, 0, 200), 2, cv2.LINE_AA)
                cv2.putText(
                    frame, 'y difference:' +
                    str(TcomposedTvecNeedle[1] - TcomposedTvecUltrasound[1]),
                    (10, 100), font, 0.7, (0, 200, 0), 2, cv2.LINE_AA)
                cv2.putText(
                    frame, 'z difference:' +
                    str(TcomposedTvecNeedle[2] - TcomposedTvecUltrasound[2]),
                    (10, 130), font, 0.7, (200, 0, 0), 2, cv2.LINE_AA)

        # Display the resulting frame
        cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('frame', image_width, image_height)
        cv2.imshow('frame', frame)
        # Wait 3 milliseconds for an interaction. Check the key and do the corresponding job.
        key = cv2.waitKey(3) & 0xFF
        if key == ord('q'):  # Quit
            break
        elif key == ord('c'):  # Calibration
            if len(
                    ids
            ) > 1:  # If there are two markers, reverse the second and get the difference
                if isNeedleDetected and isCalibrationMarkerDetected and behaviour == 1:
                    needleComposeRvec, needleComposeTvec = relativePosition(
                        calibrationRvec, calibrationTvec, needleRvec,
                        needleTvec)
                    savedNeedleRvec, savedNeedleTvec = needleComposeRvec, needleComposeTvec
                elif isUltraSoundDetected and isCalibrationMarkerDetected and behaviour == 2:
                    ultraSoundComposeRvec, ultraSoundComposeTvec = relativePosition(
                        calibrationRvec, calibrationTvec, ultraSoundRvec,
                        ultraSoundTvec)
                    savedUltraSoundRvec, savedUltraSoundTvec = ultraSoundComposeRvec, ultraSoundComposeTvec
        elif key == ord('u'):  # Up
            if behaviour == 1 and needleComposeTvec is not None:
                needleComposeTvec = needleComposeTvec + [[0], [0], [0.001]]
            elif behaviour == 2 and ultraSoundComposeTvec is not None:
                ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [0],
                                                                 [0.001]]
        elif key == ord('d'):  # Down
            if behaviour == 1 and needleComposeTvec is not None:
                needleComposeTvec = needleComposeTvec + [[0], [0], [-0.001]]
            elif behaviour == 2 and ultraSoundComposeTvec is not None:
                ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [0],
                                                                 [-0.001]]
        elif key == ord('r'):  # Right
            if behaviour == 1 and needleComposeTvec is not None:
                needleComposeTvec = needleComposeTvec + [[0.001], [0], [0]]
            elif behaviour == 2 and ultraSoundComposeTvec is not None:
                ultraSoundComposeTvec = ultraSoundComposeTvec + [[0.001], [0],
                                                                 [0]]
        elif key == ord('l'):  # Left
            if behaviour == 1 and needleComposeTvec is not None:
                needleComposeTvec = needleComposeTvec + [[-0.001], [0], [0]]
            elif behaviour == 2 and ultraSoundComposeTvec is not None:
                ultraSoundComposeTvec = ultraSoundComposeTvec + [[-0.001], [0],
                                                                 [0]]
        elif key == ord('b'):  # Back
            if behaviour == 1 and needleComposeTvec is not None:
                needleComposeTvec = needleComposeTvec + [[0], [-0.001], [0]]
            elif behaviour == 2 and ultraSoundComposeTvec is not None:
                ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [-0.001],
                                                                 [0]]
        elif key == ord('f'):  # Front
            if behaviour == 1 and needleComposeTvec is not None:
                needleComposeTvec = needleComposeTvec + [[0], [0.001], [0]]
            elif behaviour == 2 and ultraSoundComposeTvec is not None:
                ultraSoundComposeTvec = ultraSoundComposeTvec + [[0], [0.001],
                                                                 [0]]
        elif key == ord('p'):  # print necessary information here
            pass  # Insert necessary print here
        elif key == ord('x'):  # change simulation type
            behaviour = (behaviour + 1) % 3
            print(behaviour)

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Code example #19
def main():
    # The whole SIFT part of this implementation was taken from user Bilou563 on the OpenCV forums
    # https://answers.opencv.org/question/90742/opencv-depth-map-from-uncalibrated-stereo-system/
    # Accessed on 06/05/2019

    print("Loading the images")

    img1 = cv2.imread('data/FurukawaPonce/MorpheusL.jpg',
                      cv2.CV_8UC1)  #queryimage # left image
    img2 = cv2.imread('data/FurukawaPonce/MorpheusR.jpg',
                      cv2.CV_8UC1)  #trainimage # right image

    # Adjust the size of img1 so that both images have the same size (required)
    img1 = img1[:, 0:1300]  # Crop off a region that contains nothing (checked beforehand in the image)
    # and put it in the same aspect ratio as img2
    img1 = cv2.resize(img1, img2.shape)  # Resize to the same size as img2

    print("Finding corresponding points with SIFT")
    # Obtain the corresponding points with SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    ###find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    ###FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    ###ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.array(pts1)
    pts2 = np.array(pts2)

    print("Computing the fundamental matrix from the corresponding points")
    #Computation of the fundamental matrix
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    # Obtainment of the rectification matrix and use of the warpPerspective to transform them...
    pts1 = pts1[:, :][mask.ravel() == 1]
    pts2 = pts2[:, :][mask.ravel() == 1]

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)

    p1fNew = pts1.reshape((pts1.shape[0] * 2, 1))
    p2fNew = pts2.reshape((pts2.shape[0] * 2, 1))

    print("Retificando as imagens com base na matriz fundamental")
    retBool, rectmat1, rectmat2 = cv2.stereoRectifyUncalibrated(
        p1fNew, p2fNew, F, img1.shape)

    dst11 = cv2.warpPerspective(img1, rectmat1, img1.shape,
                                cv2.BORDER_ISOLATED)
    dst22 = cv2.warpPerspective(img2, rectmat2, img2.shape,
                                cv2.BORDER_ISOLATED)

    print("Saving the rectified images")
    # Save the rectified images
    cv2.imwrite('data/output/rectifiedMorpheusL.png', dst11)
    cv2.imwrite('data/output/rectifiedMorpheusR.png', dst22)

    print("Computing the disparity")
    disp = stereo.compute(dst11.astype(np.uint8), dst22.astype(
        np.uint8)).astype(np.float32) / 16

    plt_show(disp, "data/output/dispMorpheus.png")

    # Histogram used to find the values 60 and -60 that are used
    # to create disp_filtered just below.
    # It is an attempt to remove the points whose disparity was
    # computed wrong / had no match.
    # plt.hist(disp.ravel(), 40)
    # plt.title("Histogram with 'auto' bins")
    # hist = plt.gcf()
    # hist.savefig("histMorpheus.png")
    # plt.show()

    disp_filtered = np.where(disp > 60, np.amin(disp), disp)
    disp_filtered = np.where(disp_filtered < -60, -60, disp_filtered)

    plt_show(disp_filtered, "data/output/dispMorpheusFiltered.png")

    #########################################
    ########    Depth computation    ########
    #########################################

    print("Loading the calibration parameters")
    # Load the calibration parameters
    # MorpheusL
    imL_calib = open('data/FurukawaPonce/MorpheusL.txt', 'r')
    texto = imL_calib.read()

    start_f1 = texto.find('fc = ')
    end_f1 = texto.find(']', start_f1)
    f1 = np.fromstring(texto[start_f1 + 6:end_f1].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_c1 = texto.find('cc = ')
    end_c1 = texto.find(']', start_c1)
    c1 = np.fromstring(texto[start_c1 + 6:end_c1].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_alpha = texto.find('alpha_c = ')
    end_alpha = texto.find(';', start_alpha)
    alpha = float(texto[start_alpha + 10:end_alpha])

    start_R = texto.find('R = ')
    end_R = texto.find(']', start_R)
    R1 = np.fromstring(texto[start_R + 5:end_R].replace(',',
                                                        ' ').replace(';', ' '),
                       dtype=float,
                       sep=' ')
    R1.shape = (3, 3)

    start_Tc = texto.find('Tc = ')
    end_Tc = texto.find(']', start_Tc)
    Tc1 = np.fromstring(texto[start_Tc + 6:end_Tc].replace(';', ' '),
                        dtype=float,
                        sep=' ')
    Tc1.shape = (3, 1)

    matrix1 = np.array([[f1[0], alpha, c1[0]], [0, f1[1], c1[1]], [0, 0, 1]],
                       dtype=float)

    # Compensate the intrinsics matrix for the MorpheusL resize, following:
    # https://dsp.stackexchange.com/a/6098
    # Accessed on 06/05/2019
    scaling_compensation = np.array([[12 / 13, 0, 1 / 26],
                                     [0, 12 / 13, 1 / 26], [0, 0, 1]])
    matrix1 = scaling_compensation @ matrix1

    # MorpheusR
    imR_calib = open('data/FurukawaPonce/MorpheusR.txt', 'r')
    texto = imR_calib.read()

    start_f2 = texto.find('fc = ')
    end_f2 = texto.find(']', start_f2)
    f2 = np.fromstring(texto[start_f2 + 6:end_f2].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_c2 = texto.find('cc = ')
    end_c2 = texto.find(']', start_c2)
    c2 = np.fromstring(texto[start_c2 + 6:end_c2].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_alpha = texto.find('alpha_c = ')
    end_alpha = texto.find(';', start_alpha)
    alpha = float(texto[start_alpha + 10:end_alpha])

    start_R = texto.find('R = ')
    end_R = texto.find(']', start_R)
    R2 = np.fromstring(texto[start_R + 5:end_R].replace(',',
                                                        ' ').replace(';', ' '),
                       dtype=float,
                       sep=' ')
    R2.shape = (3, 3)

    start_Tc = texto.find(
        'Tc_8 = ')  # For some reason it is stored as 'Tc_8' for this image
    end_Tc = texto.find(']', start_Tc)
    Tc2 = np.fromstring(texto[start_Tc + 8:end_Tc].replace(';', ' '),
                        dtype=float,
                        sep=' ')
    Tc2.shape = (3, 1)

    matrix2 = np.array([[f2[0], alpha, c2[0]], [0, f2[1], c2[1]], [0, 0, 1]],
                       dtype=float)

    # Adjusting parameters
    rvec1, _ = cv2.Rodrigues(R1)
    rvec2, _ = cv2.Rodrigues(R2)

    Tc1.shape = (1, 3)
    Tc2.shape = (1, 3)

    # Compute the relative translation and rotation between the cameras
    rvec3, tvec3, _, _, _, _, _, _, _, _ = cv2.composeRT(
        rvec1.ravel(), Tc1.ravel(), rvec2.ravel(), Tc2.ravel())

    print("Computing depth")
    focal_length = np.mean([np.mean(f1), np.mean(f2)])
    doffs = c2[0] - c1[0]  # Difference along the x axis between the principal points
    baseline = np.linalg.norm(tvec3)  # Distance between the cameras
    disp2depth_factor = baseline * focal_length

    # Disparity in mm(?)
    # Some factor here is wrong because the values come out far too large
    depth = np.zeros(disp_filtered.shape, dtype=np.uint8)
    depth = disp2depth_factor / (disp_filtered + doffs)
    # plt_show(depth, "depthMorpheus_mm.png")

    furthest = disp2depth_factor / (
        np.amin(disp_filtered[disp_filtered > -60]) + doffs)

    depth = disp2depth_factor / (disp_filtered + doffs)
    # Normalize the depth according to the assignment instructions
    depth[disp_filtered > -60] = np.floor(
        (disp2depth_factor /
         (disp_filtered[disp_filtered > -60] + doffs)) * 254 / furthest)
    depth[disp_filtered <= -60] = 255

    plt_show(depth, "data/output/depthMorpheus.png")
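The disparity-to-depth step above uses the standard pinhole relation Z = f·B / (d + doffs). A tiny standalone numeric sketch of that conversion (illustrative values):

import numpy as np

focal_length = 1200.0   # pixels (illustrative)
baseline = 100.0        # mm, distance between the cameras
doffs = 5.0             # x offset between the principal points, pixels
disparity = np.array([20.0, 40.0, 80.0])

depth = baseline * focal_length / (disparity + doffs)
print(depth)  # larger disparities map to smaller depths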
Code example #20
def track(matrix_coefficients, distortion_coefficients):
    pointCircle = (0, 0)
    markerTvecList = []
    markerRvecList = []
    composedRvec, composedTvec = None, None
    while True:
        ret, frame = cap.read()
        # operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change grayscale
        aruco_dict = aruco.Dictionary_get(
            aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
        parameters = aruco.DetectorParameters_create(
        )  # Marker detection parameters

        # lists of ids and the corners belonging to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(
            gray,
            aruco_dict,
            parameters=parameters,
            cameraMatrix=matrix_coefficients,
            distCoeff=distortion_coefficients)

        if np.all(ids is not None):  # If there are markers found by detector
            del markerTvecList[:]
            del markerRvecList[:]
            zipped = zip(ids, corners)
            ids, corners = zip(*(sorted(zipped)))
            axis = np.float32([[-0.01, -0.01, 0], [-0.01, 0.01, 0],
                               [0.01, -0.01, 0], [0.01, 0.01,
                                                  0]]).reshape(-1, 3)
            for i in range(0, len(ids)):  # Iterate in markers
                # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                    corners[i], 0.02, matrix_coefficients,
                    distortion_coefficients)
                # print(markerPoints)
                (rvec -
                 tvec).any()  # get rid of that nasty numpy value array error
                markerRvecList.append(rvec)
                markerTvecList.append(tvec)

                # aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw Axis
                # cv2.circle(frame, pointCircle, 6, (255, 0, 255), 3)
                aruco.drawDetectedMarkers(
                    frame, corners)  # Draw A square around the markers

            if len(ids) > 1:
                # print("in")
                markerRvecList[0], markerTvecList[0] = markerRvecList[
                    0].reshape((3, 1)), markerTvecList[0].reshape((3, 1))
                markerRvecList[1], markerTvecList[1] = markerRvecList[
                    1].reshape((3, 1)), markerTvecList[1].reshape((3, 1))
                info = cv2.composeRT(markerRvecList[0], markerTvecList[0],
                                     markerRvecList[1], markerTvecList[1])
                TTcomposedRvec, TTcomposedTvec = info[0], info[1]
                aruco.drawAxis(frame, matrix_coefficients,
                               distortion_coefficients, TTcomposedRvec,
                               TTcomposedTvec, 0.01)  # Draw Axis
                print(TTcomposedRvec, TTcomposedTvec)

            if len(
                    ids
            ) > 1 and composedRvec is not None and composedTvec is not None:
                info = cv2.composeRT(composedRvec, composedTvec,
                                     markerRvecList[1].T, markerTvecList[1].T)
                TcomposedRvec, TcomposedTvec = info[0], info[1]

                objectPositions = np.array(
                    [(0, 0, 0)], dtype=float)  # 3D point for projection
                imgpts, jac = cv2.projectPoints(axis, TcomposedRvec,
                                                TcomposedTvec,
                                                matrix_coefficients,
                                                distortion_coefficients)
                # print(imgpts)
                frame = draw(frame, corners[0], imgpts)

        # Display the resulting frame
        cv2.imshow('frame', frame)
        # Wait 3 milliseconds for an interaction. Check the key and do the corresponding job.
        key = cv2.waitKey(3) & 0xFF
        if key == ord('q'):  # Quit
            break
        elif key == ord('c'):  # Calibration
            if len(
                    ids
            ) > 1:  # If there are two markers, reverse the second and get the difference
                composedRvec, composedTvec = relativePosition(
                    markerRvecList[0], markerTvecList[0], markerRvecList[1],
                    markerTvecList[1])
                # debug: compute the relative pose again so you can be sure you are doing it right!
                # info = cv2.composeRT(composedRvec, composedTvec, markerRvecList[1], markerTvecList[1])
                # TcomposedRvec, TcomposedTvec = info[0], info[1]
                # print("first: ", markerRvecList[0], markerTvecList[0])  # first marker vectors
                # print("second: ", markerRvecList[1], markerTvecList[1])  # second marker vectors
                # print("composed: ", composedRvec, composedTvec)  # relative marker vectors
                # print("test: ", TcomposedRvec, TcomposedTvec)  # second relative marker vectors, should be equal to first or second
                # aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, TcomposedRvec, TcomposedTvec, 0.01)  # Draw Axis for second relative!

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Code example #21
    def track(self, matrix_coefficients, distortion_coefficients):
        global pressedKey, needleComposeRvec, needleComposeTvec, ultraSoundComposeRvec, ultraSoundComposeTvec
        """ Real time ArUco marker tracking.  """
        TcomposedRvecNeedle, TcomposedTvecNeedle = None, None
        TcomposedRvecUltrasound, TcomposedTvecUltrasound = None, None

        # behaviour selects the calibration mode:
        #   0 = no calibration / plain simulation
        #   1 = needle calibration
        #   2 = ultrasound calibration
        # Press the X key to cycle between them.
        behaviour = 0
        try:
            while True:
                # No marker detected
                isCalibrationMarkerDetected = False
                isNeedleDetected = False
                isUltraSoundDetected = False

                # Get the next frame to process
                ret, frame = cap.read()
                # operations on the frame come here
                gray = cv2.cvtColor(frame,
                                    cv2.COLOR_BGR2GRAY)  # Change grayscale
                aruco_dict = aruco.Dictionary_get(
                    aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
                parameters = aruco.DetectorParameters_create(
                )  # Marker detection parameters

                # lists of ids and the corners belonging to each id
                corners, ids, rejected_img_points = aruco.detectMarkers(
                    gray,
                    aruco_dict,
                    parameters=parameters,
                    cameraMatrix=matrix_coefficients,
                    distCoeff=distortion_coefficients)

                # Get the behaviour and update the gui
                if behaviour == 0:
                    self.calibrationType.emit('Simulation')
                elif behaviour == 1:
                    self.calibrationType.emit('Needle calibration')
                else:
                    self.calibrationType.emit('Ultrasound calibration')
                    pass

                if np.all(ids is not None
                          ):  # If there are markers found by detector
                    # sort the markers
                    zipped = zip(ids, corners)
                    ids, corners = zip(*(sorted(zipped)))
                    # 4 axis for ultrasound, 2axis for needle
                    axisForFourPoints = np.float32([[-0.02, -0.02, 0],
                                                    [-0.02, 0.02, 0],
                                                    [0.02, -0.02, 0],
                                                    [0.02, 0.02, 0]]).reshape(
                                                        -1,
                                                        3)  # axis for a plan
                    axisForTwoPoints = np.float32([[0.01, 0, 0],
                                                   [-0.01, 0, 0]]).reshape(
                                                       -1,
                                                       3)  # axis for a line

                    for i in range(0, len(ids)):  # Iterate in markers
                        # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                        rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                            corners[i], 0.02, matrix_coefficients,
                            distortion_coefficients)

                        if ids[i] == calibrationMarkerID:
                            calibrationRvec = rvec
                            calibrationTvec = tvec
                            isCalibrationMarkerDetected = True
                            calibrationMarkerCorners = corners[i]
                        elif ids[i] == needleMarkerID:
                            needleRvec = rvec
                            needleTvec = tvec
                            isNeedleDetected = True
                            needleCorners = corners[i]
                        elif ids[i] == ultraSoundMarkerID:
                            ultraSoundRvec = rvec
                            ultraSoundTvec = tvec
                            isUltraSoundDetected = True
                            ultrasoundCorners = corners[i]

                        (rvec - tvec).any(
                        )  # get rid of that nasty numpy value array error
                        # aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)
                        frame = aruco.drawDetectedMarkers(
                            frame, corners)  # Draw A square around the markers

                    if isNeedleDetected and needleComposeRvec is not None and needleComposeTvec is not None:
                        info = cv2.composeRT(needleComposeRvec,
                                             needleComposeTvec, needleRvec.T,
                                             needleTvec.T)
                        TcomposedRvecNeedle, TcomposedTvecNeedle = info[
                            0], info[1]
                        imgpts, jac = cv2.projectPoints(
                            axisForTwoPoints, TcomposedRvecNeedle,
                            TcomposedTvecNeedle, matrix_coefficients,
                            distortion_coefficients)
                        frame = draw(frame, imgpts, (200, 200, 220))

                    if isUltraSoundDetected and ultraSoundComposeRvec is not None and ultraSoundComposeTvec is not None:
                        info = cv2.composeRT(ultraSoundComposeRvec,
                                             ultraSoundComposeTvec,
                                             ultraSoundRvec.T,
                                             ultraSoundTvec.T)
                        TcomposedRvecUltrasound, TcomposedTvecUltrasound = info[
                            0], info[1]
                        imgpts, jac = cv2.projectPoints(
                            axisForFourPoints, TcomposedRvecUltrasound,
                            TcomposedTvecUltrasound, matrix_coefficients,
                            distortion_coefficients)
                        frame = draw(frame, imgpts, (60, 200, 50))

                    # If the both markers can be seen by the camera, print the xyz differences between them
                    # So it will guide the user
                    if isNeedleDetected and needleComposeRvec is not None and needleComposeTvec is not None and \
                        isUltraSoundDetected and ultraSoundComposeRvec is not None and ultraSoundComposeTvec is not None:
                        xdiff = round((TcomposedTvecNeedle[0] -
                                       TcomposedTvecUltrasound[0])[0], 3)
                        ydiff = round((TcomposedTvecNeedle[1] -
                                       TcomposedTvecUltrasound[1])[0], 3)
                        zdiff = round((TcomposedTvecNeedle[2] -
                                       TcomposedTvecUltrasound[2])[0], 3)
                        self.changeXDiff.emit('X difference:' + str(xdiff))
                        self.changeYDiff.emit('Y difference:' + str(ydiff))
                        self.changeZDiff.emit('Z difference:' + str(zdiff))

                # Display the resulting frame
                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1],
                                           rgbImage.shape[0],
                                           QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

                # Key handling. For marker calibration, saving marker etc.
                if pressedKey is None:  # No key pressed
                    pass
                elif pressedKey == Qt.Key_C:  # Button for calibration.
                    if ids is not None and len(
                            ids
                    ) > 1:  # If there are two markers, reverse the second and get the difference
                        if isNeedleDetected and isCalibrationMarkerDetected and behaviour == 1:
                            needleComposeRvec, needleComposeTvec = relativePosition(
                                calibrationRvec, calibrationTvec, needleRvec,
                                needleTvec)
                            savedNeedleRvec, savedNeedleTvec = needleComposeRvec, needleComposeTvec
                        elif isUltraSoundDetected and isCalibrationMarkerDetected and behaviour == 2:
                            ultraSoundComposeRvec, ultraSoundComposeTvec = relativePosition(
                                calibrationRvec, calibrationTvec,
                                ultraSoundRvec, ultraSoundTvec)
                            savedUltraSoundRvec, savedUltraSoundTvec = ultraSoundComposeRvec, ultraSoundComposeTvec
                elif pressedKey == Qt.Key_U:  # Button for moving z axis 1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [0],
                                                                 [0.001]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[
                            0
                        ], [0], [0.001]]
                elif pressedKey == Qt.Key_D:  # Button for moving z axis -1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [0],
                                                                 [-0.001]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[
                            0
                        ], [0], [-0.001]]
                elif pressedKey == Qt.Key_R:  # Button for moving x axis 1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0.001], [0],
                                                                 [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[
                            0.001
                        ], [0], [0]]
                elif pressedKey == Qt.Key_L:  # Button for moving x axis -1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[-0.001], [0],
                                                                 [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[
                            -0.001
                        ], [0], [0]]
                elif pressedKey == Qt.Key_B:  # Button for moving y axis -1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [-0.001],
                                                                 [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[
                            0
                        ], [-0.001], [0]]
                elif pressedKey == Qt.Key_F:  # Button for moving y axis 1 mm
                    if behaviour == 1 and needleComposeTvec is not None:
                        needleComposeTvec = needleComposeTvec + [[0], [0.001],
                                                                 [0]]
                    elif behaviour == 2 and ultraSoundComposeTvec is not None:
                        ultraSoundComposeTvec = ultraSoundComposeTvec + [[
                            0
                        ], [0.001], [0]]
                elif pressedKey == Qt.Key_P:  # print necessary information here, for debug
                    pass  # Insert necessary print here
                elif pressedKey == Qt.Key_S:  # Save the calibration vectors to a file.
                    if (needleComposeRvec is not None
                            and needleComposeTvec is not None
                            and ultraSoundComposeRvec is not None
                            and ultraSoundComposeTvec is not None):
                        print(needleComposeRvec, needleComposeTvec,
                              ultraSoundComposeRvec, ultraSoundComposeTvec)
                        fileObject = open(save_Name, 'wb')
                        data = [
                            needleComposeRvec, needleComposeTvec,
                            ultraSoundComposeRvec, ultraSoundComposeTvec
                        ]
                        pickle.dump(data, fileObject)
                        fileObject.close()
                elif pressedKey == Qt.Key_X:  # change simulation type, "Simulation, needle calibration, ultrasound calibration"
                    behaviour = (behaviour + 1) % 3
                    # print(behaviour)
                pressedKey = None

        except Exception:
            pass
        finally:
            # When everything done, release the capture
            cap.release()

        return frame
Code example #22
def track(matrix_coefficients, distortion_coefficients):
    pointCircle = (0, 0)
    markerTvecList = []
    markerRvecList = []
    composedRvec, composedTvec = None, None
    while True:
        ret, frame = cap.read()
        # operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change grayscale
        aruco_dict = aruco.Dictionary_get(
            aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
        parameters = aruco.DetectorParameters_create(
        )  # Marker detection parameters

        # lists of ids and the corners belonging to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(
            gray,
            aruco_dict,
            parameters=parameters,
            cameraMatrix=np.float32(matrix_coefficients),
            distCoeff=np.float32(distortion_coefficients))

        if np.all(ids is not None):  # If there are markers found by detector
            del markerTvecList[:]
            del markerRvecList[:]
            zipped = zip(ids, corners)
            ids, corners = zip(*(sorted(zipped)))
            axis = np.float32([[-0.01, -0.01, 0], [-0.01, 0.01, 0],
                               [0.01, -0.01, 0], [0.01, 0.01,
                                                  0]]).reshape(-1, 3)
            for i in range(0, len(ids)):  # Iterate in markers
                # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(
                    corners[i], 0.02, np.float32(matrix_coefficients),
                    np.float32(distortion_coefficients))

                if ids[i] == firstMarkerID:
                    firstRvec = rvec
                    firstTvec = tvec
                    isFirstMarkerCalibrated = True
                    firstMarkerCorners = corners[i]
                elif ids[i] == secondMarkerID:
                    secondRvec = rvec
                    secondTvec = tvec
                    isSecondMarkerCalibrated = True
                    secondMarkerCorners = corners[i]

                # print(markerPoints)
                (rvec -
                 tvec).any()  # get rid of that nasty numpy value array error
                markerRvecList.append(rvec)
                markerTvecList.append(tvec)

                aruco.drawDetectedMarkers(
                    frame, corners)  # Draw A square around the markers

            if len(
                    ids
            ) > 1 and composedRvec is not None and composedTvec is not None:
                info = cv2.composeRT(composedRvec, composedTvec, secondRvec.T,
                                     secondTvec.T)
                TcomposedRvec, TcomposedTvec = info[0], info[1]

                objectPositions = np.array(
                    [(0, 0, 0)], dtype=float)  # 3D point for projection
                imgpts, jac = cv2.projectPoints(
                    axis, TcomposedRvec, TcomposedTvec,
                    np.float32(matrix_coefficients),
                    np.float32(distortion_coefficients))

                # frame = draw(frame, corners[0], imgpts)
                aruco.drawAxis(frame, matrix_coefficients,
                               distortion_coefficients, TcomposedRvec,
                               TcomposedTvec, 0.01)  # Draw Axis
                relativePoint = (int(imgpts[0][0][0]), int(imgpts[0][0][1]))
                cv2.circle(frame, relativePoint, 2, (255, 255, 0))

        # Display the resulting frame
        cv2.imshow('frame', frame)
        # Wait 3 milliseconds for an interaction. Check the key and do the corresponding job.
        key = cv2.waitKey(3) & 0xFF
        if key == ord('q'):  # Quit
            break
        elif key == ord('c'):  # Calibration
            if len(
                    ids
            ) > 1:  # If there are two markers, reverse the second and get the difference
                firstRvec, firstTvec = firstRvec.reshape(
                    (3, 1)), firstTvec.reshape((3, 1))
                secondRvec, secondTvec = secondRvec.reshape(
                    (3, 1)), secondTvec.reshape((3, 1))

                composedRvec, composedTvec = relativePosition(
                    firstRvec, firstTvec, secondRvec, secondTvec)

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
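The calibration key in code examples #18–#22 stores a relative pose with relativePosition and later re-applies it with cv2.composeRT. The commented-out debug block in code example #20 hints at a round-trip check; a self-contained sketch of that check, reusing the assumed inversePerspective helper sketched after code example #7:

import cv2
import numpy as np

def inversePerspective(rvec, tvec):
    R, _ = cv2.Rodrigues(rvec.reshape(3, 1))
    invTvec = -R.T @ tvec.reshape(3, 1)
    invRvec, _ = cv2.Rodrigues(R.T)
    return invRvec, invTvec

def relativePosition(rvec1, tvec1, rvec2, tvec2):
    invRvec, invTvec = inversePerspective(rvec2, tvec2)
    info = cv2.composeRT(rvec1.reshape(3, 1), tvec1.reshape(3, 1), invRvec, invTvec)
    return info[0], info[1]

# Two made-up marker poses in the camera frame
rvec1, tvec1 = np.array([[0.1], [0.2], [0.0]]), np.array([[0.05], [0.00], [0.50]])
rvec2, tvec2 = np.array([[0.0], [0.1], [0.3]]), np.array([[-0.05], [0.02], [0.60]])

relRvec, relTvec = relativePosition(rvec1, tvec1, rvec2, tvec2)

# Composing the stored relative pose back onto marker 2 recovers marker 1
backRvec, backTvec = cv2.composeRT(relRvec, relTvec, rvec2, tvec2)[:2]
assert np.allclose(backRvec, rvec1, atol=1e-6)
assert np.allclose(backTvec, tvec1, atol=1e-6)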