def make_pose_rel(parent: Pose, child: Pose) -> Pose:
    """Express *child* in the coordinate frame of *parent*.

    :param parent: e.g. scene object
    :param child: e.g. action point
    :return: relative pose
    """

    # translate into the parent frame, then undo the parent rotation
    rel_position = (child.position - parent.position).rotated(parent.orientation, inverse=True)

    # q_rel = q_parent^-1 * q_child
    rel_orientation = Orientation.from_quaternion(
        parent.orientation.as_quaternion().inverse() * child.orientation.as_quaternion()
    )

    return Pose(rel_position, rel_orientation)
def make_pose_abs(parent: Pose, child: Pose) -> Pose:
    """Express a *child* pose (relative to *parent*) in the world frame.

    Inverse of ``make_pose_rel``.

    :param parent: e.g. scene object
    :param child: e.g. action point
    :return: absolute pose
    """

    # rotate by the parent orientation, then translate into the world frame
    abs_position = child.position.rotated(parent.orientation) + parent.position

    # q_abs = q_parent * q_child
    abs_orientation = Orientation.from_quaternion(
        parent.orientation.as_quaternion() * child.orientation.as_quaternion()
    )

    return Pose(abs_position, abs_orientation)
def estimate_camera_pose(
    camera_matrix: List[List[float]], dist_matrix: List[float], image: Image.Image, marker_size: float
) -> Dict[int, Pose]:
    """Estimate the pose of the camera with respect to each detected ArUco marker.

    :param camera_matrix: 3x3 camera intrinsic matrix.
    :param dist_matrix: Distortion coefficients.
    :param image: Image to search for markers in.
    :param marker_size: Side length of the markers (same unit as the returned translation).
    :return: Mapping from marker id to the pose of the camera w.r.t. that marker.
    """

    camera_matrix_arr, dist_matrix_arr, gray, corners, ids = detect_corners(
        camera_matrix, dist_matrix, image, refine=True
    )

    ret: Dict[int, Pose] = {}

    # detectMarkers yields ids=None when no marker was found; the original
    # wrapped a plain bool in np.all(ids is None), which is a no-op.
    if ids is None:
        return ret

    # TODO do not perform estimation for un-configured markers
    rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, marker_size, camera_matrix_arr, dist_matrix_arr)
    # one (3,) rotation/translation vector per detected marker
    rvec = rvec.reshape(len(ids), 3)
    tvec = tvec.reshape(len(ids), 3)

    if __debug__:
        # debug visualisation: dump an annotated image next to the process cwd
        backtorgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
        aruco.drawDetectedMarkers(backtorgb, corners)  # Draw A square around the markers
        for idx in range(len(ids)):
            aruco.drawAxis(backtorgb, camera_matrix_arr, dist_matrix_arr, rvec[idx], tvec[idx], 0.15)
        cv2.imwrite("marker.jpg", backtorgb)

    for idx, mid in enumerate(ids):
        # convert pose of the marker wrt camera to pose of camera wrt marker
        # based on https://stackoverflow.com/a/51515560/3142796
        marker_rot_matrix, _ = cv2.Rodrigues(rvec[idx])

        # sanity check: a proper rotation matrix is orthogonal with determinant 1
        assert np.allclose(np.linalg.inv(marker_rot_matrix), marker_rot_matrix.transpose())
        assert math.isclose(np.linalg.det(marker_rot_matrix), 1)

        camera_rot_matrix = marker_rot_matrix.transpose()
        camera_trans_vector = np.matmul(-camera_rot_matrix, tvec[idx].reshape(3, 1)).flatten()

        o = Orientation.from_quaternion(quaternion.from_rotation_matrix(camera_rot_matrix))
        # mid is a length-1 array; its scalar is the marker id
        ret[mid[0]] = Pose(
            Position(camera_trans_vector[0], camera_trans_vector[1], camera_trans_vector[2]), o
        )

    return ret
def message_to_pose(message: str) -> Pose:
    """Parse a pose from a whitespace-separated 7-value message.

    The first three tokens are the position in millimeters (converted to
    meters), the remaining four the orientation as a quaternion
    (ordering as expected by quaternion.from_float_array — presumably
    w, x, y, z; TODO confirm against the robot's output format).

    :param message: Raw message from the controller.
    :return: Parsed pose.
    :raises YumiException: If the message does not contain exactly seven
        numeric tokens.
    """

    tokens = message.split()

    if len(tokens) != 7:
        raise YumiException("Invalid format for pose! Got:\n{0}".format(message))

    try:
        # float() must be inside the try: previously a non-numeric token
        # escaped as a raw ValueError instead of YumiException
        pose_vals = [float(token) for token in tokens]
        t = pose_vals[:3]
        q = pose_vals[3:]
        return Pose(
            Position(t[0], t[1], t[2]) * MM_TO_METERS,
            Orientation.from_quaternion(quaternion.from_float_array(q)),
        )
    except (IndexError, ValueError):
        raise YumiException("Invalid pose.")
def _update_childs(parent: Parent, ap_id: str) -> None:
    """Recursively rotate descendant action points by the parent's orientation.

    Walks the child tree of *ap_id*, rotating each child AP's position and
    pre-multiplying each of its orientations by the parent's orientation.
    Visited AP ids are recorded in the enclosing ``updated_aps`` set.

    :param parent: Parent whose pose orientation is applied.
    :param ap_id: Id of the action point whose children are processed.
    """

    for child_id in project.childs(ap_id):

        # children that are not action points are skipped
        try:
            child = project.bare_action_point(child_id)
        except CachedProjectException:
            continue

        child.position = child.position.rotated(parent.pose.orientation)

        for ori in project.ap_orientations(child.id):
            combined = parent.pose.orientation.as_quaternion() * ori.orientation.as_quaternion()
            ori.orientation = Orientation.from_quaternion(combined)

        updated_aps.add(child.id)

        # descend into this child's own children
        _update_childs(parent, child.id)
def forward_kinematics(self, joints: List[Joint]) -> Pose:
    """Computes forward kinematics. Outputs absolute pose.

    Inspired by DobotKinematics.py from open-dobot project.

    :param joints: Input joint values
    :return: Absolute pose of the end effector
    """

    self.validate_joints(joints)

    base_angle = joints[0].value
    rear_angle = joints[1].value
    front_angle = joints[2].value
    arm_sum = rear_angle + front_angle

    # horizontal distance of the end effector from the base rotation axis
    reach = (
        self.link_2_length * math.cos(rear_angle - math.pi / 2)
        + self.link_3_length * math.cos(arm_sum)
        + self.link_4_length
    )

    # vertical offset of the end effector w.r.t. the robot base
    height = (
        self.link_2_length * math.cos(rear_angle)
        + self.link_3_length * math.cos(arm_sum + math.pi / 2)
        - self.end_effector_length
    )

    # pose in the robot's own frame; the tool rotation follows base + last joint
    local_pose = Pose(
        Position(reach * math.cos(base_angle), reach * math.sin(base_angle), height),
        Orientation.from_quaternion(
            quaternion.from_euler_angles(0, math.pi, joints[-1].value + base_angle)
        ),
    )

    if __debug__:
        self._check_orientation(local_pose)

    # transform from the robot frame into the world frame
    return tr.make_pose_abs(self.pose, local_pose)