def movel(self, robot_local_device_name, pose_final, frame, robot_origin_calib_global_name, speed_perc, final_seed=None):
    """Generate a linear (Cartesian) move of the robot end effector.

    Parameters:
        robot_local_device_name: Local device name of the robot client
        pose_final (Pose): Target end-effector pose, expressed in `frame`
        frame (str): "world" or "robot". A "world" pose is transformed into
            the robot base frame using the stored origin calibration.
        robot_origin_calib_global_name: Name of the global variable holding
            the robot origin calibration (only read when frame == "world")
        speed_perc: Requested speed as a percentage of maximum
        final_seed: Optional seed joint angles for the inverse kinematics
            at the final pose

    Returns:
        A trajectory generator executing the move, or an EmptyGenerator
        when no trajectory is required.

    Raises:
        ValueError: If `frame` is not "world" or "robot".
    """
    robot = self.device_manager.get_device_client(robot_local_device_name)
    geom_util = GeometryUtil(client_obj=robot)
    if frame.lower() == "world":
        # Convert the desired world-frame pose into the robot base frame
        # using the stored robot origin calibration transform.
        var_storage = self.device_manager.get_device_client("variable_storage")
        robot_origin_pose = var_storage.getf_variable_value(
            "globals", robot_origin_calib_global_name).data
        T_rob = geom_util.named_pose_to_rox_transform(robot_origin_pose.pose)
        T_des1 = geom_util.pose_to_rox_transform(pose_final)
        T_des = T_rob.inv() * T_des1
        pose_final = geom_util.rox_transform_to_pose(T_des)
    elif frame.lower() == "robot":
        T_des = geom_util.pose_to_rox_transform(pose_final)
    else:
        # Was `assert False, ...` — asserts are stripped under `python -O`,
        # which would leave T_des undefined and fail later with NameError.
        raise ValueError(f"Unknown parent frame for movel: {frame}")
    robot_info = robot.robot_info
    rox_robot = self._robot_util.robot_info_to_rox_robot(robot_info, 0)
    robot_state = robot.robot_state.PeekInValue()[0]
    q_initial = robot_state.joint_position
    traj = self._generate_movel_trajectory(
        robot, rox_robot, q_initial, T_des, speed_perc, final_seed)
    if traj is None:
        # Already at the target pose; nothing to execute.
        return EmptyGenerator()
    return TrajectoryMoveGenerator(robot, rox_robot, traj, self._node)
def geometry_pose_inv(pose):
    """ Invert a pose

    Parameters:

    pose (Pose): The pose to invert

    Return (Pose): The inverted pose
    """
    util = GeometryUtil(node=PyriSandboxContext.node)
    transform = util.pose_to_rox_transform(_convert_to_pose(pose))
    return util.rox_transform_to_pose(transform.inv())
def geometry_pose_multiply(pose_a, pose_b):
    """ Multiply one pose with another

    Parameters:

    * pose_a (Pose): The first pose
    * pose_b (Pose): The second pose

    Return (Pose): The result of the multiplication
    """
    util = GeometryUtil(node=PyriSandboxContext.node)
    transform_a = util.pose_to_rox_transform(_convert_to_pose(pose_a))
    transform_b = util.pose_to_rox_transform(_convert_to_pose(pose_b))
    return util.rox_transform_to_pose(transform_a * transform_b)
def jog_joints_to_pose(self, pose, speed_perc):
    """Jog the robot to the joint configuration that reaches `pose`.

    Solves inverse kinematics from the current joint position and then
    delegates to jog_joints_with_limits, scaling the joint velocity
    limits by `speed_perc`.

    Parameters:
        pose (Pose): Desired end-effector pose in the robot base frame
        speed_perc: Requested speed as a percentage of the joint
            velocity limits

    Raises:
        RuntimeError: If the inverse kinematics solver fails to converge.
    """
    print("Jog Joints to Pose is called")
    robot = self.robot
    if robot is None:
        # Give an error message to show that the robot is not connected
        print("Robot is not connected to RoboticsJog service yet!")
        return
    # Use the local `robot` reference consistently (the original mixed
    # `robot` and `self.robot` after the None-check).
    robot_state, _ = robot.robot_state.PeekInValue()
    q_current = robot_state.joint_position
    geom_util = GeometryUtil(client_obj=robot)
    T_des = geom_util.pose_to_rox_transform(pose)
    q_des, res = invkin.update_ik_info3(self.robot_rox, T_des, q_current)
    if not res:
        # Was `assert res, ...` — asserts are stripped under `python -O`,
        # which would allow motion toward an invalid IK solution.
        raise RuntimeError("Inverse kinematics failed")
    self.jog_joints_with_limits(
        q_des, float(speed_perc) * 0.01 * self.joint_vel_limits, True)
# Build a NamedPose2D region-of-interest center and a Size2D for the
# detection request, then run aruco detection on a live camera capture
# and display the annotated result.
center = RRN.NewStructure("com.robotraconteur.geometry.NamedPose2D", c)
pose2d_dtype = RRN.GetNamedArrayDType("com.robotraconteur.geometry.Pose2D", c)
size2d_dtype = RRN.GetNamedArrayDType("com.robotraconteur.geometry.Size2D", c)

# Center pose: position in pixels, orientation in radians
center.pose = np.zeros((1,), dtype=pose2d_dtype)
center.pose[0]["position"]["x"] = 990
center.pose[0]["position"]["y"] = 64
center.pose[0]["orientation"] = np.deg2rad(10)
b.center = center

size = np.zeros((1,), dtype=size2d_dtype)
size[0]["width"] = 100
size[0]["height"] = 100
b.size = size

# Alternative: run detection against a previously stored image instead
# of a live capture:
# res = c.detect_aruco_stored_image("test18", "camera_intrinsic_calibration", "camera_extrinsic_calibration0", "DICT_4X4_1000", 200, 0.0375, None)
res = c.detect_aruco_camera_capture("camera", "camera_calibration_intrinsic", "camera_calibration_extrinsic", "DICT_4X4_1000", 200, 0.0375, None)

m = res.detected_markers[0]
print(m.marker_corners)
print(res.detected_markers)

xyz, rpy = geom_util.pose_to_xyz_rpy(m.marker_pose.pose)
T = geom_util.pose_to_rox_transform(m.marker_pose.pose)

# Decode the annotated display image and show it until a key is pressed
img_util = ImageUtil(client_obj=c)
img = img_util.compressed_image_to_array(res.display_image)
cv2.imshow("", img)
cv2.waitKey()
cv2.destroyAllWindows()
def calibrate(images, joint_poses, aruco_dict, aruco_id, aruco_markersize, flange_to_marker, mtx, dist, cam_pose, rox_robot, robot_local_device_name):
    """Extrinsic camera calibration from aruco marker observations.

    For each image/joint-pose pair, detects the aruco marker with id
    `aruco_id`, computes the marker corner positions in the robot base
    frame via forward kinematics and the flange-to-marker transform, and
    collects 3D/2D correspondences. A single solvePnP over all
    correspondences then yields the camera-to-robot-base transform, which
    is composed with `cam_pose` to produce the result pose.

    Parameters:
        images: Sequence of images (one per robot pose)
        joint_poses: Sequence of joint angle vectors matching `images`
        aruco_dict (str): Aruco dictionary name, must start with "DICT_"
        aruco_id: Marker id to use; other detections are discarded
        aruco_markersize: Marker side length (same units as robot kinematics)
        flange_to_marker (Pose): Transform from robot flange to marker
        mtx, dist: Intrinsic camera matrix and distortion coefficients
        cam_pose (rox.Transform): Camera pose used to lift the result to
            the world frame
        rox_robot: Robot kinematic model for rox.fwdkin
        robot_local_device_name: Frame name assigned to the robot base

    Returns:
        Tuple of (named pose result, 6x6 covariance (fixed 1e-5*I),
        list of annotated compressed JPEG images, 0.0 error placeholder).
    """
    assert aruco_dict.startswith("DICT_"), "Invalid aruco dictionary name"
    aruco_dict = getattr(aruco, aruco_dict)  # convert string to the aruco enum value
    aruco_dict = cv2.aruco.Dictionary_get(aruco_dict)
    aruco_params = cv2.aruco.DetectorParameters_create()
    i = 0  # detection counter (not otherwise used)
    imgs_out = []  # annotated display images
    geom_util = GeometryUtil()
    image_util = ImageUtil()
    object_points = []  # marker corners in robot base frame (3D)
    image_points = []   # corresponding detected corners in the image (2D)
    for img, joints in zip(images, joint_poses):
        # Find the aruco tag corners
        corners, ids, rejected = cv2.aruco.detectMarkers(
            img, aruco_dict, parameters=aruco_params)
        if len(corners) > 0:
            # Keep only detections matching the requested marker id
            indexes = np.flatnonzero(ids.flatten() == aruco_id).tolist()
            corners = [corners[index] for index in indexes]
            ids = np.asarray([ids[index] for index in indexes])
            if len(ids) > 0:
                # Select the first matching detection, discard the others.
                # corners is now a single (1, 4, 2) array of pixel corners
                # (top-left, top-right, bottom-right, bottom-left order).
                corners = corners[0]
                # Estimate the pose of the detected marker in camera frame
                # (rvec/tvec only used here for the debug axis overlay)
                rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                    corners, aruco_markersize, mtx, dist)
                img1 = img.copy()
                img_out = cv2.aruco.drawAxis(img1, mtx, dist, rvec, tvec,
                                             aruco_markersize * 0.75)  # Draw Axis
                imgs_out.append(img_out)
                # Marker pose in the robot base frame from forward kinematics
                transform_base_2_flange = rox.fwdkin(rox_robot, joints)
                transform_flange_2_marker = geom_util.pose_to_rox_transform(
                    flange_to_marker)
                transform_base_2_marker = transform_base_2_flange * transform_flange_2_marker
                transform_base_2_marker_corners = _marker_corner_poses(
                    transform_base_2_marker, aruco_markersize)
                # Accumulate the four corner correspondences
                # (assumes _marker_corner_poses orders corners the same way
                # detectMarkers does — TODO confirm)
                for j in range(4):
                    object_points.append(transform_base_2_marker_corners[j].p)
                    image_points.append(corners[0, j])
                i += 1
    # NOTE(review): if no marker was ever detected these arrays are empty
    # and solvePnP will fail — presumably guaranteed by the caller.
    object_points_np = np.array(object_points, dtype=np.float64)
    image_points_np = np.array(image_points, dtype=np.float32)
    # Finally execute the calibration over all accumulated correspondences
    retval, rvec, tvec = cv2.solvePnP(object_points_np, image_points_np, mtx, dist)
    R_cam2base = cv2.Rodrigues(rvec)[0]
    T_cam2base = tvec
    # Add another display image of marker axes at the solved robot base
    # (uses `img` from the last loop iteration)
    img_out = cv2.aruco.drawAxis(img, mtx, dist, cv2.Rodrigues(R_cam2base)[0],
                                 T_cam2base, aruco_markersize * 0.75)  # Draw Axis
    imgs_out.append(img_out)
    rox_transform_cam2base = rox.Transform(R_cam2base, T_cam2base,
                                           cam_pose.parent_frame_id,
                                           robot_local_device_name)
    # Lift the camera-frame result into the world frame via the camera pose
    rox_transform_world2base = cam_pose * rox_transform_cam2base
    # Inverse transform components, printed below for debugging only
    R_base2cam = rox_transform_world2base.inv().R
    T_base2cam = rox_transform_world2base.inv().p
    # debug
    print("FINAL RESULTS: ")
    print("str(R_cam2base)")
    print(str(R_cam2base))
    print("str(T_cam2base)")
    print(str(T_cam2base))
    print("str(R_base2cam)")
    print(str(R_base2cam))
    print("str(T_base2cam)")
    print(str(T_base2cam))
    pose_res = geom_util.rox_transform_to_named_pose(rox_transform_world2base)
    # Fixed nominal covariance — not estimated from the residuals
    cov = np.eye(6) * 1e-5
    imgs_out2 = [
        image_util.array_to_compressed_image_jpg(i, 70) for i in imgs_out
    ]
    return pose_res, cov, imgs_out2, 0.0