class HandeyeCalibrationCommander:
    def __init__(self):
        self.client = HandeyeClient()

    def _take_menu(self):
        print('Press SPACE to take a sample or ENTER to continue\n')
        i = getchar()
        if i == ' ':
            self.client.take_sample()

    def _display_sample_list(self, sample_list):
        for i in range(len(sample_list.hand_world_samples.transforms)):
            print('{}) \n hand->world {} \n camera->marker {}\n'.format(
                i,
                sample_list.hand_world_samples.transforms[i],
                sample_list.camera_marker_samples.transforms[i]))

    def _edit_menu(self):
        while len(self.client.get_sample_list().hand_world_samples.transforms) > 0:
            prompt_str = 'Press a number and ENTER to delete the respective sample, or ENTER to continue:\n'
            self._display_sample_list(self.client.get_sample_list())
            sample_to_delete = raw_input(prompt_str)
            if sample_to_delete.isdigit():
                self.client.remove_sample(int(sample_to_delete))
            else:
                break

    def _save_menu(self):
        print('Press c to compute the calibration or ENTER to continue\n')
        i = getchar()
        if i == 'c':
            cal = self.client.compute_calibration()
            print(cal)
        print('Press s to save the calibration to parameters and namespace, q to quit or ENTER to continue\n')
        i = getchar()
        if i == 's':
            self.client.save()
        elif i == 'q':
            quit()

    def _interactive_menu(self):
        self._take_menu()
        self._edit_menu()
        self._save_menu()

    def spin_interactive(self):
        # the sample list might not be empty when we start the commander
        self._edit_menu()
        while not rospy.is_shutdown():
            self._interactive_menu()
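
# Illustrative usage only (not part of the original script): a minimal sketch of how
# HandeyeCalibrationCommander is typically driven, assuming this file already imports
# rospy and HandeyeClient and that an easy_handeye calibration server is running in
# the current namespace. The node name is an arbitrary example.
if __name__ == '__main__':
    rospy.init_node('easy_handeye_commander')
    commander = HandeyeCalibrationCommander()
    commander.spin_interactive()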
class RqtHandeyeCalibration(Plugin):
    def __init__(self, context):
        super(RqtHandeyeCalibration, self).__init__(context)
        # Give QObjects reasonable names
        self.setObjectName('HandeyeCalibration')

        # Process standalone plugin command-line arguments
        from argparse import ArgumentParser
        parser = ArgumentParser()
        # Add argument(s) to the parser.
        parser.add_argument("-q", "--quiet", action="store_true",
                            dest="quiet",
                            help="Put plugin in silent mode")
        args, unknowns = parser.parse_known_args(context.argv())
        if not args.quiet:
            print('arguments: ', args)
            print('unknowns: ', unknowns)

        # Create QWidget
        self._widget = QWidget()
        # Get path to UI file which should be in the "resource" folder of this package
        ui_file = os.path.join(rospkg.RosPack().get_path('rqt_easy_handeye'),
                               'resource', 'rqt_handeye.ui')
        # Extend the widget with all attributes and children from UI file
        loadUi(ui_file, self._widget)
        # Give QObjects reasonable names
        self._widget.setObjectName('RqtHandeyeCalibrationUI')
        # Show _widget.windowTitle on left-top of each plugin (when
        # it's set in _widget). This is useful when you open multiple
        # plugins at once. Also if you open multiple instances of your
        # plugin at once, these lines add number to make it easy to
        # tell from pane to pane.
        if context.serial_number() > 1:
            self._widget.setWindowTitle(self._widget.windowTitle() +
                                        (' (%d)' % context.serial_number()))
        # Add widget to the user interface
        context.add_widget(self._widget)

        self.client = HandeyeClient()

        self._widget.calibNameLineEdit.setText(rospy.get_namespace())
        self._widget.trackingBaseFrameLineEdit.setText(self.client.tracking_base_frame)
        self._widget.trackingMarkerFrameLineEdit.setText(self.client.tracking_marker_frame)
        self._widget.robotBaseFrameLineEdit.setText(self.client.robot_base_frame)
        self._widget.robotEffectorFrameLineEdit.setText(self.client.robot_effector_frame)
        if self.client.eye_on_hand:
            self._widget.calibTypeLineEdit.setText("eye on hand")
        else:
            self._widget.calibTypeLineEdit.setText("eye on base")

        self._widget.takeButton.clicked[bool].connect(self.handle_take_sample)
        self._widget.removeButton.clicked[bool].connect(self.handle_remove_sample)
        self._widget.computeButton.clicked[bool].connect(self.handle_compute_calibration)
        self._widget.saveButton.clicked[bool].connect(self.handle_save_calibration)

    def shutdown_plugin(self):
        # TODO unregister all publishers here
        pass

    def save_settings(self, plugin_settings, instance_settings):
        # TODO save intrinsic configuration, usually using:
        # instance_settings.set_value(k, v)
        pass

    def restore_settings(self, plugin_settings, instance_settings):
        # TODO restore intrinsic configuration, usually using:
        # v = instance_settings.value(k)
        pass

    # def trigger_configuration(self):
        # Comment in to signal that the plugin has a way to configure
        # This will enable a setting button (gear icon) in each dock widget title bar
        # Usually used to open a modal configuration dialog

    def _display_sample_list(self, sample_list):
        self._widget.sampleListWidget.clear()
        for i in range(len(sample_list.hand_world_samples.transforms)):
            self._widget.sampleListWidget.addItem(
                '{}) \n hand->world {} \n camera->marker {}\n'.format(
                    i + 1,
                    sample_list.hand_world_samples.transforms[i],
                    sample_list.camera_marker_samples.transforms[i]))
        self._widget.sampleListWidget.setCurrentRow(
            len(sample_list.hand_world_samples.transforms) - 1)

    def handle_take_sample(self):
        sample_list = self.client.take_sample()
        self._display_sample_list(sample_list)

    def handle_remove_sample(self):
        index = self._widget.sampleListWidget.currentRow()
        sample_list = self.client.remove_sample(index)
        self._display_sample_list(sample_list)

    def handle_compute_calibration(self):
        result = self.client.compute_calibration()
        self._widget.outputBox.setPlainText(str(result.calibration.transform.transform))

    def handle_save_calibration(self):
        self.client.save()
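
# Illustrative usage only (assumption, not from the original file): the plugin is normally
# loaded from within rqt, but it can also be started standalone via rqt_gui's Main helper.
# The plugin name string below is an assumed example and may differ from the real package.
import sys

from rqt_gui.main import Main

if __name__ == '__main__':
    plugin = 'rqt_easy_handeye'
    main = Main(filename=plugin)
    sys.exit(main.main(standalone=plugin))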
class AutomaticMovements():
    # states of the automatic-movement state machine
    NOT_INITED_YET = 0
    BAD_PLAN = 1
    GOOD_PLAN = 2
    MOVED_TO_POSE = 3
    BAD_STARTING_POSITION = 4
    GOOD_STARTING_POSITION = 5
    SAMPLE_TOOK = 6
    MOVEMENT_FAILED = 7

    def __init__(self):
        self.handeye_client = HandeyeClient()
        self.state = AutomaticMovements.NOT_INITED_YET
        self.samples_taken = 0
        self.current_target_pose = -1  # -1 is home
        self.target_poses = None

    def handle_check_pose(self):
        check_pose_result = self.handeye_client.check_starting_pose()
        if check_pose_result.can_calibrate:
            print('{} Good starting pose'.format(rospy.get_namespace()))
            self.state = AutomaticMovements.GOOD_STARTING_POSITION
        else:
            print('{} Bad starting pose'.format(rospy.get_namespace()))
            self.state = AutomaticMovements.BAD_STARTING_POSITION
        self.current_target_pose = check_pose_result.target_poses.current_target_pose_index
        self.target_poses = check_pose_result.target_poses.target_poses
        self.plan_was_successful = None
        print(self.current_target_pose)

    def handle_next_pose(self):
        next_pose_result = self.handeye_client.select_target_pose(self.current_target_pose + 1)
        self.current_target_pose = next_pose_result.target_poses.current_target_pose_index
        self.target_poses = next_pose_result.target_poses.target_poses
        self.plan_was_successful = None
        self.state = AutomaticMovements.GOOD_STARTING_POSITION
        print("{} Current target pose {}".format(rospy.get_namespace(), self.current_target_pose))

    def handle_plan(self):
        plan_result = self.handeye_client.plan_to_selected_target_pose()
        self.plan_was_successful = plan_result.success
        if self.plan_was_successful:
            print("{} Plan successful".format(rospy.get_namespace()))
            self.state = AutomaticMovements.GOOD_PLAN
        else:
            print("{} Plan failed".format(rospy.get_namespace()))
            self.state = AutomaticMovements.BAD_PLAN

    def handle_execute(self):
        if self.plan_was_successful:
            execution_result = self.handeye_client.execute_plan()
            if execution_result.success:
                self.state = AutomaticMovements.MOVED_TO_POSE
                print("{} Execution successful".format(rospy.get_namespace()))
            else:
                self.state = AutomaticMovements.MOVEMENT_FAILED
                print("{} Execution failed".format(rospy.get_namespace()))

    def handle_take_sample(self):
        sample_list = self.handeye_client.take_sample()
        self.samples_taken += 1
        print("{} Sample {} taken".format(rospy.get_namespace(), self.samples_taken))
        self.state = AutomaticMovements.SAMPLE_TOOK

    def handle_compute_calibration(self):
        result = self.handeye_client.compute_calibration()
        if result.valid:
            print("{} Calibration successful".format(rospy.get_namespace()))
        else:
            print("{} Calibration failed".format(rospy.get_namespace()))

    def handle_save_calibration(self):
        self.handeye_client.save()
        print("{} Calibration saved".format(rospy.get_namespace()))

    def handle_logic(self):
        if self.state == AutomaticMovements.NOT_INITED_YET:
            self.handle_check_pose()

        # cycle plan -> execute -> sample -> next pose until enough samples are taken
        while self.samples_taken < 15:
            if self.state == AutomaticMovements.SAMPLE_TOOK:
                self.handle_next_pose()
            if self.state == AutomaticMovements.GOOD_STARTING_POSITION:
                self.handle_plan()
            if self.state == AutomaticMovements.GOOD_PLAN:
                self.handle_execute()
            if self.state == AutomaticMovements.MOVED_TO_POSE:
                rospy.sleep(1.)
                self.handle_take_sample()

        self.handle_compute_calibration()
        self.handle_save_calibration()
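
# Illustrative usage only (assumption, not part of the original class): a minimal driver
# for the automatic-movement state machine, assuming rospy and HandeyeClient are imported
# and the easy_handeye server plus its movement services are running in this namespace.
# The node name is an arbitrary example.
if __name__ == '__main__':
    rospy.init_node('automatic_handeye_calibration')
    mover = AutomaticMovements()
    mover.handle_logic()  # take 15 samples, then compute and save the calibration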
class Calibration(object):
    """Calibration"""
    node_name = "CALIBRATION"

    def __init__(self, node_name, playback=False):
        # TODO: we shouldn't have to run this node in a separate calibration namespace; it would make things much simpler
        self.robot_ns = 'px150'
        self.node_name = node_name
        self.playback = playback
        self.command = None

        if self.playback:
            rospy.loginfo("[{0}] Calibration launched in playback mode!".format(self.node_name))

        rospy.sleep(5)

        rospy.logdebug("[CALIBRATE] initializing hand eye client")
        self.client = HandeyeClient()

        self.experiment_info = ExperimentInfo(self.node_name, namespace=self.robot_ns, id="initial_calibration")

        # Listen
        rospy.logdebug("[CALIBRATE] initializing tf2_ros buffer")
        self.tfBuffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tfBuffer)

        self.calibration_frame = rospy.get_param('/' + self.robot_ns + '/' + 'robot_base_frame')
        self.planning_frame = rospy.get_param('/' + self.robot_ns + '/' + 'planning_frame')

        self.pose_array = None

        self.pub_e_out = rospy.Publisher("~e_out", FlexGraspErrorCodes, queue_size=10, latch=True)

        move_robot_topic = '/' + self.robot_ns + '/' + 'move_robot'
        robot_pose_topic = '/' + self.robot_ns + '/' + 'robot_pose'
        pose_array_topic = '/' + self.robot_ns + '/' + 'pose_array'

        self.move_robot_communication = Communication(move_robot_topic, timeout=15, node_name=self.node_name)
        self.robot_pose_publisher = rospy.Publisher(robot_pose_topic, PoseStamped, queue_size=10, latch=False)
        self.pose_array_publisher = rospy.Publisher(pose_array_topic, PoseArray, queue_size=5, latch=True)

        self.output_logger = DataLogger(self.node_name,
                                        {"calibration": "calibration_transform"},
                                        {"calibration": TransformStamped},
                                        bag_name=self.node_name)

        # Subscribe
        rospy.Subscriber("~e_in", String, self.e_in_cb)

        # Subscribe to aruco tracker for it to publish the tf
        rospy.Subscriber('/' + self.robot_ns + '/' + 'aruco_tracker/pose', PoseStamped, self.aruco_tracker_cb)

    def e_in_cb(self, msg):
        if self.command is None:
            self.command = msg.data
            rospy.logdebug("[{0}] Received event message: {1}".format(self.node_name, self.command))

            # reset outputting message
            msg = FlexGraspErrorCodes()
            msg.val = FlexGraspErrorCodes.NONE
            self.pub_e_out.publish(msg)

    def aruco_tracker_cb(self, msg):
        pass

    def init_poses_1(self):
        r_amplitude = 0.00
        z_amplitude = 0.00

        r_min = 0.24
        z_min = 0.28  # 0.05

        pos_intervals = 1
        if pos_intervals == 1:
            r_vec = [r_min + r_amplitude]  # np.linspace(x_min, x_min + 2*x_amplitude, 2) #
            z_vec = [z_min + z_amplitude]
        else:
            r_vec = np.linspace(r_min, r_min + 2 * r_amplitude, pos_intervals)
            z_vec = np.linspace(z_min, z_min + 2 * z_amplitude, pos_intervals)

        ai_amplitude = np.deg2rad(38.0)
        aj_amplitude = np.deg2rad(38.0)
        ak_amplitude = np.deg2rad(15.0)

        rot_intervals = 2
        ai_vec = np.linspace(-ai_amplitude, ai_amplitude, rot_intervals)
        aj_vec = np.linspace(-aj_amplitude, aj_amplitude, rot_intervals)
        ak_vec = [-ak_amplitude, ak_amplitude]

        return self.generate_poses(r_vec, z_vec, ai_vec, aj_vec, ak_vec)

    def init_poses_2(self):
        surface_height = 0.019
        height_finger = 0.040  # [m]
        finger_link2ee_link = 0.023  # [m]
        grasp_height = height_finger + finger_link2ee_link - surface_height
        sag_angle = np.deg2rad(6.0)  # [deg]

        r_amplitude = 0.08
        z_amplitude = 0.00

        r_min = 0.10
        z_min = grasp_height  # 0.05

        pos_intervals = 3
        if pos_intervals == 1:
            r_vec = [r_min + r_amplitude]  # np.linspace(x_min, x_min + 2*x_amplitude, 2) #
            z_vec = [z_min + z_amplitude]
        else:
            r_vec = np.linspace(r_min, r_min + 2 * r_amplitude, pos_intervals)
            z_vec = np.linspace(z_min, z_min + 2 * z_amplitude, pos_intervals) + np.tan(sag_angle) * r_vec

        ak_amplitude = np.deg2rad(15.0)

        rot_intervals = 2
        ai_vec = [np.deg2rad(0)]
        aj_vec = [np.deg2rad(90)]
        ak_vec = [-ak_amplitude, ak_amplitude]

        return self.generate_poses_2(r_vec, z_vec, ai_vec, aj_vec, ak_vec)

    def generate_poses(self, r_vec, z_vec, ai_vec, aj_vec, ak_vec):
        pose_array = PoseArray()
        pose_array.header.frame_id = self.calibration_frame
        pose_array.header.stamp = rospy.Time.now()

        poses = []
        for ak in ak_vec:
            for r in r_vec:
                for z in z_vec:
                    for aj in aj_vec:
                        for ai in ai_vec:
                            pose = Pose()
                            x = r * np.cos(ak)
                            y = r * np.sin(ak)
                            pose.position = list_to_position([x, y, z])
                            pose.orientation = list_to_orientation([ai, aj, ak])
                            poses.append(pose)

        pose_array.poses = poses

        self.pose_array_publisher.publish(pose_array)
        self.pose_array = pose_array
        return FlexGraspErrorCodes.SUCCESS

    def generate_poses_2(self, r_vec, z_vec, ai_vec, aj_vec, ak_vec):
        pose_array = PoseArray()
        pose_array.header.frame_id = self.calibration_frame
        pose_array.header.stamp = rospy.Time.now()

        poses = []
        for ak in ak_vec:
            for r, z in zip(r_vec, z_vec):
                for aj in aj_vec:
                    for ai in ai_vec:
                        pose = Pose()
                        x = r * np.cos(ak)
                        y = r * np.sin(ak)
                        pose.position = list_to_position([x, y, z])
                        pose.orientation = list_to_orientation([ai, aj, ak])
                        poses.append(pose)

        pose_array.poses = poses

        self.pose_array_publisher.publish(pose_array)
        self.pose_array = pose_array
        return FlexGraspErrorCodes.SUCCESS

    def calibrate(self, track_marker=True):
        sample_list = self.client.get_sample_list().camera_marker_samples
        n_samples = len(sample_list)
        if n_samples > 0:
            rospy.loginfo("[{0}] Found {1} old calibration samples, deleting them before recalibrating!".format(self.node_name, n_samples))
            for i in reversed(range(n_samples)):
                rospy.loginfo("[{0}] Deleting sample {1}".format(self.node_name, i))
                self.client.remove_sample(i)

            sample_list = self.client.get_sample_list().camera_marker_samples
            n_samples = len(sample_list)
            if n_samples > 0:
                rospy.logwarn("[{0}] Failed to remove all old samples, cannot calibrate".format(self.node_name))
                print(sample_list)
                return FlexGraspErrorCodes.FAILURE
            else:
                rospy.loginfo("[{0}] All old samples have been removed".format(self.node_name))

        if self.playback:
            rospy.loginfo("[{0}] Playback is active: publishing messages from bag!".format(self.node_name))
            messages = self.output_logger.load_messages_from_bag(self.experiment_info.path, self.experiment_info.id)
            if messages is not None:
                self.broadcast(messages['calibration'])
                return FlexGraspErrorCodes.SUCCESS
            else:
                return FlexGraspErrorCodes.FAILURE

        if self.pose_array is None:
            rospy.logwarn("[CALIBRATE] pose_array is still empty")
            return FlexGraspErrorCodes.REQUIRED_DATA_MISSING

        try:
            trans = self.tfBuffer.lookup_transform(self.planning_frame,
                                                   self.pose_array.header.frame_id,
                                                   rospy.Time(0))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            rospy.logwarn("[CALIBRATE] failed to get transform from %s to %s",
                          self.pose_array.header.frame_id, self.planning_frame)
            return FlexGraspErrorCodes.TRANSFORM_POSE_FAILED

        result = self.move_robot_communication.wait_for_result("reset")

        for pose in self.pose_array.poses:
            if rospy.is_shutdown():
                return FlexGraspErrorCodes.SHUTDOWN

            pose_stamped = PoseStamped()
            pose_stamped.header = self.pose_array.header
            pose_stamped.pose = pose

            # transform to planning frame
            pose_trans = tf2_geometry_msgs.do_transform_pose(pose_stamped, trans)

            self.robot_pose_publisher.publish(pose_trans)
            result = self.move_robot_communication.wait_for_result("move_manipulator")  # timeout = 5?

            if result == FlexGraspErrorCodes.SUCCESS:
                if track_marker:
                    # camera delay + wait a small amount of time for vibrations to stop
                    rospy.sleep(1.5)
                    try:
                        self.client.take_sample()
                    except:
                        rospy.logwarn("[CALIBRATE] Failed to take sample, marker might not be visible.")
            elif result == FlexGraspErrorCodes.DYNAMIXEL_ERROR:
                rospy.logwarn("[{0}] Dynamixel error triggered, returning error".format(self.node_name))
                return result
            elif result == FlexGraspErrorCodes.DYNAMIXEL_SEVERE_ERROR:
                rospy.logwarn("[{0}] Dynamixel error triggered, returning error".format(self.node_name))
                return result

        # reset
        result = self.move_robot_communication.wait_for_result("home")

        # compute calibration transform
        if not track_marker:
            return FlexGraspErrorCodes.SUCCESS
        else:
            calibration_transform = self.client.compute_calibration()

            if calibration_transform.valid:
                rospy.loginfo("[CALIBRATE] Found valid transform")
                self.broadcast(calibration_transform.calibration.transform)
                self.client.save()
                return FlexGraspErrorCodes.SUCCESS
            else:
                rospy.logwarn("[CALIBRATE] Computed calibration is invalid")
                return FlexGraspErrorCodes.FAILURE

    def broadcast(self, transform):
        rospy.loginfo("[{0}] Broadcasting result".format(self.node_name))
        broadcaster = tf2_ros.StaticTransformBroadcaster()
        broadcaster.sendTransform(transform)

        if not self.playback:
            self.output_logger.write_messages_to_bag({"calibration": transform},
                                                     self.experiment_info.path,
                                                     self.experiment_info.id)

    def take_action(self):
        msg = FlexGraspErrorCodes()
        result = None

        if self.command == 'e_init':
            result = FlexGraspErrorCodes.SUCCESS

        elif self.command == 'calibrate':
            result = self.init_poses_1()
            if result == FlexGraspErrorCodes.SUCCESS:
                result = self.calibrate()

        elif self.command == 'calibrate_height':
            result = self.init_poses_2()
            if result == FlexGraspErrorCodes.SUCCESS:
                result = self.calibrate(track_marker=False)

        elif self.command is not None:
            rospy.logwarn("[CALIBRATE] Can not take an action: received unknown command %s!", self.command)
            result = FlexGraspErrorCodes.UNKNOWN_COMMAND

        # publish success
        if result is not None:
            msg.val = result
            flex_grasp_error_log(result, self.node_name)
            self.pub_e_out.publish(msg)
            self.command = None
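
# Illustrative usage only (assumption, not from the original file): a minimal main loop for
# the Calibration node in the style of other event-driven flex_grasp nodes, polling
# take_action() at a fixed rate. The node name and rate are arbitrary examples.
if __name__ == '__main__':
    rospy.init_node('calibration', log_level=rospy.INFO)
    calibration = Calibration('calibration', playback=False)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        calibration.take_action()
        rate.sleep()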