import os
import json

import cv2
import numpy as np
import rospy
import tf2_ros
import tf2_geometry_msgs

from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import String, Float32
from sensor_msgs.msg import Image, CameraInfo, PointCloud2
from geometry_msgs.msg import Pose, PoseStamped, PoseArray, TransformStamped

# NOTE: package-local helpers used below (DataLogger, ExperimentInfo, PoseGenerator,
# ProcessImage, FlexGraspErrorCodes, HandeyeClient, Communication, and the
# pose/point conversion utilities) come from the package's own modules; their exact
# import paths are omitted in this excerpt.


class TransformPose(object):

    def __init__(self, node_name, playback=False):
        self.node_name = node_name

        # state
        self.playback = playback
        if self.playback:
            rospy.loginfo("[{0}] Transform pose launched in playback mode!".format(self.node_name))

        self.simulation = rospy.get_param("robot_sim")
        self.robot_base_frame = rospy.get_param('robot_base_frame')
        self.planning_frame = rospy.get_param('planning_frame')

        # the sag_angle is used to take into account the sagging of the robot during operation
        if self.simulation:
            self.sag_angle = None
            self.peduncle_height = 0.070  # [m]
        else:
            self.sag_angle = np.deg2rad(6.0)
            self.peduncle_height = 0.08  # 75 # [m]

        # To determine the grasping height we need several dimensions of the manipulator
        self.surface_height = 0.019  # [m]
        height_finger = 0.040  # [m]
        finger_link2ee_link = 0.023  # [m]
        height_finger_tip = 0.007  # [m]
        diameter_peduncle = 0.004  # [m]
        pre_grasp_distance = 0.04  # [m]

        grasp_height = height_finger + finger_link2ee_link - height_finger_tip - diameter_peduncle  # = 0.052 [m]
        pre_grasp_height = grasp_height + pre_grasp_distance

        grasp_xyz = [0, 0, grasp_height]  # [m]
        pre_grasp_xyz = [0, 0, pre_grasp_height]  # [m]
        place_xyz = grasp_xyz
        pre_place_xyz = pre_grasp_xyz
        ee_rpy = [0, np.pi/2, 0]  # neutral rotation of ee relative to the robot base frame

        self.wrist_limits = [-np.pi/2, np.pi/2]  # [lower_limit, upper_limit]

        # for generating a place pose
        self.pose_generator = PoseGenerator(r_range=[0.15, 0.23], x_min=0.17,
                                            frame=self.robot_base_frame, seed=self.node_name)

        self.exp_info = ExperimentInfo(self.node_name)

        # Initialize Publishers
        keys = ['pre_grasp', 'grasp', 'pre_place', 'place']
        topics_out = {}
        types_out = {}
        for key in keys:
            topics_out[key] = key + '_pose'
            types_out[key] = PoseStamped

        self.action_pose = dict.fromkeys(keys)
        self.output_logger = DataLogger(self.node_name, topics_out, types_out, bag_name=self.node_name)
        self.settings_logger = DataLogger(self.node_name, {"settings": "peduncle_height"},
                                          {"settings": Float32}, bag_name='peduncle_height')

        peduncle_height_pub = rospy.Publisher('peduncle_height', Float32, queue_size=5, latch=True)
        msg = Float32()
        msg.data = self.peduncle_height
        peduncle_height_pub.publish(msg)

        rospy.Subscriber("peduncle_height", Float32, self.peduncle_height_cb)

        self.pose_transform = {'pre_grasp': point_to_pose(pre_grasp_xyz, ee_rpy),
                               'grasp': point_to_pose(grasp_xyz, ee_rpy),
                               'pre_place': point_to_pose(pre_place_xyz, ee_rpy),
                               'place': point_to_pose(place_xyz, ee_rpy)}

        self.tfBuffer = tf2_ros.Buffer()
        # keep a reference to the listener so it is not garbage collected
        self.listener = tf2_ros.TransformListener(self.tfBuffer)

    def peduncle_height_cb(self, msg):
        self.peduncle_height = msg.data
        rospy.logdebug("[%s] Received new peduncle height: %s", self.node_name, self.peduncle_height)

    def get_object_height(self):
        """Return the object height, relative to the robot_base frame."""
        return self.peduncle_height - self.surface_height

    def transform_current_object_pose(self, current_object_pose):
        """Transform the current pose of the target object to the robot_base_frame."""
        current_object_pose = self.transform_pose(current_object_pose, self.robot_base_frame)
        object_position, object_orientation = pose_to_lists(current_object_pose.pose, 'euler')

        # override the grasp height using the defined peduncle and surface height
        current_object_pose.pose.position = list_to_position((object_position[0], object_position[1],
                                                              self.get_object_height()))
        current_object_pose = self.limit_wrist_angle(current_object_pose)
        return current_object_pose

    def get_target_object_pose(self):
        """Generate a random place pose."""
        self.pose_generator.z = self.get_object_height()
        target_object_pose = self.pose_generator.generate_pose_stamped(seed=int(int(self.exp_info.id)*np.pi*10**2))
        target_object_pose = self.limit_wrist_angle(target_object_pose)
        return target_object_pose

    def generate_action_poses(self, current_object_pose):
        """Generate action poses for the pre_grasp, grasp, pre_place and place actions."""
        if self.playback:
            rospy.loginfo("[{0}] Playback is active: publishing messages from bag!".format(self.node_name))
            success = self.output_logger.publish_messages_from_bag(self.exp_info.path, self.exp_info.id)
            return success

        if current_object_pose is None:
            rospy.logwarn("[%s] Cannot transform pose, since object_pose is still empty!", self.node_name)
            return FlexGraspErrorCodes.TRANSFORM_POSE_FAILED

        peduncle_height = Float32()
        peduncle_height.data = self.peduncle_height
        self.settings_logger.write_messages_to_bag({"settings": peduncle_height},
                                                   self.exp_info.path, self.exp_info.id)

        current_object_pose = self.transform_current_object_pose(current_object_pose)
        target_object_pose = self.get_target_object_pose()

        # transform to planning frame
        current_object_pose = self.transform_pose(current_object_pose, self.planning_frame)
        target_object_pose = self.transform_pose(target_object_pose, self.planning_frame)

        action_pose = {}
        for key in self.pose_transform:
            added_pose = PoseStamped()
            if 'grasp' in key:
                pose_stamped = current_object_pose
            elif 'place' in key:
                pose_stamped = target_object_pose
            else:
                rospy.logwarn("[%s] Failed to add pose stamped: unknown key", self.node_name)
                return FlexGraspErrorCodes.FAILURE

            added_pose.header.frame_id = pose_stamped.header.frame_id
            added_pose.header.stamp = pose_stamped.header.stamp
            added_pose.pose = add_pose(self.pose_transform[key], pose_stamped.pose)

            if added_pose.pose is None:
                rospy.logwarn("[%s] Failed to add pose stamped", self.node_name)
                return FlexGraspErrorCodes.FAILURE
            else:
                action_pose[key] = added_pose

        for key in action_pose:
            rospy.logdebug("[%s] %s height is %s [m]", self.node_name, key, action_pose[key].pose.position.z)

        # compensate for sagging of the real robot
        if self.sag_angle is not None:
            for key in action_pose:
                action_pose[key] = self.compensate_for_sagging(action_pose[key])

        for key in action_pose:
            action_pose[key] = self.transform_pose(action_pose[key], self.robot_base_frame)

        self.action_pose = action_pose
        success = self.output_logger.publish_messages(self.action_pose, self.exp_info.path, self.exp_info.id)
        return success

    def compensate_for_sagging(self, pose_stamped):
        """The real robot bends under its own weight; therefore it may be desired to
        slightly increase the target pose height."""
        x = pose_stamped.pose.position.x
        y = pose_stamped.pose.position.y
        r = (x**2 + y**2)**0.5
        delta_height = np.tan(self.sag_angle)*r
        rospy.logdebug("[%s] Adding %s [m] height due to radius %s [m]", self.node_name, delta_height, r)
        pose_stamped.pose.position.z += delta_height
        return pose_stamped

    def limit_wrist_angle(self, object_pose):
        """Limit the orientation around the z-axis of a given pose_stamped, such that
        the desired wrist angle lies within the joint limits."""
        # TODO: currently this method only works if the angle_wrist lies within a 180 degree range of the limits!
        if not object_pose.header.frame_id == self.robot_base_frame:
            rospy.logwarn("Cannot limit orientation, pose is not given with respect to %s frame",
                          self.robot_base_frame)
            return object_pose

        object_position, object_orientation = pose_to_lists(object_pose.pose, 'euler')
        angle_object = object_orientation[2]
        angle_base = np.arctan2(object_position[1], object_position[0])
        angle_wrist = angle_object - angle_base

        if not (self.wrist_limits[0] < angle_wrist < self.wrist_limits[1]):
            angle_object = angle_object + np.pi

        object_pose.pose.orientation = list_to_orientation([0, 0, angle_object])
        return object_pose

    def transform_pose(self, pose_stamped, to_frame):
        original_frame = pose_stamped.header.frame_id
        try:
            transform = self.tfBuffer.lookup_transform(to_frame, original_frame, time=rospy.Time.now())
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            rospy.logwarn("[%s] Cannot transform pose, failed to lookup transform from %s to %s!",
                          self.node_name, original_frame, to_frame)
            return FlexGraspErrorCodes.TRANSFORM_POSE_FAILED
        return tf2_geometry_msgs.do_transform_pose(pose_stamped, transform)
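# Usage sketch (hypothetical, not part of the original code): one way a node script
# could drive TransformPose. The node name, the 'truss_pose' input topic and the
# single wait_for_message call are illustrative assumptions; in the real system the
# object pose arrives from the object detection node.
if __name__ == '__main__':
    rospy.init_node('transform_pose')
    transform_pose = TransformPose('transform_pose', playback=rospy.get_param('playback', False))
    # wait once for an object pose, then generate pre_grasp/grasp/pre_place/place poses
    object_pose = rospy.wait_for_message('truss_pose', PoseStamped, timeout=10)
    result = transform_pose.generate_action_poses(object_pose)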
class ObjectDetection(object):
    """ObjectDetection"""

    def __init__(self, node_name, playback=False):
        self.node_name = node_name

        # state
        self.take_picture = False
        self.playback = playback
        if self.playback:
            rospy.loginfo("[{0}] Object detection launched in playback mode!".format(self.node_name))

        # params
        self.camera_sim = rospy.get_param("camera_sim", DEFAULT_CAMERA_SIM)
        self.process_image = ProcessImage(name='ros_tomato', pwd='', save=False)

        # frames
        self.camera_frame = "camera_color_optical_frame"

        # data input
        self.color_image = None
        self.depth_image = None
        self.camera_info = None
        self.pcl = None

        self.input_logger = self.initialize_input_logger()
        self.output_logger = self.initialize_output_logger()
        self.settings_logger = DataLogger(self.node_name,
                                          {"settings": "image_processing_settings"},
                                          {"settings": ImageProcessingSettings},
                                          bag_name='image_processing_settings')

        # cv bridge
        self.bridge = CvBridge()

        # params
        self.patch_size = 5
        self.peduncle_height = 0.080  # [m]
        self.settings = settings_lib_to_msg(self.process_image.get_settings())

        self.experiment_info = ExperimentInfo(self.node_name)

        pub_image_processing_settings = rospy.Publisher("image_processing_settings",
                                                        ImageProcessingSettings,
                                                        queue_size=10, latch=True)
        pub_image_processing_settings.publish(self.settings)  # TODO: log settings!

        rospy.Subscriber("image_processing_settings", ImageProcessingSettings,
                         self.image_processing_settings_cb)

    def initialize_input_logger(self):
        # inputs
        topics_in = {'color_image': 'camera/color/image_raw',
                     'depth_image': 'camera/aligned_depth_to_color/image_raw',
                     'camera_info': 'camera/color/camera_info',
                     'pcl': 'camera/depth_registered/points'}

        types_in = {'color_image': Image,
                    'depth_image': Image,
                    'camera_info': CameraInfo,
                    'pcl': PointCloud2}

        callbacks = {'color_image': self.color_image_cb,
                     'depth_image': self.depth_image_cb,
                     'camera_info': self.camera_info_cb,
                     'pcl': self.point_cloud_cb}

        for key in types_in:
            rospy.Subscriber(topics_in[key], types_in[key], callbacks[key])

        return DataLogger(self.node_name, topics_in, types_in, bag_name='camera')

    def initialize_output_logger(self):
        # outputs
        topics_out = {'truss_pose': 'truss_pose',
                      'tomato_image': 'tomato_image',
                      'tomato_image_total': 'tomato_image_total',
                      'depth_image': 'depth_image'}

        types_out = {'truss_pose': PoseStamped,
                     'tomato_image': Image,
                     'tomato_image_total': Image,
                     'depth_image': Image}

        return DataLogger(self.node_name, topics_out, types_out, bag_name=self.node_name)

    def color_image_cb(self, msg):
        if (self.color_image is None) and self.take_picture:
            rospy.logdebug("[{0}] Received color image message".format(self.node_name))
            try:
                self.color_image = self.bridge.imgmsg_to_cv2(msg, "rgb8")
            except CvBridgeError as e:
                rospy.logwarn("[{0}] {1}".format(self.node_name, e))

    def depth_image_cb(self, msg):
        if (self.depth_image is None) and self.take_picture:
            rospy.logdebug("[{0}] Received depth image message".format(self.node_name))
            try:
                if self.camera_sim:
                    self.depth_image = self.bridge.imgmsg_to_cv2(msg, "passthrough")
                else:
                    # the real camera publishes depth in [mm], convert to [m]
                    self.depth_image = self.bridge.imgmsg_to_cv2(msg, "passthrough") / 1000.0
            except CvBridgeError as e:
                rospy.logwarn("[{0}] {1}".format(self.node_name, e))

    def camera_info_cb(self, msg):
        if (self.camera_info is None) and self.take_picture:
            rospy.logdebug("[{0}] Received camera info message".format(self.node_name))
            self.camera_info = msg

    def point_cloud_cb(self, msg, force=False):
        if (self.pcl is None) and self.take_picture:
            rospy.logdebug("[{0}] Received point cloud message".format(self.node_name))
            self.pcl = msg
    def image_processing_settings_cb(self, msg):
        self.settings = msg
        rospy.logdebug("[{0}] Received image processing settings".format(self.node_name))

    def received_messages(self):
        """Returns a dictionary which contains information about what data has been received."""
        is_received = {'color_image': self.color_image is not None,
                       'depth_image': self.depth_image is not None,
                       'camera_info': self.camera_info is not None,
                       'pcl': self.pcl is not None}
        is_received['all'] = all(is_received.values())
        return is_received

    def print_received_messages(self, is_received):
        """Prints a warning for the data which has not been received."""
        for key in is_received:
            if not is_received[key]:
                rospy.logwarn("[{0}] Did not receive {1} data yet.".format(self.node_name, key))

    def wait_for_messages(self, timeout=1):
        start_time = rospy.get_time()
        is_received = {}

        while rospy.get_time() - start_time < timeout:
            is_received = self.received_messages()
            if is_received['all']:
                self.take_picture = False
                rospy.logdebug("[{0}] Received all data".format(self.node_name))
                return True
            rospy.sleep(0.1)

        self.print_received_messages(is_received)
        return False

    def save_data(self, result_img=None):
        """Save visual data."""
        # information about the image which will be stored
        image_info = {'px_per_mm': self.compute_px_per_mm()}
        json_pwd = os.path.join(self.experiment_info.path, self.experiment_info.id + '_info.json')

        rgb_img = self.color_image
        depth_img = colored_depth_image(self.depth_image.copy())

        with open(json_pwd, "w") as write_file:
            json.dump(image_info, write_file)

        self.save_image(rgb_img, self.experiment_info.path, name=self.experiment_info.id + '_rgb.png')
        self.save_image(depth_img, self.experiment_info.path, name=self.experiment_info.id + '_depth.png')

        if result_img is not None:
            self.save_image(result_img, self.experiment_info.path, name=self.experiment_info.id + '_result.png')

        return FlexGraspErrorCodes.SUCCESS

    def save_image(self, img, pwd, name):
        """Save an RGB image to the given path; if the path does not exist, create it."""
        full_pwd = os.path.join(pwd, name)

        # make sure the folder exists
        if not os.path.isdir(pwd):
            rospy.loginfo("[{0}] New path, creating a new folder {1}".format(self.node_name, pwd))
            os.mkdir(pwd)

        # OpenCV assumes the image to be BGR
        if cv2.imwrite(full_pwd, cv2.cvtColor(img, cv2.COLOR_BGR2RGB)):
            rospy.logdebug("[{0}] Successfully saved image to path {1}".format(self.node_name, full_pwd))
            return FlexGraspErrorCodes.SUCCESS
        else:
            rospy.logwarn("[{0}] Failed to save image to path {1}".format(self.node_name, full_pwd))
            return FlexGraspErrorCodes.FAILURE

    def detect_object(self):
        """Detect object."""
        if self.playback:
            rospy.loginfo("[{0}] Playback is active: publishing object_detection messages from bag!"
                          .format(self.node_name))
            success = self.output_logger.publish_messages_from_bag(self.experiment_info.path,
                                                                   self.experiment_info.id)
            return success

        self.settings_logger.write_messages_to_bag({"settings": self.settings},
                                                   self.experiment_info.path, self.experiment_info.id)

        px_per_mm = self.compute_px_per_mm()
        self.process_image.add_image(self.color_image, px_per_mm=px_per_mm)

        if self.settings is not None:
            self.process_image.set_settings(settings_msg_to_lib(self.settings))

        if not self.process_image.process_image():
            rospy.logwarn("[OBJECT DETECTION] Failed to process image")
            self.save_data()
            return FlexGraspErrorCodes.FAILURE

        object_features = self.process_image.get_object_features()
        tomato_mask, peduncle_mask, _ = self.process_image.get_segments()
        truss_visualization = self.process_image.get_truss_visualization(local=True)
        truss_visualization_total = self.process_image.get_truss_visualization(local=False)

        json_pwd = os.path.join(self.experiment_info.path, self.experiment_info.id,
                                'truss_features.json')
        with open(json_pwd, 'w') as outfile:
            json.dump(object_features, outfile)

        self.save_data(result_img=truss_visualization)

        depth_img = colored_depth_image(self.depth_image.copy())

        # publish results
        # TODO: also publish result in global frame!
        output_messages = {}
        output_messages['depth_image'] = self.bridge.cv2_to_imgmsg(depth_img, encoding="rgb8")
        output_messages['tomato_image'] = self.bridge.cv2_to_imgmsg(truss_visualization, encoding="rgba8")
        output_messages['tomato_image_total'] = self.bridge.cv2_to_imgmsg(truss_visualization_total, encoding="rgba8")
        output_messages['truss_pose'] = self.generate_cage_pose(object_features['grasp_location'], peduncle_mask)

        success = self.output_logger.publish_messages(output_messages, self.experiment_info.path,
                                                      self.experiment_info.id)
        return success

    def generate_cage_pose(self, grasp_features, peduncle_mask):
        row = grasp_features['row']
        col = grasp_features['col']
        angle = grasp_features['angle']
        if angle is None:
            rospy.logwarn("Failed to compute caging pose: object detection returned None!")
            return False

        # orientation
        rospy.logdebug("[{0}] Object angle in degrees: {1}".format(self.node_name, np.rad2deg(angle)))
        rpy = [0, 0, angle]

        rs_intrinsics = camera_info2rs_intrinsics(self.camera_info)
        depth_image_filter = DepthImageFilter(self.depth_image, rs_intrinsics, patch_size=5,
                                              node_name=self.node_name)
        depth_assumption = self.get_table_height() - self.peduncle_height
        depth_measured = depth_image_filter.get_depth(row, col)

        # location
        rospy.loginfo("[{0}] Depth based on assumptions: {1:0.3f} [m]".format(self.node_name, depth_assumption))
        rospy.loginfo("[{0}] Depth measured: {1:0.3f} [m]".format(self.node_name, depth_measured))

        xyz = depth_image_filter.deproject(row, col, depth_assumption)

        if np.isnan(xyz).any():
            rospy.logwarn("[{0}] Failed to compute caging pose, will try based on segment!".format(self.node_name))
            xyz = depth_image_filter.deproject(row, col, segment=peduncle_mask)

            if np.isnan(xyz).any():
                rospy.logwarn("[{0}] Failed to compute caging pose!".format(self.node_name))
                return False

        return point_to_pose_stamped(xyz, rpy, self.camera_frame, rospy.Time.now())

    def get_table_height(self):
        """Estimate the distance between the camera and the table."""
        point_cloud_filter = PointCloudFilter(self.pcl, patch_size=5, node_name=self.node_name)
        heights = point_cloud_filter.get_points(field_names="z")
        return np.nanmedian(np.array(heights))

    def compute_px_per_mm(self):
        """Estimate the number of pixels per mm."""
        height = self.get_table_height()
        fx = self.camera_info.K[0]
        fy = self.camera_info.K[4]
        f = (fx + fy) / 2
        px_per_mm = f / height / 1000.0

        rospy.logdebug('[{0}] Distance between camera and table: {1:0.3f} [m]'.format(self.node_name, height))
        rospy.logdebug('[{0}] Pixels per mm: {1:0.3f} [px/mm]'.format(self.node_name, px_per_mm))
        return px_per_mm

    def collect_messages(self):
        """When this method is called, the class starts collecting the required messages."""
        self.take_picture = True
        if self.playback:
            rospy.loginfo("[{0}] Playback is active: publishing camera messages from bag!"
                          .format(self.node_name))
            self.input_logger.publish_messages_from_bag(self.experiment_info.path, self.experiment_info.id)

    def log_input_messages(self):
        """Log the input messages."""
        if self.playback:
            rospy.logdebug("[{0}] Not logging input messages: running in playback mode!"
                           .format(self.node_name))
        else:
            rospy.logdebug("[{0}] Logging input messages".format(self.node_name))
            self.input_logger.write_messages_to_bag(self.get_messages(), self.experiment_info.path,
                                                    self.experiment_info.id)

    def get_messages(self):
        messages = {}
        if self.color_image is not None:
            messages['color_image'] = self.bridge.cv2_to_imgmsg(self.color_image, encoding="rgb8")
        else:
            rospy.logwarn("[{0}] Failed to get color_image message".format(self.node_name))
            messages['color_image'] = None

        if self.depth_image is not None:
            messages['depth_image'] = self.bridge.cv2_to_imgmsg(self.depth_image, encoding="passthrough")
        else:
            rospy.logwarn("[{0}] Failed to get depth_image message".format(self.node_name))
            messages['depth_image'] = None

        messages['camera_info'] = self.camera_info
        messages['pcl'] = self.pcl
        return messages

    def reset(self):
        self.color_image = None
        self.depth_image = None
        self.camera_info = None
        self.pcl = None
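# Usage sketch (hypothetical, not part of the original code): one detection cycle as
# a node script might run it. The node name and the 5 s timeout are illustrative
# assumptions.
if __name__ == '__main__':
    rospy.init_node('object_detection')
    detector = ObjectDetection('object_detection', playback=False)
    detector.collect_messages()                 # arm the input callbacks
    if detector.wait_for_messages(timeout=5):   # block until all camera data arrived
        detector.log_input_messages()
        result = detector.detect_object()
        detector.reset()                        # clear the inputs for the next cycle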
class Calibration(object):
    """Calibration"""
    node_name = "CALIBRATION"

    def __init__(self, node_name, playback=False):
        # TODO: we shouldn't have to run this node in a separate calibration namespace, it would make things much better
        self.robot_ns = 'px150'
        self.node_name = node_name
        self.playback = playback
        self.command = None

        if self.playback:
            rospy.loginfo("[{0}] Calibration launched in playback mode!".format(self.node_name))
            rospy.sleep(5)

        rospy.logdebug("[CALIBRATE] initializing hand eye client")
        self.client = HandeyeClient()
        self.experiment_info = ExperimentInfo(self.node_name, namespace=self.robot_ns,
                                              id="initial_calibration")

        # Listen
        rospy.logdebug("[CALIBRATE] initializing tf2_ros buffer")
        self.tfBuffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tfBuffer)

        self.calibration_frame = rospy.get_param('/' + self.robot_ns + '/' + 'robot_base_frame')
        self.planning_frame = rospy.get_param('/' + self.robot_ns + '/' + 'planning_frame')

        self.pose_array = None

        self.pub_e_out = rospy.Publisher("~e_out", FlexGraspErrorCodes, queue_size=10, latch=True)

        move_robot_topic = '/' + self.robot_ns + '/' + 'move_robot'
        robot_pose_topic = '/' + self.robot_ns + '/' + 'robot_pose'
        pose_array_topic = '/' + self.robot_ns + '/' + 'pose_array'

        self.move_robot_communication = Communication(move_robot_topic, timeout=15, node_name=self.node_name)
        self.robot_pose_publisher = rospy.Publisher(robot_pose_topic, PoseStamped, queue_size=10, latch=False)
        self.pose_array_publisher = rospy.Publisher(pose_array_topic, PoseArray, queue_size=5, latch=True)

        self.output_logger = DataLogger(self.node_name,
                                        {"calibration": "calibration_transform"},
                                        {"calibration": TransformStamped},
                                        bag_name=self.node_name)

        # Subscribe
        rospy.Subscriber("~e_in", String, self.e_in_cb)

        # Subscribe to the aruco tracker for it to publish the tf
        rospy.Subscriber('/' + self.robot_ns + '/' + 'aruco_tracker/pose', PoseStamped,
                         self.aruco_tracker_cb)

    def e_in_cb(self, msg):
        if self.command is None:
            self.command = msg.data
            rospy.logdebug("[{0}] Received event message: {1}".format(self.node_name, self.command))

            # reset the outgoing message
            msg = FlexGraspErrorCodes()
            msg.val = FlexGraspErrorCodes.NONE
            self.pub_e_out.publish(msg)

    def aruco_tracker_cb(self, msg):
        # subscribing is enough to trigger the tracker; nothing to do with the message itself
        pass

    def init_poses_1(self):
        r_amplitude = 0.00
        z_amplitude = 0.00
        r_min = 0.24
        z_min = 0.28  # 0.05
        pos_intervals = 1
        if pos_intervals == 1:
            r_vec = [r_min + r_amplitude]  # np.linspace(x_min, x_min + 2*x_amplitude, 2)
            z_vec = [z_min + z_amplitude]
        else:
            r_vec = np.linspace(r_min, r_min + 2 * r_amplitude, pos_intervals)
            z_vec = np.linspace(z_min, z_min + 2 * z_amplitude, pos_intervals)

        ai_amplitude = np.deg2rad(38.0)
        aj_amplitude = np.deg2rad(38.0)
        ak_amplitude = np.deg2rad(15.0)
        rot_intervals = 2
        ai_vec = np.linspace(-ai_amplitude, ai_amplitude, rot_intervals)
        aj_vec = np.linspace(-aj_amplitude, aj_amplitude, rot_intervals)
        ak_vec = [-ak_amplitude, ak_amplitude]

        return self.generate_poses(r_vec, z_vec, ai_vec, aj_vec, ak_vec)

    def init_poses_2(self):
        surface_height = 0.019  # [m]
        height_finger = 0.040  # [m]
        finger_link2ee_link = 0.023  # [m]
        grasp_height = height_finger + finger_link2ee_link - surface_height
        sag_angle = np.deg2rad(6.0)  # 6 [deg] expressed in [rad]

        r_amplitude = 0.08
        z_amplitude = 0.00
        r_min = 0.10
        z_min = grasp_height  # 0.05
        pos_intervals = 3
        if pos_intervals == 1:
            r_vec = [r_min + r_amplitude]  # np.linspace(x_min, x_min + 2*x_amplitude, 2)
            z_vec = [z_min + z_amplitude]
        else:
            r_vec = np.linspace(r_min, r_min + 2 * r_amplitude, pos_intervals)
            z_vec = np.linspace(z_min, z_min + 2 * z_amplitude, pos_intervals) + np.tan(sag_angle) * r_vec

        ak_amplitude = np.deg2rad(15.0)
        rot_intervals = 2
        ai_vec = [np.deg2rad(0)]
        aj_vec = [np.deg2rad(90)]
        ak_vec = [-ak_amplitude, ak_amplitude]

        return self.generate_poses_2(r_vec, z_vec, ai_vec, aj_vec, ak_vec)

    def generate_poses(self, r_vec, z_vec, ai_vec, aj_vec, ak_vec):
        pose_array = PoseArray()
        pose_array.header.frame_id = self.calibration_frame
        pose_array.header.stamp = rospy.Time.now()

        poses = []
        for ak in ak_vec:
            for r in r_vec:
                for z in z_vec:
                    for aj in aj_vec:
                        for ai in ai_vec:
                            pose = Pose()
                            x = r * np.cos(ak)
                            y = r * np.sin(ak)
                            pose.position = list_to_position([x, y, z])
                            pose.orientation = list_to_orientation([ai, aj, ak])
                            poses.append(pose)

        pose_array.poses = poses
        self.pose_array_publisher.publish(pose_array)
        self.pose_array = pose_array
        return FlexGraspErrorCodes.SUCCESS

    def generate_poses_2(self, r_vec, z_vec, ai_vec, aj_vec, ak_vec):
        pose_array = PoseArray()
        pose_array.header.frame_id = self.calibration_frame
        pose_array.header.stamp = rospy.Time.now()

        poses = []
        for ak in ak_vec:
            for r, z in zip(r_vec, z_vec):
                for aj in aj_vec:
                    for ai in ai_vec:
                        pose = Pose()
                        x = r * np.cos(ak)
                        y = r * np.sin(ak)
                        pose.position = list_to_position([x, y, z])
                        pose.orientation = list_to_orientation([ai, aj, ak])
                        poses.append(pose)

        pose_array.poses = poses
        self.pose_array_publisher.publish(pose_array)
        self.pose_array = pose_array
        return FlexGraspErrorCodes.SUCCESS

    def calibrate(self, track_marker=True):
        sample_list = self.client.get_sample_list().camera_marker_samples
        n_samples = len(sample_list)
        if n_samples > 0:
            rospy.loginfo("[{0}] Found {1} old calibration samples, deleting them before recalibrating!"
                          .format(self.node_name, n_samples))
            for i in reversed(range(n_samples)):
                rospy.loginfo("[{0}] Deleting sample {1}".format(self.node_name, i))
                self.client.remove_sample(i)

            sample_list = self.client.get_sample_list().camera_marker_samples
            n_samples = len(sample_list)

            if n_samples > 0:
                rospy.logwarn("[{0}] Failed to remove all old samples, cannot calibrate".format(self.node_name))
                rospy.logdebug("[{0}] Remaining samples: {1}".format(self.node_name, sample_list))
                return FlexGraspErrorCodes.FAILURE
            else:
                rospy.loginfo("[{0}] All old samples have been removed".format(self.node_name))

        if self.playback:
            rospy.loginfo("[{0}] Playback is active: publishing messages from bag!".format(self.node_name))
            messages = self.output_logger.load_messages_from_bag(self.experiment_info.path,
                                                                 self.experiment_info.id)
            if messages is not None:
                self.broadcast(messages['calibration'])
                return FlexGraspErrorCodes.SUCCESS
            else:
                return FlexGraspErrorCodes.FAILURE

        if self.pose_array is None:
            rospy.logwarn("[CALIBRATE] pose_array is still empty")
            return FlexGraspErrorCodes.REQUIRED_DATA_MISSING

        try:
            trans = self.tfBuffer.lookup_transform(self.planning_frame,
                                                   self.pose_array.header.frame_id, rospy.Time(0))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
            rospy.logwarn("[CALIBRATE] failed to get transform from %s to %s",
                          self.pose_array.header.frame_id, self.planning_frame)
            return FlexGraspErrorCodes.TRANSFORM_POSE_FAILED

        result = self.move_robot_communication.wait_for_result("reset")

        for pose in self.pose_array.poses:
            if rospy.is_shutdown():
                return FlexGraspErrorCodes.SHUTDOWN

            pose_stamped = PoseStamped()
            pose_stamped.header = self.pose_array.header
            pose_stamped.pose = pose

            # transform to planning frame
            pose_trans = tf2_geometry_msgs.do_transform_pose(pose_stamped, trans)

            self.robot_pose_publisher.publish(pose_trans)
            result = self.move_robot_communication.wait_for_result("move_manipulator")  # timeout = 5?

            if result == FlexGraspErrorCodes.SUCCESS:
                if track_marker:
                    # camera delay + wait a small amount of time for vibrations to stop
                    rospy.sleep(1.5)
                    try:
                        self.client.take_sample()
                    except Exception:
                        rospy.logwarn("[CALIBRATE] Failed to take sample, marker might not be visible.")
            elif result in (FlexGraspErrorCodes.DYNAMIXEL_ERROR, FlexGraspErrorCodes.DYNAMIXEL_SEVERE_ERROR):
                rospy.logwarn("[{0}] Dynamixel error triggered, returning error".format(self.node_name))
                return result

        # reset
        result = self.move_robot_communication.wait_for_result("home")

        # compute calibration transform
        if not track_marker:
            return FlexGraspErrorCodes.SUCCESS
        else:
            calibration_transform = self.client.compute_calibration()
            if calibration_transform.valid:
                rospy.loginfo("[CALIBRATE] Found valid transform")
                self.broadcast(calibration_transform.calibration.transform)
                self.client.save()
                return FlexGraspErrorCodes.SUCCESS
            else:
                rospy.logwarn("[CALIBRATE] Computed calibration is invalid")
                return FlexGraspErrorCodes.FAILURE

    def broadcast(self, transform):
        rospy.loginfo("[{0}] Broadcasting result".format(self.node_name))
        broadcaster = tf2_ros.StaticTransformBroadcaster()
        broadcaster.sendTransform(transform)

        if not self.playback:
            self.output_logger.write_messages_to_bag({"calibration": transform},
                                                     self.experiment_info.path, self.experiment_info.id)

    def take_action(self):
        msg = FlexGraspErrorCodes()
        result = None

        if self.command == 'e_init':
            result = FlexGraspErrorCodes.SUCCESS
        elif self.command == 'calibrate':
            result = self.init_poses_1()
            if result == FlexGraspErrorCodes.SUCCESS:
                result = self.calibrate()
        elif self.command == 'calibrate_height':
            result = self.init_poses_2()
            if result == FlexGraspErrorCodes.SUCCESS:
                result = self.calibrate(track_marker=False)
        elif self.command is not None:
            rospy.logwarn("[CALIBRATE] Cannot take an action: received unknown command %s!", self.command)
            result = FlexGraspErrorCodes.UNKNOWN_COMMAND

        # publish result
        if result is not None:
            msg.val = result
            flex_grasp_error_log(result, self.node_name)
            self.pub_e_out.publish(msg)
            self.command = None
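# Usage sketch (hypothetical, not part of the original code): a node script would
# typically construct Calibration once and poll take_action(), while commands such
# as 'calibrate' arrive on the "~e_in" topic. The 10 Hz rate is an illustrative
# assumption.
if __name__ == '__main__':
    rospy.init_node('calibrate')
    calibration = Calibration('calibrate', playback=False)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        calibration.take_action()
        rate.sleep()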