def callback(depth, rgb, camera_info):
    # restrict grasp planning to a fixed crop of the image
    bounding_box = BoundingBox()
    bounding_box.minX = 100
    bounding_box.minY = 50
    bounding_box.maxX = 924
    bounding_box.maxY = 526

    rospy.wait_for_service('plan_gqcnn_grasp')
    try:
        plan_routine = rospy.ServiceProxy('plan_gqcnn_grasp', GQCNNGraspPlanner)
        # TODO: Insert your message variables to be sent as a service request
        resp = plan_routine(rgb, depth, camera_info, bounding_box)
        print("Response: ", resp.success)
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
def callback(depth, rgb, camera_info):
    # use the full image as the region of interest
    bounding_box = BoundingBox()
    bounding_box.minX = 0
    bounding_box.minY = 0
    bounding_box.maxX = 1024
    bounding_box.maxY = 576

    print("*************************************************")
    print("camera_info", camera_info.K)

    rospy.wait_for_service('plan_gqcnn_grasp')
    try:
        plan_routine = rospy.ServiceProxy('plan_gqcnn_grasp', GQCNNGraspPlanner)
        resp = plan_routine(rgb, depth, camera_info, bounding_box)
        grasp = resp.grasp
        print("*************************************************")
        print("Pose: ", resp.grasp.pose)
        print("Grasp_success_prob: ", resp.grasp.grasp_success_prob)
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
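# --- Hedged sketch (not from the source): one way to drive the callback above.
# The topic names, queue size, and slop tolerance are assumptions for
# illustration; any topics carrying synchronized depth/RGB/info would do.
import rospy
import message_filters
from sensor_msgs.msg import Image, CameraInfo

def make_synchronized_subscriber():
    depth_sub = message_filters.Subscriber('/camera/depth/image', Image)
    rgb_sub = message_filters.Subscriber('/camera/rgb/image_raw', Image)
    info_sub = message_filters.Subscriber('/camera/rgb/camera_info', CameraInfo)
    # match messages whose stamps fall within 0.1 s of each other
    ts = message_filters.ApproximateTimeSynchronizer(
        [depth_sub, rgb_sub, info_sub], queue_size=1, slop=0.1)
    ts.registerCallback(callback)  # callback(depth, rgb, camera_info) as above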
def compute_object_pose(self):
    camera_intrinsics = self.sensor.ir_intrinsics

    # get the images from the sensor
    color_image, depth_image, _ = self.sensor.frames()

    # inpaint to remove holes
    inpainted_color_image = color_image.inpaint(
        rescale_factor=self.config['inpaint_rescale_factor'])
    inpainted_depth_image = depth_image.inpaint(
        rescale_factor=self.config['inpaint_rescale_factor'])

    detector_cfg = self.config['detector']
    detector_cfg['image_width'] = inpainted_depth_image.width // 3
    detector_cfg['image_height'] = inpainted_depth_image.height // 3
    detector = RgbdDetectorFactory.detector('point_cloud_box')
    detection = detector.detect(inpainted_color_image,
                                inpainted_depth_image,
                                detector_cfg,
                                camera_intrinsics,
                                self.tf_camera_world,
                                vis_foreground=False,
                                vis_segmentation=False)[0]

    if self.config['vis']['vis_detector_output']:
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(detection.color_thumbnail)
        vis.subplot(1, 2, 2)
        vis.imshow(detection.depth_thumbnail)
        vis.show()

    # detection.bounding_box stores points as (row, col), i.e. (y, x)
    boundingBox = BoundingBox()
    boundingBox.minY = detection.bounding_box.min_pt[0]
    boundingBox.minX = detection.bounding_box.min_pt[1]
    boundingBox.maxY = detection.bounding_box.max_pt[0]
    boundingBox.maxX = detection.bounding_box.max_pt[1]

    try:
        start_time = time.time()
        planned_grasp_data = self.plan_grasp_client(
            inpainted_color_image.rosmsg, inpainted_depth_image.rosmsg,
            camera_intrinsics.rosmsg, boundingBox, None)
        grasp_plan_time = time.time() - start_time
        rospy.loginfo("Planning time: {}".format(grasp_plan_time))
        rospy.loginfo("Camera CS planned_grasp_data:\n{}".format(
            planned_grasp_data.grasp.pose))

        if planned_grasp_data.grasp.q_value < 0.20:
            rospy.loginfo("Q value is too small: {}".format(
                planned_grasp_data.grasp.q_value))
            return None

        grasp_camera_pose = planned_grasp_data.grasp
        rospy.loginfo('Processing Grasp')
        self.broadcast_pose_as_transform(self.tf_camera_world.from_frame,
                                         "object_link",
                                         grasp_camera_pose.pose)

        # create Stamped ROS Transform
        camera_world_transform = TransformStamped()
        camera_world_transform.header.stamp = rospy.Time.now()
        camera_world_transform.header.frame_id = self.tf_camera_world.from_frame
        camera_world_transform.child_frame_id = self.tf_camera_world.to_frame
        camera_world_transform.transform.translation.x = self.tf_camera_world.translation[0]
        camera_world_transform.transform.translation.y = self.tf_camera_world.translation[1]
        camera_world_transform.transform.translation.z = self.tf_camera_world.translation[2]

        # autolab_core RigidTransform quaternions are (w, x, y, z);
        # geometry_msgs quaternions are (x, y, z, w)
        q = self.tf_camera_world.quaternion
        camera_world_transform.transform.rotation.x = q[1]
        camera_world_transform.transform.rotation.y = q[2]
        camera_world_transform.transform.rotation.z = q[3]
        camera_world_transform.transform.rotation.w = q[0]

        # do_transform_pose only reads the .pose field, so the GQCNNGrasp
        # message can be passed directly
        grasp_world_pose = tf2_geometry_msgs.do_transform_pose(
            grasp_camera_pose, camera_world_transform)

        # if grasp_world_pose.pose.position.z < 0.05:
        #     rospy.logwarn("Predicted pose Z value is less than 5 cm -> bad pose")
        #     return None

        # Hack z position
        # grasp_world_pose.pose.position.z += 0.0075

        # Hack orientation to a fixed 90-degree vertical pose
        grasp_world_pose.pose.orientation.x = -0.718339806303
        grasp_world_pose.pose.orientation.y = 0.00601026421019
        grasp_world_pose.pose.orientation.z = 0.695637686512
        grasp_world_pose.pose.orientation.w = 0.00632522789696

        rospy.loginfo("World CS planned_grasp_data:\n{}".format(grasp_world_pose))
        return grasp_world_pose.pose
    except rospy.ServiceException as e:
        rospy.logerr("Service call failed: \n %s" % e)
        return None
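# --- Hedged sketch (not from the source): `broadcast_pose_as_transform` is
# called above but not shown. A minimal implementation on the same class
# might look like this; the use of tf2_ros is an assumption.
import tf2_ros
from geometry_msgs.msg import TransformStamped

def broadcast_pose_as_transform(self, parent_frame, child_frame, pose):
    t = TransformStamped()
    t.header.stamp = rospy.Time.now()
    t.header.frame_id = parent_frame
    t.child_frame_id = child_frame
    t.transform.translation.x = pose.position.x
    t.transform.translation.y = pose.position.y
    t.transform.translation.z = pose.position.z
    t.transform.rotation = pose.orientation
    # in practice the broadcaster would be created once in __init__
    tf2_ros.TransformBroadcaster().sendTransform(t)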
                            'baxter_control_node.yaml')['detector']

# load camera tf and intrinsics
rospy.loginfo('Loading T_camera_world')
T_camera_world = RigidTransform.load(CFG_PATH + 'kinect_to_world.tf')
rospy.loginfo("Loading camera intrinsics")
raw_camera_info = rospy.wait_for_message('/camera/rgb/camera_info', CameraInfo)
camera_intrinsics = perception.CameraIntrinsics(
    raw_camera_info.header.frame_id, raw_camera_info.K[0],
    raw_camera_info.K[4], raw_camera_info.K[2], raw_camera_info.K[5],
    raw_camera_info.K[1], raw_camera_info.height, raw_camera_info.width)

# initialize image processing objects
cv_bridge = CvBridge()
boundingBox = BoundingBox(BOUNDBOX_MIN_X, BOUNDBOX_MIN_Y,
                          BOUNDBOX_MAX_X, BOUNDBOX_MAX_Y)

# wait for Grasp Planning Service and create Service Proxy
rospy.loginfo("Waiting for planner node")
rospy.wait_for_service('plan_gqcnn_grasp')
plan_grasp = rospy.ServiceProxy('plan_gqcnn_grasp', GQCNNGraspPlanner)

# wait for Grasp Execution Service and create Service Proxy
if not DEBUG:
    rospy.loginfo("Waiting for execution node")
    rospy.wait_for_service('execute_gqcnn_grasp')
    execute_grasp = rospy.ServiceProxy('execute_gqcnn_grasp', GQCNNGraspExecuter)

# setup safe termination
def handler(signum, frame):
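    # --- Hedged sketch (not from the source): the handler body is truncated
    # in the original; a minimal safe-termination handler might just log and
    # exit. The registration via signal.signal is likewise an assumption
    # (assumes `signal` and `sys` are imported at the top of the script).
    rospy.loginfo('Caught signal %d, shutting down' % signum)
    sys.exit(0)

signal.signal(signal.SIGINT, handler)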
def run_experiment():
    """ Run the experiment """
    if not config['robot_off']:
        rospy.loginfo('Initializing YuMi')
        robot, subscriber, left_arm, right_arm, home_pose = init_robot(config)

    # create ROS CVBridge
    cv_bridge = CvBridge()

    # wait for Grasp Planning Service and create Service Proxy
    rospy.wait_for_service('plan_gqcnn_grasp')
    plan_grasp = rospy.ServiceProxy('plan_gqcnn_grasp', GQCNNGraspPlanner)

    # get camera intrinsics
    camera_intrinsics = sensor.ir_intrinsics

    # setup experiment logger
    experiment_logger = GraspIsolatedObjectExperimentLogger(
        config['experiment_dir'],
        config['supervisor'],
        camera_intrinsics,
        T_camera_world,
        '/home/autolab/Workspace/vishal_working/catkin_ws/src/gqcnn/cfg/ros_nodes/yumi_control_node.yaml',
        planner_type=config['planner_type'])
    logging.info('Saving experiment to %s' % (experiment_logger.experiment_path))

    object_keys = config['test_object_keys']
    trial_number = 1
    re_try = False

    logging.info('Beginning experiment')
    while True:
        if not re_try:
            experiment_logger.start_trial()
            obj = np.random.choice(object_keys, size=1)[0]
        else:
            re_try = False

        rospy.loginfo('Please place object: ' + obj + ' on the workspace.')
        raw_input("Press ENTER when ready ...")

        # start the next trial
        rospy.loginfo('Trial %d' % (trial_number))

        # get the images from the sensor
        color_image, depth_image, _ = sensor.frames()

        # log some trial info
        experiment_logger.update_trial_attribute('trial_num', trial_number)
        experiment_logger.update_trial_attribute('color_im', color_image)
        experiment_logger.update_trial_attribute('depth_im', depth_image)

        # inpaint to remove holes
        inpainted_color_image = color_image.inpaint(
            rescale_factor=config['inpaint_rescale_factor'])
        inpainted_depth_image = depth_image.inpaint(
            rescale_factor=config['inpaint_rescale_factor'])

        detector = RgbdDetectorFactory.detector('point_cloud_box')
        detection = detector.detect(inpainted_color_image,
                                    inpainted_depth_image,
                                    detector_cfg,
                                    camera_intrinsics,
                                    T_camera_world,
                                    vis_foreground=False,
                                    vis_segmentation=False)[0]

        if config['vis']['vis_detector_output']:
            vis.figure()
            vis.subplot(1, 2, 1)
            vis.imshow(detection.color_thumbnail)
            vis.subplot(1, 2, 2)
            vis.imshow(detection.depth_thumbnail)
            vis.show()

        # detection.bounding_box stores points as (row, col), i.e. (y, x)
        boundingBox = BoundingBox()
        boundingBox.minY = detection.bounding_box.min_pt[0]
        boundingBox.minX = detection.bounding_box.min_pt[1]
        boundingBox.maxY = detection.bounding_box.max_pt[0]
        boundingBox.maxX = detection.bounding_box.max_pt[1]

        try:
            start_time = time.time()
            planned_grasp_data = plan_grasp(inpainted_color_image.rosmsg,
                                            inpainted_depth_image.rosmsg,
                                            camera_intrinsics.rosmsg,
                                            boundingBox)
            grasp_plan_time = time.time() - start_time

            lift_gripper_width, T_gripper_world = process_GQCNNGrasp(
                planned_grasp_data, robot, left_arm, right_arm, subscriber,
                home_pose, config)

            # get human label
            human_input = raw_input('Grasp success, or grasp failure? [s/f] ')
            while human_input.lower() != 's' and human_input.lower() != 'f':
                logging.info('Did not understand input. Please answer \'s\' or \'f\'')
                human_input = raw_input('Grasp success, or grasp failure? [s/f] ')
            if human_input.lower() == 's':
                experiment_logger.update_trial_attribute('human_label', 1)
            else:
                experiment_logger.update_trial_attribute('human_label', 0)

            # log result
            experiment_logger.update_trial_attribute('gripper_pose', T_gripper_world)
            experiment_logger.update_trial_attribute('planning_time', grasp_plan_time)
            experiment_logger.update_trial_attribute('gripper_width', lift_gripper_width)
            experiment_logger.update_trial_attribute('found_grasp', 1)
            experiment_logger.update_trial_attribute('completed', True)
            experiment_logger.update_trial_attribute('object_key', obj)
            trial_number += 1
        except rospy.ServiceException as e:
            rospy.logerr("Service call failed: \n %s" % e)
            experiment_logger.update_trial_attribute('found_grasp', 0)
            experiment_logger.update_trial_attribute('completed', True)
            experiment_logger.update_trial_attribute('object_key', obj)
            trial_number += 1
        except (YuMiCommException, YuMiControlException) as yce:
            rospy.logerr(str(yce))
            if sensor is not None:
                sensor.stop()
            if robot is not None:
                robot.stop()
            if subscriber is not None and subscriber._started:
                subscriber.stop()
            rospy.loginfo("Re-trying")
            re_try = True
            robot, subscriber, left_arm, right_arm, home_pose = init_robot(config)
def __init__(self, config):
    rospy.init_node("gqcnn_base_grasp", anonymous=True)

    # MoveIt! setup
    moveit_commander.roscpp_initialize(sys.argv)
    self.robot = moveit_commander.RobotCommander()
    self.scene = moveit_commander.PlanningSceneInterface()
    self.arm = moveit_commander.MoveGroupCommander('arm')
    self.gripper = moveit_commander.MoveGroupCommander('gripper')
    self.arm.set_start_state_to_current_state()
    self.arm.set_pose_reference_frame('base')
    self.arm.set_planner_id('SBLkConfigDefault')
    self.arm.set_planning_time(10)
    self.arm.set_max_velocity_scaling_factor(0.04)
    self.arm.set_max_acceleration_scaling_factor(0.04)
    self.arm.set_goal_orientation_tolerance(0.1)
    self.arm.set_workspace([-2, -2, -2, 2, 2, 2])
    self.gripper.set_goal_joint_tolerance(0.2)
    rospy.loginfo(self.arm.get_pose_reference_frame())  # base
    rospy.loginfo(self.arm.get_planning_frame())  # world

    # Message filter for the image topics. The topic names need to be set
    # carefully, e.g. /camera/depth_registered/sw_registered/image_rect and
    # /camera/rgb/image_rect_color.
    rospy.loginfo('wait_for_message')
    rospy.wait_for_message("/camera/rgb/image_raw", Image)
    rospy.wait_for_message("/camera/depth/image", Image)
    rospy.loginfo('start_callback')
    self.image_sub = message_filters.Subscriber("/camera/rgb/image_raw", Image)
    self.depth_sub = message_filters.Subscriber("/camera/depth/image", Image)
    self.camera_info_topic = "/camera/rgb/camera_info"
    self.camera_info = rospy.wait_for_message(self.camera_info_topic, CameraInfo)
    rospy.loginfo(self.camera_info.header.frame_id)
    self.ts = message_filters.ApproximateTimeSynchronizer(
        [self.image_sub, self.depth_sub], 1, 1)
    self.ts.registerCallback(self.cb)
    self.color_msg = Image()
    self.depth_msg = Image()
    self.mask = Image()

    # bounding box for objects
    self.bounding_box = BoundingBox()
    self.bounding_box.maxX = 420
    self.bounding_box.maxY = 250
    self.bounding_box.minX = 90
    self.bounding_box.minY = 40
    self.bridge = CvBridge()

    # transform listener
    self.listener = tf.TransformListener()

    # compute_ik service
    self.ik_srv = rospy.ServiceProxy('/compute_ik', GetPositionIK)
    rospy.loginfo("Waiting for /compute_ik service...")
    self.ik_srv.wait_for_service()
    rospy.loginfo("Connected!")
    self.service_request = PositionIKRequest()
    self.service_request.group_name = 'arm'
    self.service_request.timeout = rospy.Duration(2)
    self.service_request.avoid_collisions = True

    # signal
    self.start = 0
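# --- Hedged sketch (not from the source): the synchronized callback `cb`
# registered above is not shown; a minimal version would just cache the
# latest RGB/depth pair in the attributes initialized in __init__ and raise
# the start flag.
def cb(self, color_msg, depth_msg):
    self.color_msg = color_msg
    self.depth_msg = depth_msg
    self.start = 1  # signal that a fresh image pair is available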
# Callback methods of the GraspCallbacks class (class definition not shown).
def info_callback(self, msg):
    self.info = msg
    self.call_service()

def call_service(self):
    self.count += 1
    # only call the planner on every 100th camera_info message
    if self.count % 100 != 0:
        return
    # proceed only once all inputs have arrived
    if all(x is not None for x in (self.color, self.depth, self.info, self.bbox)):
        self.service(self.color, self.depth, self.info, self.bbox)

rospy.init_node("grassppperpepr")
service = rospy.ServiceProxy("/plan_gqcnn_grasp", GQCNNGraspPlanner)
caller = GraspCallbacks(service)

boundingBox = BoundingBox()
boundingBox.minY = 0.0
boundingBox.minX = 0.0
boundingBox.maxY = 200.0
boundingBox.maxX = 200.0
caller.bbox = boundingBox

color_sub = rospy.Subscriber("/camera/color/image_raw", Image,
                             caller.color_callback)
depth_sub = rospy.Subscriber("/camera/depth/image_rect_raw", Image,
                             caller.depth_callback)
info_sub = rospy.Subscriber("/camera/depth/camera_info", CameraInfo,
                            caller.info_callback)
rospy.spin()
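# --- Hedged sketch (not from the source): a minimal GraspCallbacks class
# consistent with the usage above; anything beyond the attributes and
# callbacks the snippet itself references is an assumption.
class GraspCallbacks(object):
    def __init__(self, service):
        self.service = service
        self.color = None
        self.depth = None
        self.info = None
        self.bbox = None
        self.count = 0

    def color_callback(self, msg):
        self.color = msg

    def depth_callback(self, msg):
        self.depth = msg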
def run_experiment():
    """ Run the experiment """
    print("run_experiment")
    rospy.loginfo('Wait for the service...')

    # wait for Grasp Planning Service and create Service Proxy
    rospy.wait_for_service('grasping_policy')
    plan_grasp = rospy.ServiceProxy('grasping_policy', GQCNNGraspPlanner)

    # create ROS CVBridge
    # cv_bridge = CvBridge()

    # get camera intrinsics
    # camera_intrinsics = sensor.color_intrinsics
    camera_intrinsics = sensor.ir_intrinsics
    rospy.loginfo('camera_intrinsics: {}'.format(camera_intrinsics.rosmsg))

    rospy.loginfo('Beginning experiment')

    # get the images from the sensor
    color_image, depth_image, _ = sensor.frames()

    # inpaint to remove holes
    inpainted_color_image = color_image.inpaint(
        rescale_factor=config['inpaint_rescale_factor'])
    inpainted_depth_image = depth_image.inpaint(
        rescale_factor=config['inpaint_rescale_factor'])
    print("inpainted_depth_image shape:", inpainted_depth_image.shape)

    detector_cfg['image_width'] = inpainted_depth_image.width // 3
    detector_cfg['image_height'] = inpainted_depth_image.height // 3
    detector = RgbdDetectorFactory.detector('point_cloud_box')
    rospy.loginfo("Detect bbox")
    detection = detector.detect(inpainted_color_image,
                                inpainted_depth_image,
                                detector_cfg,
                                camera_intrinsics,
                                T_camera_world,
                                vis_foreground=False,
                                vis_segmentation=False)[0]

    if config['vis']['vis_detector_output']:
        vis.figure()
        vis.subplot(1, 2, 1)
        vis.imshow(detection.color_thumbnail)
        vis.subplot(1, 2, 2)
        vis.imshow(detection.depth_thumbnail)
        vis.show()

    # detection.bounding_box stores points as (row, col), i.e. (y, x)
    boundingBox = BoundingBox()
    boundingBox.minY = detection.bounding_box.min_pt[0]
    boundingBox.minX = detection.bounding_box.min_pt[1]
    boundingBox.maxY = detection.bounding_box.max_pt[0]
    boundingBox.maxX = detection.bounding_box.max_pt[1]

    try:
        rospy.loginfo("Start planning")
        start_time = time.time()
        planned_grasp_data = plan_grasp(inpainted_color_image.rosmsg,
                                        inpainted_depth_image.rosmsg,
                                        camera_intrinsics.rosmsg,
                                        boundingBox, None)
        grasp_plan_time = time.time() - start_time
        rospy.loginfo("Planning time: {}".format(grasp_plan_time))
        rospy.loginfo("planned_grasp_data:\n{}".format(
            planned_grasp_data.grasp.pose))
        grasp_camera_pose = planned_grasp_data.grasp

        # create Stamped ROS Transform
        camera_world_transform = TransformStamped()
        camera_world_transform.header.stamp = rospy.Time.now()
        camera_world_transform.header.frame_id = T_camera_world.from_frame
        camera_world_transform.child_frame_id = T_camera_world.to_frame
        camera_world_transform.transform.translation.x = T_camera_world.translation[0]
        camera_world_transform.transform.translation.y = T_camera_world.translation[1]
        camera_world_transform.transform.translation.z = T_camera_world.translation[2]

        # autolab_core RigidTransform quaternions are (w, x, y, z);
        # geometry_msgs quaternions are (x, y, z, w)
        q = T_camera_world.quaternion
        camera_world_transform.transform.rotation.x = q[1]
        camera_world_transform.transform.rotation.y = q[2]
        camera_world_transform.transform.rotation.z = q[3]
        camera_world_transform.transform.rotation.w = q[0]

        grasp_world_pose = tf2_geometry_msgs.do_transform_pose(
            grasp_camera_pose, camera_world_transform)
        rospy.loginfo("World CS planned_grasp_data:\n{}".format(grasp_world_pose))
    except rospy.ServiceException as e:
        rospy.logerr("Service call failed: \n %s" % e)
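# --- Hedged sketch (not from the source): the same camera-to-world mapping
# can be expressed with autolab_core RigidTransform composition instead of a
# hand-built TransformStamped; the function name and the 'grasp' frame label
# are illustrative only.
import numpy as np
from autolab_core import RigidTransform

def grasp_pose_to_world(grasp_pose_msg, T_camera_world):
    q = grasp_pose_msg.orientation
    t = grasp_pose_msg.position
    T_grasp_camera = RigidTransform(
        rotation=RigidTransform.rotation_from_quaternion(
            np.array([q.w, q.x, q.y, q.z])),  # autolab_core expects (w, x, y, z)
        translation=np.array([t.x, t.y, t.z]),
        from_frame='grasp', to_frame=T_camera_world.from_frame)
    # (grasp -> camera) composed with (camera -> world) gives grasp -> world
    return T_camera_world * T_grasp_camera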
# setup sensor
camera_intr = CameraIntrinsics.load(camera_intr_filename)

# read images
color_im = ColorImage.open(color_im_filename, frame=camera_intr.frame)
depth_im = DepthImage.open(depth_im_filename, frame=camera_intr.frame)

# optionally read a segmask
segmask = BinaryImage(255 * np.ones(depth_im.shape).astype(np.uint8),
                      frame=camera_intr.frame)
if segmask_filename is not None:
    segmask = BinaryImage.open(segmask_filename)

# optionally set a bounding box
bounding_box = BoundingBox()
bounding_box.minY = 0
bounding_box.minX = 0
bounding_box.maxY = color_im.height
bounding_box.maxX = color_im.width

# plan grasp
grasp_resp = plan_grasp(color_im.rosmsg, depth_im.rosmsg, camera_intr.rosmsg,
                        bounding_box, segmask.rosmsg)
grasp = grasp_resp.grasp

# convert to a grasp action
grasp_type = grasp.grasp_type
if grasp_type == GQCNNGrasp.PARALLEL_JAW:
    center = Point(np.array([grasp.center_px[0], grasp.center_px[1]]),
                   frame=camera_intr.frame)
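    # --- Hedged sketch (not from the source): the snippet ends mid-branch.
    # One way to continue is to deproject the planned center pixel into a 3D
    # point in the camera frame; this assumes the GQCNNGrasp message's `depth`
    # field holds the planned grasp depth along the camera axis.
    grasp_point_camera = camera_intr.deproject_pixel(grasp.depth, center)
    print('3D grasp point (camera frame):', grasp_point_camera.data)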