def main():
    """Stack detected objects with one Baxter arm.

    Parses the target limb from the command line, brings the robot up
    (MoveIt group, gripper calibration), waits for the perception
    pipeline to publish collision objects, then for each object:
    plan+move above it, visually servo down with the hand camera,
    grasp, carry it to a fixed stacking pose, release, and raise the
    stack target for the next object.
    """
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    required = parser.add_argument_group('required arguments')
    required.add_argument('-l', '--limb', required=True,
                          choices=['left', 'right'],
                          help='send joint trajectory to which limb')
    args = parser.parse_args(rospy.myargv()[1:])
    limb = args.limb

    rospy.init_node('stackit')
    iksvc, ns = ik_command.connect_service(limb)
    moveit_commander.roscpp_initialize(sys.argv)
    rate = rospy.Rate(1)

    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    group = moveit_commander.MoveGroupCommander(limb + "_arm")
    group.allow_replanning(True)

    print("Getting robot state... ")
    rs = baxter_interface.RobotEnable(CHECK_VERSION)
    print("Enabling robot... ")
    rs.enable()

    # Calibrate the gripper once, if it has not been calibrated yet.
    gripper_if = baxter_interface.Gripper(limb)
    if not gripper_if.calibrated():
        print("Calibrating gripper")
        gripper_if.calibrate()

    # Block until the perception side has published at least one object.
    obj_manager = ObjectManager()
    while len(obj_manager.collision_objects) <= 0:
        rate.sleep()
    objects = obj_manager.collision_objects
    object_height = params['object_height']

    # Fixed stacking target in the base frame (hand-tuned constants).
    stack_pose = Pose(
        position=Point(0.593, -0.212, object_height - 0.130),
        orientation=Quaternion(0.6509160466, 0.758886809948,
                               -0.0180992582839, -0.0084573527776))

    processed_win = "Processed image"
    raw_win = "Hand camera"
    cv2.namedWindow(processed_win)

    rospy.on_shutdown(obj_manager.remove_known_objects)

    for obj in objects:
        obj_pose = obj.primitive_poses[0]
        print("Got pose:", obj.primitive_poses[0])
        # Approach pose: project onto the table plane, then hover above it.
        pose = projectPose(obj.primitive_poses[0])
        pose = incrementPoseMsgZ(pose, object_height * 1.7)
        print("Modified pose:", pose)

        print("setting target to pose")
        # Move to the next block.
        group.clear_pose_targets()
        group.set_start_state_to_current_state()
        group.set_pose_target(pose)
        plan = group.plan()
        # TODO: is there a better way of checking plan success?
        plan_found = len(plan.joint_trajectory.points) > 0
        if plan_found:
            group.go(wait=True)

            # Visual servoing: track the object in the hand camera and
            # descend until VisualCommand reports the grasp is done.
            imgproc = ObjectFinder("star", None, None)
            imgproc.subscribe("/cameras/" + limb + "_hand_camera/image")
            imgproc.publish(limb)
            vc = VisualCommand(iksvc, limb)
            vc.subscribe()
            while (not vc.done) and (not rospy.is_shutdown()):
                # Republish the current detections as a BlobInfoArray.
                blobArray = []
                for centroid, axis in zip(imgproc.centroids, imgproc.axes):
                    blob = BlobInfo()
                    blob.centroid = Point(*centroid)
                    if axis is None:
                        # Sentinel "no axis" value expected downstream.
                        axis = -1 * numpy.ones(6)
                    blob.axis = Polygon([Point(*axis[0:3].tolist()),
                                         Point(*axis[3:6].tolist())])
                    blobArray.append(blob)
                msg = BlobInfoArray()
                msg.blobs = blobArray
                imgproc.handler_pub.publish(msg)
                if imgproc.processed is not None:
                    cv2.imshow(processed_win, imgproc.processed)
                    cv2.waitKey(10)
            vc.unsubscribe()
            imgproc.unsubscribe()

            print("Adding attached message")
            # Tell the planning scene the object is now held by this arm.
            obj_manager.publish_attached(obj, limb)

            # Carefully rise away from the object before we plan another path.
            pose = incrementPoseMsgZ(pose, 2 * object_height)
            ik_command.service_request_pose(iksvc, pose, limb, blocking=True)
        else:
            print("Unable to plan path")
            continue  # TODO: recovery behavior when no plan is found

        # Move to the stacking position.
        group.clear_pose_targets()
        group.set_start_state_to_current_state()
        group.set_pose_target(stack_pose)
        plan = group.plan()
        plan_found = len(plan.joint_trajectory.points) > 0
        if plan_found:
            group.go(wait=True)
            gripper_if.open(block=True)
            group.detach_object(obj.id)

            # Carefully rise away from the stack before the next plan.
            pose = incrementPoseMsgZ(stack_pose, 2 * object_height)
            ik_command.service_request_pose(iksvc, pose, limb, blocking=True)

            # The placed object is no longer a standalone collision object.
            obj.operation = CollisionObject.REMOVE
            obj_manager.publish(obj)

            # Raise the stack target for the next object.
            # NOTE(review): the original source was truncated here
            # (an unterminated commented-out fragment followed) —
            # confirm no trailing logic was lost.
            stack_pose = incrementPoseMsgZ(stack_pose, object_height * 3 / 4)
def main():
    """Stack detected objects with one Baxter arm.

    NOTE(review): this is a byte-for-byte duplicate of an earlier
    ``main`` definition in this file; the later definition shadows the
    earlier one at import time. One of the two copies should be removed.

    Parses the target limb from the command line, brings the robot up
    (MoveIt group, gripper calibration), waits for the perception
    pipeline to publish collision objects, then for each object:
    plan+move above it, visually servo down with the hand camera,
    grasp, carry it to a fixed stacking pose, release, and raise the
    stack target for the next object.
    """
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    required = parser.add_argument_group('required arguments')
    required.add_argument('-l', '--limb', required=True,
                          choices=['left', 'right'],
                          help='send joint trajectory to which limb')
    args = parser.parse_args(rospy.myargv()[1:])
    limb = args.limb

    rospy.init_node('stackit')
    iksvc, ns = ik_command.connect_service(limb)
    moveit_commander.roscpp_initialize(sys.argv)
    rate = rospy.Rate(1)

    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    group = moveit_commander.MoveGroupCommander(limb + "_arm")
    group.allow_replanning(True)

    print("Getting robot state... ")
    rs = baxter_interface.RobotEnable(CHECK_VERSION)
    print("Enabling robot... ")
    rs.enable()

    # Calibrate the gripper once, if it has not been calibrated yet.
    gripper_if = baxter_interface.Gripper(limb)
    if not gripper_if.calibrated():
        print("Calibrating gripper")
        gripper_if.calibrate()

    # Block until the perception side has published at least one object.
    obj_manager = ObjectManager()
    while len(obj_manager.collision_objects) <= 0:
        rate.sleep()
    objects = obj_manager.collision_objects
    object_height = params['object_height']

    # Fixed stacking target in the base frame (hand-tuned constants).
    stack_pose = Pose(
        position=Point(0.593, -0.212, object_height - 0.130),
        orientation=Quaternion(0.6509160466, 0.758886809948,
                               -0.0180992582839, -0.0084573527776))

    processed_win = "Processed image"
    raw_win = "Hand camera"
    cv2.namedWindow(processed_win)

    rospy.on_shutdown(obj_manager.remove_known_objects)

    for obj in objects:
        obj_pose = obj.primitive_poses[0]
        print("Got pose:", obj.primitive_poses[0])
        # Approach pose: project onto the table plane, then hover above it.
        pose = projectPose(obj.primitive_poses[0])
        pose = incrementPoseMsgZ(pose, object_height * 1.7)
        print("Modified pose:", pose)

        print("setting target to pose")
        # Move to the next block.
        group.clear_pose_targets()
        group.set_start_state_to_current_state()
        group.set_pose_target(pose)
        plan = group.plan()
        # TODO: is there a better way of checking plan success?
        plan_found = len(plan.joint_trajectory.points) > 0
        if plan_found:
            group.go(wait=True)

            # Visual servoing: track the object in the hand camera and
            # descend until VisualCommand reports the grasp is done.
            imgproc = ObjectFinder("star", None, None)
            imgproc.subscribe("/cameras/" + limb + "_hand_camera/image")
            imgproc.publish(limb)
            vc = VisualCommand(iksvc, limb)
            vc.subscribe()
            while (not vc.done) and (not rospy.is_shutdown()):
                # Republish the current detections as a BlobInfoArray.
                blobArray = []
                for centroid, axis in zip(imgproc.centroids, imgproc.axes):
                    blob = BlobInfo()
                    blob.centroid = Point(*centroid)
                    if axis is None:
                        # Sentinel "no axis" value expected downstream.
                        axis = -1 * numpy.ones(6)
                    blob.axis = Polygon([Point(*axis[0:3].tolist()),
                                         Point(*axis[3:6].tolist())])
                    blobArray.append(blob)
                msg = BlobInfoArray()
                msg.blobs = blobArray
                imgproc.handler_pub.publish(msg)
                if imgproc.processed is not None:
                    cv2.imshow(processed_win, imgproc.processed)
                    cv2.waitKey(10)
            vc.unsubscribe()
            imgproc.unsubscribe()

            print("Adding attached message")
            # Tell the planning scene the object is now held by this arm.
            obj_manager.publish_attached(obj, limb)

            # Carefully rise away from the object before we plan another path.
            pose = incrementPoseMsgZ(pose, 2 * object_height)
            ik_command.service_request_pose(iksvc, pose, limb, blocking=True)
        else:
            print("Unable to plan path")
            continue  # TODO: recovery behavior when no plan is found

        # Move to the stacking position.
        group.clear_pose_targets()
        group.set_start_state_to_current_state()
        group.set_pose_target(stack_pose)
        plan = group.plan()
        plan_found = len(plan.joint_trajectory.points) > 0
        if plan_found:
            group.go(wait=True)
            gripper_if.open(block=True)
            group.detach_object(obj.id)

            # Carefully rise away from the stack before the next plan.
            pose = incrementPoseMsgZ(stack_pose, 2 * object_height)
            ik_command.service_request_pose(iksvc, pose, limb, blocking=True)

            # The placed object is no longer a standalone collision object.
            obj.operation = CollisionObject.REMOVE
            obj_manager.publish(obj)

            # Raise the stack target for the next object.
            # NOTE(review): the original source was truncated here
            # (an unterminated commented-out fragment followed) —
            # confirm no trailing logic was lost.
            stack_pose = incrementPoseMsgZ(stack_pose, object_height * 3 / 4)
def main():
    """Run the object-finder node.

    Subscribes to a camera topic, lets the user (or a remote topic)
    choose the color/feature to track, then continuously publishes the
    detected blobs as a BlobInfoArray while showing the raw, processed,
    and edge images in OpenCV windows.
    """
    arg_fmt = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=arg_fmt,
                                     description=main.__doc__)
    required = parser.add_argument_group('required arguments')
    required.add_argument('-l', '--limb', required=False,
                          choices=['left', 'right'],
                          help='which limb to send joint trajectory')
    parser.add_argument('-m', '--method',
                        choices=['color', 'edge', 'star', 'watershed'],
                        required=False,
                        help='which detection method to use')
    parser.add_argument('-t', '--topic', required=False,
                        help='which image topic to listen on')
    parser.add_argument('-p', '--pick_point', choices=['true', 'false'],
                        required=False,
                        help='listen for a color topic or select one in the frame')
    args = parser.parse_args(rospy.myargv()[1:])

    # Fill in defaults for omitted options.
    limb = "right" if args.limb is None else args.limb
    if args.method is None:
        args.method = 'color'
    if args.topic is None:
        args.topic = "/cameras/" + limb + "_hand_camera/image"
    # BUG FIX: the original kept pick_point as the string 'true'/'false'
    # and later tested its truthiness; the string 'false' is truthy, so
    # "-p false" behaved like "-p true". Normalize to a real boolean
    # (default True, matching the original default).
    if args.pick_point is None:
        args.pick_point = True
    else:
        args.pick_point = (args.pick_point == 'true')
    print(args)

    print("Initializing node... ")
    rospy.init_node(node_name)
    rospy.on_shutdown(cleanup)

    # Only enable the robot when reading one of Baxter's own cameras.
    baxter_cams = ["/cameras/right_hand_camera/image",
                   "/cameras/left_hand_camera/image",
                   "/cameras/head_camera/image"]
    if args.topic in baxter_cams:
        print("Getting robot state... ")
        rs = baxter_interface.RobotEnable(CHECK_VERSION)
        print("Enabling robot... ")
        rs.enable()

    cv2.namedWindow(raw_win)
    cam = CameraSubscriber()
    cam.subscribe(args.topic)

    point = None
    global picked_color
    picked_color = None
    if "object_finder_test" in args.topic:
        # Hardcoded position used by the test image topic.
        point = (322, 141)
    elif args.pick_point and args.method == "color":
        # Interactive mode: user clicks the object to track.
        rospy.loginfo("Click on the object you would like to track, "
                      "then press any key to continue.")
        ml = common.MouseListener()
        cv2.setMouseCallback(raw_win, ml.onMouse)
        while not ml.done:
            if cam.cur_img is not None:
                cv2.imshow(raw_win, cam.cur_img)
                cv2.waitKey(cam.cv_wait)
        point = (ml.x_clicked, ml.y_clicked)
    elif args.method == "color":
        # Non-interactive: wait for a color on /object_tracker/picked_color.
        def color_callback(data):
            global picked_color
            # Incoming Point carries (r, g, b) in (x, y, z); store as b, g, r.
            picked_color = numpy.array((int(data.z), int(data.y), int(data.x)))
        color_sub = rospy.Subscriber("/object_tracker/picked_color",
                                     Point, color_callback)
        # NOTE(review): cam.cv_wait looks like a millisecond delay but is
        # passed to rospy.Rate, which expects Hz — confirm intent.
        rate = rospy.Rate(cam.cv_wait)
        while picked_color is None and not rospy.is_shutdown():
            rate.sleep()

    detectMethod = None
    cam.unsubscribe()

    cv2.namedWindow(processed_win)
    if cam.cur_img is not None:
        cv2.imshow(processed_win, numpy.zeros((cam.cur_img.shape)))

    print("Starting image processor")
    imgproc = ObjectFinder(args.method, point, picked_color)
    imgproc.subscribe(args.topic)
    imgproc.publish(limb)
    cv2.namedWindow(edge_win)
    cv2.setMouseCallback(raw_win, imgproc.updatePoint)

    # Main loop: republish detections and refresh the display windows.
    while not rospy.is_shutdown():
        blobArray = []
        for centroid, axis in zip(imgproc.centroids, imgproc.axes):
            blob = BlobInfo()
            blob.centroid = Point(*centroid)
            if axis is None:
                # Sentinel "no axis" value expected downstream.
                axis = -1 * numpy.ones(6)
            blob.axis = Polygon([Point(*axis[0:3].tolist()),
                                 Point(*axis[3:6].tolist())])
            blobArray.append(blob)
        msg = BlobInfoArray()
        msg.blobs = blobArray
        imgproc.handler_pub.publish(msg)

        if imgproc.cur_img is not None:
            cv2.imshow(raw_win, imgproc.cur_img)
        if imgproc.processed is not None:
            cv2.imshow(processed_win, imgproc.processed)
        if imgproc.canny is not None:
            cv2.imshow(edge_win, imgproc.canny)
        cv2.waitKey(imgproc.cv_wait)