def get_object_pose(self, key):
    self.client.wait_for_server()
    goal = ObjectRecognitionGoal(False, [-1e10, 1e10, -1e10, 1e10])
    self.client.send_goal(goal)
    self.client.wait_for_result()
    ret = self.client.get_result()
    for r in ret.recognized_objects.objects:
        if r.type.key == key:
            return rox_msg.msg2transform(r.pose.pose.pose)
    raise Exception("Requested object not found")
def main():
    rospy.init_node('test_object_recognition')
    client = actionlib.SimpleActionClient('/recognize_objects', ObjectRecognitionAction)
    client.wait_for_server()
    goal = ObjectRecognitionGoal()
    client.send_goal(goal)
    client.wait_for_result()
    result = client.get_result()
    rospy.loginfo('Recognized objects: %s',
                  ', '.join(['{}/{}: {:.2f}'.format(ro.type.db, ro.type.key, ro.confidence)
                             for ro in result.recognized_objects.objects]))
def _vision_get_object_pose(self, key):
    self.overhead_vision_client.wait_for_server()
    goal = ObjectRecognitionGoal(False, [-1e10, 1e10, -1e10, 1e10])
    self.overhead_vision_client.send_goal(goal)
    self.overhead_vision_client.wait_for_result()
    ret = self.overhead_vision_client.get_result()
    for r in ret.recognized_objects.objects:
        if r.type.key == key:
            # RecognizedObject.pose is a PoseWithCovarianceStamped, so the raw
            # Pose is nested two levels deep (pose.pose.pose).
            rox_pose = rox_msg.msg2transform(r.pose.pose.pose)
            rox_pose.parent_frame_id = r.pose.header.frame_id
            rox_pose.child_frame_id = key
            return rox_pose
    raise Exception("Requested object not found")
def test_actionlib(self):
    rospy.init_node('ork_client')
    client = actionlib.SimpleActionClient('recognize_objects', ObjectRecognitionAction)
    client.wait_for_server()
    start = rospy.Time.now()  # for checking the round trip time.
    goal = ObjectRecognitionGoal()
    # Sample region of interest for object detection (disabled by default)
    # goal.use_roi = True
    # goal.filter_limits = [-0.4, 0.4, -1.0, 0.2, 0.01, 1.5]
    client.send_goal(goal, done_cb=on_result)
    client.wait_for_result()  # wait indefinitely for a result
    # print out the round trip time.
    rospy.loginfo('Time for 1 detection: %s', (rospy.Time.now() - start).to_sec())
import actionlib
import argparse
import rospy
import subprocess
import sys

from object_recognition_msgs.msg import ObjectRecognitionAction, ObjectRecognitionGoal


def on_result(status, result):
    rospy.loginfo('Received result from ORK.')


if __name__ == '__main__':
    rospy.init_node('recognition_client')
    # Launch the test action server alongside this client.
    proc = subprocess.Popen('./test_server2.py', shell=True)
    client = actionlib.SimpleActionClient('recognize_objects', ObjectRecognitionAction)
    client.wait_for_server()
    start = rospy.Time.now()  # for checking the round trip time.
    goal = ObjectRecognitionGoal()
    # Sample region of interest for object detection (disabled by default)
    # goal.use_roi = True
    # goal.filter_limits = [-0.4, 0.4, -1.0, 0.2, 0.01, 1.5]
    client.send_goal(goal, done_cb=on_result)
    client.wait_for_result()  # wait indefinitely for a result
    # print out the round trip time.
    rospy.loginfo('Time for 1 detection: %s', (rospy.Time.now() - start).to_sec())
def trigger_detection(self):
    if self._action_client is None:
        self.start_action_client()
    goal = ObjectRecognitionGoal()
    self._action_client.send_goal(goal, done_cb=self.on_action_result)
sm.userdata.drop_target_pose = PoseStamped()
sm.userdata.drop_target_pose.header.frame_id = 'odom'
# move_base goal pose in odom frame
sm.userdata.move_base_odom = PoseStamped()
# move arm trajectory
sm.userdata.arm_plan = RobotTrajectory()
# joints' retracted position in joint space (arm joints only)
sm.userdata.retract_pos = [0.0, 0.0, 0.80, -1.80, 0.0, 0.0]
# array of RecognizedObject
sm.userdata.object_array = RecognizedObjectArray()
# define an object_recognition goal
sm.userdata.ork_goal = ObjectRecognitionGoal()
sm.userdata.ork_goal.use_roi = False
sm.userdata.ork_frame = ''
sm.userdata.picked_objects = ['apple', 'big coffee', 'monster']
sm.userdata.target_object = ''

with sm:
    def ork_result_cb(userdata, status, result):
        if status == GoalStatus.SUCCEEDED:
            if len(result.recognized_objects.objects) > 0:
                userdata.ork_object_array = result.recognized_objects.objects
                userdata.ork_action_frame = result.recognized_objects.header.frame_id

    bash_str = 'sh ' + PATH_TO_PDF_CREATOR