def __init__(self):
    """Head controller that tilts/looks along the move_base local plan.

    Wires up the point_head action client, a private ~configure service to
    toggle behavior, and subscriptions to the local plan and goal status.
    """
    # enables
    self.enable_tilt = True
    self.enable_look = True
    self.has_goal = False
    # pose and lock
    self.x = 1.0
    self.y = 0.0
    self.mutex = Lock()
    self.listener = TransformListener()
    # Blocks until the point_head action server is up.
    self.client = actionlib.SimpleActionClient(
        'head_controller/point_head', PointHeadAction)
    self.client.wait_for_server()
    # ~configure presumably flips the enable_* flags above -- confirm in
    # serviceHandler (not visible here).
    self.service = rospy.Service('~configure', TiltControl, self.serviceHandler)
    self.plan_sub = rospy.Subscriber(
        'move_base/TrajectoryPlannerROS/local_plan', Path, self.planCallback)
    self.stat_sub = rospy.Subscriber('move_base/status', GoalStatusArray,
                                     self.statCallback)
def __init__(self, args):
    """roadmap_server node: builds a vslam Skeleton from raw wide-stereo
    frames and publishes it on the 'roadmap' topic.

    :param args: command-line args; args[1] is a saved-skeleton path, used
        only by the (currently disabled) resume branch below.
    """
    rospy.init_node('roadmap_server')
    self.optimization_distance = rospy.get_param('~optimization_distance', 10);
    self.tf = TransformListener()
    # Hard-coded stereo intrinsics/baseline tuple -- exact field order is
    # defined by camera.Camera's ctor; confirm there before editing.
    stereo_cam = camera.Camera((389.0, 389.0, 89.23 * 1e-3, 323.42, 323.42, 274.95))
    self.skel = Skeleton(stereo_cam)
    # self.skel.node_vdist = 1
    # Dead branch kept as a developer toggle: flip to True to resume from a
    # previously saved, optimized skeleton instead of starting fresh.
    if False:
        self.skel.load(args[1])
        self.skel.optimize()
        self.startframe = 100000
    else:
        self.startframe = 0
    self.frame_timestamps = {}
    self.vo = None  # visual odometry; created lazily elsewhere
    self.pub = rospy.Publisher("roadmap", vslam.msg.Roadmap)
    time.sleep(1)  # give the publisher time to register before first use
    #self.send_map(rospy.time(0))
    self.wheel_odom_edges = set()
    rospy.Subscriber('/wide_stereo/raw_stereo', stereo_msgs.msg.RawStereo,
                     self.handle_raw_stereo, queue_size=2, buff_size=7000000)
def __init__(self):
    """Expose the current tool pose via the 'tool_pose' service.

    The publisher/subscriber pair is commented out in favor of the
    on-demand service interface.
    """
    self.pose = PoseStamped()
    self._tf_listener = TransformListener()
    #self.pose_pub = rospy.Publisher('tool_pose', PoseStamped, queue_size=10)
    #self.joint_sub = rospy.Subscriber('joint_states', JointState, self.tool_pose_callback)
    self.joint_srv = rospy.Service('tool_pose', GetToolPose, self.tool_pose_callback)
def __init__(self, name):
    """Action server driving the MoveIt 'arm' group toward simple targets.

    Blocks until the move_group node is running, configures the planner,
    then starts a SimpleActionServer under *name*.

    :param name: action namespace for the SimpleActionServer
    """
    # wait for moveit: /move_group/result appears once move_group is up.
    # (idiom fix: 'x not in d' instead of 'not x in dict(...).keys()')
    while "/move_group/result" not in dict(rospy.get_published_topics()):
        rospy.sleep(2)
    self.group = MoveGroupCommander("arm")
    self.group.set_start_state_to_current_state()
    self.group.set_planner_id("RRTConnectkConfigDefault")
    self.group.set_pose_reference_frame("/base_footprint")
    self.group.set_max_velocity_scaling_factor(1)
    self.group.set_num_planning_attempts(50)
    self.group.set_planning_time(10)  # seconds
    self.group.set_goal_position_tolerance(0.01)     # meters
    self.group.set_goal_orientation_tolerance(0.02)  # radians
    self.tf_listener = TransformListener()
    self._action_name = name
    self._as = actionlib.SimpleActionServer(
        self._action_name, elevator.msg.SimpleTargetAction,
        execute_cb=self.execute_cb, auto_start=False)
    self._as.start()
def __init__(self, name, server, menu_handler, frame_world, frame_opt_parent, frame_opt_child, frame_sensor, marker_scale):
    """One calibratable sensor: tracks its tf chain, shows an interactive
    marker for manual adjustment and periodically broadcasts its transform.

    :param name: sensor name (used for printouts and marker ids)
    :param server: shared InteractiveMarkerServer
    :param menu_handler: shared menu handler for the marker
    :param frame_world / frame_opt_parent / frame_opt_child / frame_sensor:
        tf frame names bounding the pre/opt/pos transform chain
    :param marker_scale: visual scale of the interactive marker
    """
    print('Creating a new sensor named ' + name)
    self.name = name
    self.server = server
    self.menu_handler = menu_handler
    self.listener = TransformListener()
    self.br = tf.TransformBroadcaster()
    self.marker_scale = marker_scale
    # transforms
    self.world_link = frame_world
    self.opt_parent_link = frame_opt_parent
    self.opt_child_link = frame_opt_child
    self.sensor_link = frame_sensor
    self.updateAll()  # update all the transformations
    # print('Collected pre, opt and pos transforms.')
    # # print('preT:\n' + str(self.preT))
    # print('optT:\n' + str(self.optT))
    # print('posT:\n' + str(self.posT))
    # Keep a copy of the initial optimization transform so it can be reset.
    self.optTInitial = copy.deepcopy(self.optT)
    self.createInteractiveMarker()  # create interactive marker
    print('Created interactive marker.')
    # Start publishing now
    self.timer_callback = rospy.Timer(
        rospy.Duration(.1), self.publishTFCallback)  # to periodically broadcast
def __init__(self, group, fixed_frame, gripper_frame, cart_srv,
             listener=None, plan_only=False):
    """MoveIt interface with Cartesian-path support.

    :param group: planning group name
    :param fixed_frame: planning/fixed frame
    :param gripper_frame: end-effector frame
    :param cart_srv: name of the GetCartesianPath service
    :param listener: optional shared TransformListener (one is created if
        omitted)
    :param plan_only: if True, plan without executing
    """
    self._group = group
    self._fixed_frame = fixed_frame
    self._gripper_frame = gripper_frame  # a.k.a end-effector frame
    self._action = actionlib.SimpleActionClient('move_group', MoveGroupAction)
    self._traj_action = actionlib.SimpleActionClient(
        'execute_trajectory', ExecuteTrajectoryAction)
    self._cart_service = rospy.ServiceProxy(cart_srv, GetCartesianPath)
    try:
        self._cart_service.wait_for_service(timeout=3)
    except rospy.ROSException:
        # Narrowed from a bare 'except:': wait_for_service signals timeout
        # (and shutdown) via ROSException; anything else should propagate.
        rospy.logerr("Timeout waiting for Cartesian Planning Service!!")
    self._action.wait_for_server()
    # Identity comparison with None (PEP 8) instead of '== None'.
    if listener is None:
        self._listener = TransformListener()
    else:
        self._listener = listener
    self.plan_only = plan_only
    self.planner_id = None
    self.planning_time = 15.0
def __init__(self):
    """Default MoveIt planning configuration for an 'arm' group.

    Holds planner parameters, diff-mode planning scene / start state, and
    slots for the goal (pose or joint-space) filled in elsewhere.
    """
    self.allowed_planning_time = 10.0  # seconds
    self.fixed_frame = 'base_link'
    self.gripper_frame = 'wrist_roll_link'
    self.group_name = 'arm'
    # Diff-mode scene/state: only changes are sent, not a full snapshot.
    self.planning_scene_diff = moveit_msgs.msg.PlanningScene()
    self.planning_scene_diff.is_diff = True
    self.planning_scene_diff.robot_state.is_diff = True
    self.look_around = False
    # 0 lets MoveIt fall back to its configured default scaling.
    self.max_acceleration_scaling_factor = 0
    self.max_velocity_scaling_factor = 0
    self.num_planning_attempts = 1
    self.plan_only = False
    self.planner_id = 'RRTConnectkConfigDefault'
    self.replan = False
    self.replan_attempts = 5
    self.replan_delay = 1  # seconds between replans
    self.start_state = moveit_msgs.msg.RobotState()
    self.start_state.is_diff = True
    self.tolerance = 0.01
    # NOTE(review): attribute name has a typo ('contraints'); kept as-is
    # since other code in this project may already reference it.
    self._orientation_contraints = []
    self._pose_goal = None
    self._joint_names = None
    self._joint_positions = None
    self._tf_listener = TransformListener()
def __init__(self):
    """Connect to the 'manipulator' MoveIt group and its joint trajectory
    action server; blocks until the action server responds."""
    self.manip = mic.MoveGroupCommander("manipulator")
    self.client = act.SimpleActionClient('joint_trajectory_action', trajAction)
    rp.loginfo("Waiting for server joint_trajectory_action.")
    self.client.wait_for_server()
    rp.loginfo("Successfuly connected to server.")
    self.tfListener = TransformListener()
def get_cube_names(self, pose):
    """Transform *pose* (expressed in base_link) into the /map frame.

    NOTE(review): despite the name this returns a transformed PoseStamped,
    not cube names -- confirm call sites before renaming.

    :param pose: geometry_msgs Pose in the base_link frame
    :returns: PoseStamped in the /map frame
    """
    point = PoseStamped()
    point.header.frame_id = 'base_link'
    point.header.stamp = rospy.Time()
    point.pose = pose
    # A fresh listener starts with an empty tf buffer; the 1 s sleep lets
    # it accumulate transforms before the lookup below.
    tf_listener = TransformListener()
    rospy.sleep(1.0)
    return tf_listener.transformPose('/map', point)
def __init__(self):
    """Arm interface: joint-trajectory and MoveGroup action clients plus a
    compute_ik service proxy."""
    self._joint_client = actionlib.SimpleActionClient(
        JOINT_ACTION_SERVER, control_msgs.msg.FollowJointTrajectoryAction)
    # 10 s grace period; wait_for_server's return value is ignored, so a
    # missing server is not reported here.
    self._joint_client.wait_for_server(rospy.Duration(10))
    self._move_group_client = actionlib.SimpleActionClient(
        MOVE_GROUP_ACTION_SERVER, MoveGroupAction)
    self._move_group_client.wait_for_server(rospy.Duration(10))
    self._compute_ik = rospy.ServiceProxy('compute_ik', GetPositionIK)
    self._tf_listener = TransformListener()
def __init__(self):
    """Point the head at faces reported by the face detector."""
    self.moving = False  # guards against overlapping head motions
    self.listener = TransformListener()
    self.broadcaster = TransformBroadcaster()
    self.client = actionlib.SimpleActionClient("head_controller/point_head",
                                               PointHeadAction)
    self.client.wait_for_server()
    self.face_sub = rospy.Subscriber(
        "face_detector/people_tracker_measurements_array",
        PositionMeasurementArray, self.faceCallback)
    # Wall-clock time of the last face callback (time.time(), not ROS time).
    self.lastFaceCallback = time.time()
    self.lastHeadTarget = None
def filter_by_distance(features, max_distance):
    # type: (List[Feature], float) -> List[Feature]
    """Return the features whose back-projected 3D point lies within
    *max_distance* along the camera_link x axis."""
    pixel_to_point_srv = rospy.ServiceProxy('cam_pixel_to_point', CamPixelToPoint)
    listener = TransformListener()
    kept = []
    for feat in features:
        pixel = Vector3()
        pixel.x = feat.centroid[0]
        pixel.y = feat.centroid[1]
        stamped = pixel_to_point_srv(pixel).point  # type: PointStamped
        listener.waitForTransform('camera_link', stamped.header.frame_id,
                                  rospy.Time(rospy.get_time()),
                                  rospy.Duration(1))
        stamped = listener.transformPoint('camera_link', stamped)
        if stamped.point.x <= max_distance:
            kept.append(feat)
    return kept
def __init__(self, group, frame, listener=None, plan_only=False):
    """Thin MoveGroup client wrapper.

    :param group: planning group name
    :param frame: fixed/planning frame
    :param listener: optional shared TransformListener (created if omitted)
    :param plan_only: if True, plan without executing
    """
    self._group = group
    self._fixed_frame = frame
    self._action = actionlib.SimpleActionClient('move_group', MoveGroupAction)
    self._action.wait_for_server()
    # Identity comparison with None (PEP 8) instead of '== None'.
    if listener is None:
        self._listener = TransformListener()
    else:
        self._listener = listener
    self.plan_only = plan_only
    self.planner_id = None
    self.planning_time = 15.0
def feature_depths(features):
    """Return the Euclidean distance from camera_link to each feature's
    back-projected 3D point, in input order."""
    pixel_to_point_srv = rospy.ServiceProxy('cam_pixel_to_point', CamPixelToPoint)
    listener = TransformListener()
    distances = []
    for feat in features:
        pixel = Vector3()
        pixel.x = feat.centroid[0]
        pixel.y = feat.centroid[1]
        stamped = pixel_to_point_srv(pixel).point  # type: PointStamped
        listener.waitForTransform('camera_link', stamped.header.frame_id,
                                  rospy.Time(rospy.get_time()),
                                  rospy.Duration(1))
        stamped = listener.transformPoint('camera_link', stamped)
        coords = [stamped.point.x, stamped.point.y, stamped.point.z]
        distances.append(np.linalg.norm(coords))
    return distances
def __init__(self):
    """Pick-and-place demo setup: planning scene, pick/place and move-group
    interfaces, plus the grasping-perception action client."""
    self.scene = PlanningSceneInterface("base_link")
    self.pickplace = PickPlaceInterface("left_arm", "left_hand", verbose = True)
    self.move_group = MoveGroupInterface("left_arm", "base_link")
    find_topic = "basic_grasping_perception/find_objects"
    rospy.loginfo("Waiting for %s..." % find_topic)
    self.find_client = actionlib.SimpleActionClient(find_topic, FindGraspableObjectsAction)
    self.find_client.wait_for_server(rospy.Duration(15.0))
    self.cubes = []
    self.tf_listener = TransformListener()
    # self.gripper_client = actionlib.SimpleActionClient("/robot/end_effector/left_gripper/gripper_action", GripperCommandAction)
    # self.gripper_client.wait_for_server()
    # rospy.loginfo("...connected.")
    rospy.sleep(2.0)  # let the planning scene and tf buffer settle
def execute(self, userdata):
    """Smach state: move the arm to the conveyor scan pose.

    Waits (up to abort_after seconds each) for the 'drop_point' and
    'mdl_middle' tf frames, computes/caches the scan pose in userdata, then
    tries up to three times to move the arm there.

    :returns: 'scanPoseReached' on success, 'fail' otherwise
    """
    rospy.logdebug('CamToDropzone: Executing state CamToDropzone')
    listener = TransformListener()
    if utils.manipulation is None:
        utils.manipulation = Manipulation()
    utils.manipulation.set_turbo_mode()
    utils.manipulation.set_movement_time(9)
    # TODO: raise an exception once abort_after has elapsed
    # (translated from German: "Exception schmeissen wenn abort_after
    # vergangen ist")
    abort_after = 15  # seconds to wait for each tf frame
    then = int(time.time())
    now = int(time.time())
    while not listener.frameExists("drop_point"):
        if not (now - then < abort_after):
            rospy.logdebug('drop_point frame not found!')
            return 'fail'
        rospy.loginfo("wait for drop_point frame")
        rospy.sleep(2.)
        now = int(time.time())
    then = int(time.time())
    now = int(time.time())
    while not listener.frameExists("mdl_middle"):
        if not (now - then < abort_after):
            rospy.logdebug('CamToDropzone: mdl_middle frame not found!')
            return 'fail'
        rospy.sleep(2.)
        now = int(time.time())
    # Scan pose is expensive to compute; cache it in userdata for reuse.
    if len(userdata.scan_conveyor_pose) == 0:
        rospy.logdebug('CamToDropzone: No scanPose, calculate now...')
        p = utils.manipulation.scan_conveyor_pose()
        userdata.scan_conveyor_pose.append(p)
    else:
        rospy.logdebug('CamToDropzone: scanPose found!')
        p = userdata.scan_conveyor_pose[0]
    rospy.logdebug('CamToDropzone: Move arm to scan_conveyor_pose')
    for i in range(0, 3):
        if utils.manipulation.move_to(p):
            return 'scanPoseReached'
        else:
            if i == 2:
                rospy.logdebug('CamToDropzone: Cant move arm to scan_conveyor_pose')
                return 'fail'
def select_center(features):
    # type: (List[Feature]) -> Feature
    """Return the feature whose back-projected ray deviates least from the
    camera's optical axis (the +x axis of camera_link)."""
    pixel_to_point_srv = rospy.ServiceProxy('cam_pixel_to_point', CamPixelToPoint)
    listener = TransformListener()
    axis = np.array([1.0, 0.0, 0.0])
    deviations = []
    for feat in features:
        pixel = Vector3()
        pixel.x = feat.centroid[0]
        pixel.y = feat.centroid[1]
        stamped = pixel_to_point_srv(pixel).point  # type: PointStamped
        listener.waitForTransform('camera_link', stamped.header.frame_id,
                                  rospy.Time(rospy.get_time()),
                                  rospy.Duration(1))
        stamped = listener.transformPoint('camera_link', stamped)
        direction = np.array([stamped.point.x, stamped.point.y, stamped.point.z])
        direction /= np.linalg.norm(direction)
        deviations.append(abs(np.arccos(np.dot(axis, direction))))
    return features[np.argmin(deviations)]
def transform_pose(_pose2D, src_frame='CameraTop_frame',
                   dst_frame='/base_footprint', timeout=3):
    """Transform a Pose2D from *src_frame* into *dst_frame*.

    :param _pose2D: geometry_msgs Pose2D to transform (z assumed 0)
    :param src_frame: source tf frame
    :param dst_frame: destination tf frame
    :param timeout: seconds to wait for the transform
    :returns: a new Pose2D on success; on any tf failure the original
        _pose2D is returned unchanged and a warning is logged.
    """
    tl = TransformListener()
    pose_stamped = PoseStamped()
    pose_stamped.header.stamp = rospy.Time()
    pose_stamped.header.frame_id = src_frame
    # Bug fix: Quaternion() defaults to all zeros, which is not a valid
    # rotation; use the identity quaternion (w=1) instead.
    pose_stamped.pose = Pose(Point(_pose2D.x, _pose2D.y, 0.0),
                             Quaternion(0.0, 0.0, 0.0, 1.0))
    try:
        tl.waitForTransform(target_frame=dst_frame, source_frame=src_frame,
                            time=rospy.Time(),
                            timeout=rospy.Duration(timeout))
        pose_transf = tl.transformPose(dst_frame, pose_stamped)
    except Exception as e:
        # Broad catch is a deliberate best-effort boundary: fall back to
        # the untransformed pose so callers always get a Pose2D.
        rospy.logwarn("Transformation failed!!! %s" % str(e))
        return _pose2D
    return Pose2D(pose_transf.pose.position.x, pose_transf.pose.position.y, 0.0)
def __init__(self):
    """Clickable-world grasping node for the configured arm ('r' or 'l'):
    perception/action services plus overhead-grasp action clients."""
    self.arm = rospy.get_param("arm", "r")
    self.tl = TransformListener()
    self.perception_srv = rospy.Service("/clickable_world/detect_objects",
                                        PerceiveButtons, self.do_perception)
    self.action_srv = rospy.Service("/clickable_world/grasp_object",
                                    ButtonAction, self.do_action)
    self.obj_seg_srv = rospy.ServiceProxy("/object_button_detect",
                                          ObjectButtonDetect)
    # Action names are prefixed with the arm side, e.g. 'r_overhead_grasp'.
    self.grasp_client = actionlib.SimpleActionClient(
        self.arm + '_overhead_grasp', OverheadGraspAction)
    self.grasp_client.wait_for_server()
    self.grasp_setup_client = actionlib.SimpleActionClient(
        self.arm + '_overhead_grasp_setup', OverheadGraspSetupAction)
    self.grasp_setup_client.wait_for_server()
def jointsInfo(printoutRate=0.5, anonym=False):
    """Creates new node to listen to /joint_states topic.

    Usage:
      - printoutRate: print rate to terminal in Hz (default 0.5 Hz)
      - anonym [True/False]: run as an anonymous node (default False)
    """
    rp.init_node("abb_jointListener", anonymous=anonym)
    tf_listener = TransformListener()
    # The listener is handed to the callback as its extra argument.
    joint_sub = rp.Subscriber("/joint_states", JointState, __listenCb,
                              tf_listener)
    rp.Rate(printoutRate).sleep()
    rp.spin()
def __init__(self): self.last_vel_time = rospy.Time(0) # pose and lock self.x = 1.0 self.y = 0.0 self.mutex = Lock() self.listener = TransformListener() self.client = actionlib.SimpleActionClient( "head_controller/point_head", PointHeadAction) self.client.wait_for_server() self.plan_sub = rospy.Subscriber("/safe_teleop_base/local_plan", Path, self.planCallback) self.vel_sub = rospy.Subscriber("/teleop/cmd_vel/unsafe", Twist, self.velCallback)
def __init__(self):
    """Clickable-world placing node for the configured arm ('r' or 'l'):
    table perception/placement services plus overhead-grasp clients."""
    self.arm = rospy.get_param("arm", "r")
    self.tl = TransformListener()
    self.perception_srv = rospy.Service(
        "/clickable_world/place_table_perceive", PerceiveButtons,
        self.do_perception)
    self.action_srv = rospy.Service("/clickable_world/place_object",
                                    ButtonAction, self.do_action)
    self.pc_capture_srv = rospy.ServiceProxy(
        "/table_detection/surf_seg_capture_pc", std_srvs.srv.Empty)
    self.table_seg_srv = rospy.ServiceProxy(
        "/table_detection/segment_surfaces", SegmentSurfaces)
    # Action names are prefixed with the arm side, e.g. 'r_overhead_grasp'.
    self.grasp_client = actionlib.SimpleActionClient(
        self.arm + '_overhead_grasp', OverheadGraspAction)
    self.grasp_client.wait_for_server()
    self.grasp_setup_client = actionlib.SimpleActionClient(
        self.arm + '_overhead_grasp_setup', OverheadGraspSetupAction)
    self.grasp_setup_client.wait_for_server()
def __init__(self): self.has_goal = False # pose and lock self.x = 1.0 self.y = 0.0 self.mutex = Lock() self.listener = TransformListener() self.client = actionlib.SimpleActionClient( "head_controller/point_head", PointHeadAction) self.client.wait_for_server() self.plan_sub = rospy.Subscriber( "move_base/TrajectoryPlannerROS/local_plan", Path, self.planCallback) self.stat_sub = rospy.Subscriber("move_base/status", GoalStatusArray, self.statCallback)
def transform_pose(pose, src_frame, dst_frame, timeout=10):
    """Transforms the given pose from src_frame to dst_frame.

    :param pose a geometry_msgs Pose (a warning is logged for other types)
    :param src_frame
    :param dst_frame
    :param timeout the amount of time allowed (in secs) for a
        transformation (default 10)
    :returns the transformed Pose, or the original pose if tf fails
    """
    # isinstance replaces the fragile str(type(...)) string comparison and
    # also accepts Pose subclasses.
    if not isinstance(pose, Pose):
        rospy.logwarn(colors.BACKGROUND_RED +
                      "The 'pose' should be a Pose object, not '%s'.%s"
                      % (str(type(pose)).split('\'')[1], colors.NATIVE_COLOR))
    from tf.listener import TransformListener
    assert timeout >= 1
    pose_stamped = PoseStamped()
    pose_stamped.header.stamp = rospy.Time()
    pose_stamped.header.frame_id = src_frame
    pose_stamped.pose = pose
    global _tl
    # Lazily create one shared listener; give a fresh one half a second to
    # fill its tf buffer and charge that against the caller's timeout.
    if _tl is None:
        _tl = TransformListener()
        rospy.sleep(0.5)
        timeout -= 0.5
    rospy.loginfo("Transforming position from %s to %s coordinates..." % (
        src_frame, dst_frame))
    # If something fails we'll return the original pose (for testing
    # with mocks when tf isn't available)
    result = pose
    try:
        _tl.waitForTransform(
            target_frame=dst_frame, source_frame=src_frame,
            time=rospy.Time(), timeout=rospy.Duration(timeout))
        pose_transf = _tl.transformPose(dst_frame, pose_stamped)
        result = pose_transf.pose
    except Exception as e:
        rospy.logwarn(colors.BACKGROUND_RED +
                      "Warning! Pose transformation failed! %s%s"
                      % (str(e), colors.NATIVE_COLOR))
    return result
def __init__(self):
    """Fuse face detections and leg tracks to publish the closest person."""
    # Internal parameters
    self.publish_rate = rospy.get_param("~publish_rate", 10.0)
    self.listener = TransformListener()
    self.desired_pose_frame = rospy.get_param("~desired_pose_frame",
                                              "base_link")
    self.position_match_threshold = rospy.get_param(
        "~position_match_threshold", 1.0)
    # Variables to keep track of state
    self.closest_person = None
    self.leg_detections = []
    self.closest_person_lock = Lock()
    self.leg_detections_lock = Lock()
    # Internal helpers
    # 3D distance between a tracked person A (2D pose) and a face
    # detection B, with the face height fixed at 1.2 m.
    self.person_face_distance_func = lambda A, B: np.sqrt(
        (A.pose.position.x - B.pos.x)**2 +
        (A.pose.position.y - B.pos.y)**2 +
        (1.2 - B.pos.z)**2  # A person's face is about 4ft from the floor
    )
    # True when the given leg detection is the same person currently held
    # as closest via a face detection (matched by id).
    self.leg_detection_is_closest_face = lambda detected_person: (
        self.closest_person is not None and self.closest_person.
        detection_context.pose_source == DetectionContext.POSE_FROM_FACE
        and self.closest_person.id == detected_person.id)
    # The subscribers and publishers
    self.face_sub = rospy.Subscriber(
        "face_detector/people_tracker_measurements_array",
        PositionMeasurementArray, self.face_callback)
    self.leg_sub = rospy.Subscriber("people_tracked", PersonArray,
                                    self.leg_callback)
    self.closest_person_pub = rospy.Publisher('~closest_person', Person,
                                              queue_size=10)
    # Debug
    self.debug_enabled = rospy.get_param("~debug", False)
    if self.debug_enabled:
        self.debug_pub1 = rospy.Publisher("~debug/1", Marker, queue_size=1)
        self.debug_pub2 = rospy.Publisher("~debug/2", Marker, queue_size=1)
def execute(self, userdata):
    """Smach state: place the held object in the target zone.

    Waits up to abort_after seconds for the 'target_zone' tf frame, then
    tries up to seven place poses (switching to an alternate placement
    strategy after the third failure) before giving up.

    :returns: 'success' after opening the gripper at a reached place pose,
        'fail' otherwise (the gripper is also opened on final failure)
    """
    rospy.logdebug('PlaceTask6: Executing state PlaceTask6')
    listener = TransformListener()
    if utils.manipulation is None:
        utils.manipulation = Manipulation()
    # TODO: raise an exception once abort_after has elapsed
    # (translated from German: "Exception schmeissen wenn abort_after
    # vergangen ist")
    abort_after = 15  # seconds to wait for the target_zone frame
    then = int(time.time())
    now = int(time.time())
    while not listener.frameExists("target_zone"):
        if not now - then < abort_after:
            rospy.logdebug("PlaceTask6: target_zone frame not found")
            return 'fail'
        rospy.loginfo("wait for target_zone frame")
        rospy.sleep(2.)
        now = int(time.time())
    self.__place_pose = self.get_place_point(userdata)
    rospy.logdebug('PlaceTask6: Move Arm to Place Pose')
    rospy.logdebug(self.__place_pose)
    for i in range(0, 7):
        if utils.manipulation.move_to(self.__place_pose):
            rospy.logdebug('PlaceTask6: OpenGripper')
            utils.manipulation.open_gripper()
            return 'success'
        else:
            rospy.logdebug('PlaceTask6: Calculate new place position...')
            # First three retries use the default strategy, later ones pass
            # an extra argument (3) -- presumably a wider search; confirm in
            # get_place_point.
            if i <= 2:
                self.__place_pose = self.get_place_point(userdata)
            if i > 2:
                self.__place_pose = self.get_place_point(userdata, 3)
            if i == 6:
                rospy.logdebug('PlaceTask6: Cant find place position!')
                utils.manipulation.open_gripper()
                return 'fail'
def main():
    """Interactively record named waypoints and dump them to a YAML file.

    For each name, waits for joystick button 0, captures the current
    map->base_link transform, then writes all poses to
    param/find_waypoints_generated.yaml.
    """
    rospy.init_node('find_waypoints')
    joy_subscriber = SubscriberValue('joy', Joy)
    tf_listener = TransformListener()
    names = [
        'initial_pose', 'off_ramp_start', 'off_ramp_end', 'ar_search_1',
        'ar_search_2', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8',
        'on_ramp'
    ]
    coords = []
    for name in names:
        rospy.sleep(rospy.Duration(2))
        print(name)
        # Busy-wait for the first joystick button to be pressed.
        while not joy_subscriber.value.buttons[0]:
            rospy.sleep(rospy.Duration(0, 1000))
        pose = tf_listener.lookupTransform('/map', '/base_link', rospy.Time())
        coords.append(pose)
        print('Saved')
    # Bug fix: the output file was never closed; 'with' guarantees the YAML
    # is flushed to disk even if a write fails midway.
    out_path = path.join(path.dirname(__file__), '..', 'param',
                         'find_waypoints_generated.yaml')
    with open(out_path, 'w') as out_file:
        out_file.write('named_poses:\n')
        for name, ((x, y, z), (rx, ry, rz, rw)) in zip(names, coords):
            out_file.write(' {name}:\n'.format(name=name))
            out_file.write(' position:\n')
            out_file.write(' - {x}\n'.format(x=x))
            out_file.write(' - {y}\n'.format(y=y))
            out_file.write(' - {z}\n'.format(z=z))
            out_file.write(' orientation:\n')
            out_file.write(' - {x}\n'.format(x=rx))
            out_file.write(' - {y}\n'.format(y=ry))
            out_file.write(' - {z}\n'.format(z=rz))
            out_file.write(' - {w}\n'.format(w=rw))
def __init__(self):
    """Coordinate with a human who enters the trigger zone: publishes
    coordination signals and tracks navigation facts in underworlds."""
    self.marker_pub = rospy.Publisher(HUMAN_TRIGGER_MARKER_PUB, Marker,
                                      queue_size=10)
    self.coord_signal = rospy.Publisher(COORD_SIGNAL_PUB, CoordinationSignal,
                                        queue_size=1)
    self.tf_listener = TransformListener()
    self.move_base = SimpleActionClient(MOVE_BASE_ACTION_SRV, MoveBaseAction)
    rospy.loginfo(
        "{} waiting for move_base action server.".format(NODE_NAME))
    self.move_base.wait_for_server()
    rospy.loginfo("{} found move_base action server.".format(NODE_NAME))
    self.navigating_fact_id = None
    rospy.loginfo(NODE_NAME + " waiting for underworlds start fact service server.")
    self.start_fact = rospy.ServiceProxy(START_FACT_SRV_NAME, StartFact)
    self.start_fact.wait_for_service()
    rospy.loginfo(NODE_NAME + " found underworlds start fact service server.")
    rospy.loginfo(NODE_NAME + " waiting for underworlds stop fact service server.")
    self.end_fact = rospy.ServiceProxy(STOP_FACT_SRV_NAME, EndFact)
    self.end_fact.wait_for_service()
    rospy.loginfo(NODE_NAME + " found underworlds stop fact service server.")
    self.coordination_human_timer = None
    # Trigger zone rectangle -- argument meanings assumed from BoundingBox's
    # ctor; confirm before editing the values.
    self.trigger_zone_bb = BoundingBox(3.0, 13.0, 5.0, 16.0)
    self.state = self.State.IDLE
    # One-shot timer draws the trigger zone shortly after startup; the
    # periodic 10 Hz timer drives the state machine.
    rospy.Timer(rospy.Duration(1.0), lambda _: self.draw_trigger_zone(False),
                oneshot=True)
    rospy.Timer(rospy.Duration(0.1), self.loop)
def __init__(self):
    """Follow the closest detected person with the head; also republishes a
    collision-filtered laser scan and velocity commands."""
    self.moving = False  # guards against overlapping head motions
    self.listener = TransformListener()
    self.head_client = actionlib.SimpleActionClient(
        "head_controller/point_head", PointHeadAction)
    self.head_client.wait_for_server()
    self.person_sub = rospy.Subscriber(
        "rail_people_detector/closest_person", Person, self.personCallback
    )
    self.laser_sub = rospy.Subscriber("base_scan", LaserScan,
                                      self.laserCallback)
    self.laser_pub = rospy.Publisher("collision_scan", LaserScan, queue_size=1)
    self.controllerID = None
    self.controllerPosition = None
    self.safeToTrack = True
    self.lastFaceTimestamp = rospy.Time(0)
    self.lastHeadTarget = None
    self.debug = rospy.Publisher("follow_face/debug", Marker, queue_size=1)
    self.cmdvel = rospy.Publisher("cmd_vel", Twist, queue_size=1)
    self.marker = None
    self.last_processed = 0
def __init__(self, output_folder, server, menu_handler, marker_size, calibration_file):
    """Calibration data collector: prepares the output folder, loads the
    calibration config and, for every configured sensor, records its topic
    metadata and kinematic chain and attaches an interactive data labeler.

    :param output_folder: folder for collected data (recreated after a
        confirmation prompt if it already exists)
    :param server: shared InteractiveMarkerServer for the labelers
    :param menu_handler: shared menu handler for the labelers
    :param marker_size: visual size of the labeler markers
    :param calibration_file: path to the JSON calibration config
    """
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)  # Create the new folder
    else:
        # Existing folder: ask before wiping it (Python 2's raw_input).
        while True:
            msg = Fore.YELLOW + "To continue, the directory '{}' will be delete.\n"
            msg = msg + "Do you wish to continue? [y/N] " + Style.RESET_ALL
            answer = raw_input(msg.format(output_folder))
            if len(answer) > 0 and answer[0].lower() in ('y', 'n'):
                if answer[0].lower() == 'n':
                    sys.exit(1)
                else:
                    break
            else:
                sys.exit(1)  # defaults to N
        shutil.rmtree(output_folder)  # Delete old folder
        os.mkdir(output_folder)  # Recreate the folder
    self.output_folder = output_folder
    self.listener = TransformListener()
    self.sensors = {}
    self.sensor_labelers = {}
    self.server = server
    self.menu_handler = menu_handler
    self.data_stamp = 0
    self.collections = {}
    self.bridge = CvBridge()
    self.config = loadJSONConfig(calibration_file)
    if self.config is None:
        sys.exit(1)  # loadJSON should tell you why.
    self.world_link = self.config['world_link']
    # Add sensors
    print(Fore.BLUE + 'Sensors:' + Style.RESET_ALL)
    print('Number of sensors: ' + str(len(self.config['sensors'])))
    # Go through the sensors in the calib config.
    for sensor_key, value in self.config['sensors'].items():
        # Create a dictionary that describes this sensor
        sensor_dict = {'_name': sensor_key, 'parent': value['link'],
                       'calibration_parent': value['parent_link'],
                       'calibration_child': value['child_link']}
        # TODO replace by utils function
        # Sniff the topic's message type from the first message's
        # connection header (works without importing the type up front).
        print("Waiting for message")
        msg = rospy.wait_for_message(value['topic_name'], rospy.AnyMsg)
        connection_header = msg._connection_header['type'].split('/')
        ros_pkg = connection_header[0] + '.msg'
        msg_type = connection_header[1]
        print('Topic ' + value['topic_name'] + ' has type ' + msg_type)
        sensor_dict['topic'] = value['topic_name']
        sensor_dict['msg_type'] = msg_type
        # If topic contains a message type then get a camera_info message to store along with the sensor data
        if sensor_dict['msg_type'] == 'Image':  # if it is an image must get camera_info
            sensor_dict['camera_info_topic'] = os.path.dirname(sensor_dict['topic']) + '/camera_info'
            from sensor_msgs.msg import CameraInfo
            camera_info_msg = rospy.wait_for_message(sensor_dict['camera_info_topic'], CameraInfo)
            from rospy_message_converter import message_converter
            sensor_dict['camera_info'] = message_converter.convert_ros_message_to_dictionary(camera_info_msg)
        # Get the kinematic chain form world_link to this sensor's parent link
        now = rospy.Time()
        self.listener.waitForTransform(value['link'], self.world_link, now, rospy.Duration(5))
        chain = self.listener.chain(value['link'], now, self.world_link, now, self.world_link)
        # Store the chain as (parent, child) pairs of consecutive links.
        chain_list = []
        for parent, child in zip(chain[0::], chain[1::]):
            key = self.generateKey(parent, child)
            chain_list.append({'key': key, 'parent': parent, 'child': child})
        sensor_dict['chain'] = chain_list
        # Add to sensor dictionary
        self.sensors[sensor_key] = sensor_dict
        sensor_labeler = InteractiveDataLabeler(self.server, self.menu_handler,
                                                sensor_dict, marker_size,
                                                self.config['calibration_pattern'])
        self.sensor_labelers[sensor_key] = sensor_labeler
        print('finished visiting sensor ' + sensor_key)
        print(Fore.BLUE + sensor_key
              + Style.RESET_ALL + ':\n' + str(sensor_dict))
    # print('sensor_labelers:')
    # print(self.sensor_labelers)
    self.abstract_transforms = self.getAllAbstractTransforms()