Example no. 1
class HumanPerceptionFakerNode(object):
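    """ Publishes a fake face track at a fixed pose (a stand-in for the real
    human perception pipeline, e.g. for testing). """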
    def __init__(self):
        face_pose_str = rospy.get_param("~face_global_pose", "0 0 0 0 0 0")
        float_list = np.array([float(i) for i in face_pose_str.split()])
        face_pose = Vector6D(x=float_list[0],
                             y=float_list[1],
                             z=float_list[2],
                             rx=float_list[3],
                             ry=float_list[4],
                             rz=float_list[5])
        self.fake_face = SceneNode(label="face", pose=face_pose)
        self.fake_face.shapes.append(Sphere(d=0.15))
        self.fake_face.id = "face"
        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")
        self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic",
                                                     "")
        self.tf_bridge = TfBridge()
        self.world_publisher = WorldPublisher("human_tracks")
        self.image_info_sub = rospy.Subscriber(
            self.rgb_camera_info_topic,
            CameraInfo,
            self.callback,
            queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

    def callback(self, msg):
        header = msg.header
        header.frame_id = self.global_frame_id
        self.world_publisher.publish([self.fake_face], [], header)
        self.tf_bridge.publish_tf_frames([self.fake_face], [], header)

    def run(self):
        # rospy.spin() already blocks until the node is shut down
        rospy.spin()
Example no. 2
    def __init__(self):
        """
        """
        self.tf_bridge = TfBridge()

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        self.bridge = CvBridge()
        self.robot_camera = None
        self.camera_info = None

        self.events = []

        self.robot_camera_clipnear = rospy.get_param("~robot_camera_clipnear", 0.1)
        self.robot_camera_clipfar = rospy.get_param("~robot_camera_clipfar", 25.0)

        self.publish_tf = rospy.get_param("~publish_tf", False)

        self.publish_viz = rospy.get_param("~publish_viz", True)

        self.world_publisher = WorldPublisher("ar_tracks")

        self.marker_publisher = MarkerPublisher("ar_markers")

        self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic", "/camera/rgb/camera_info")
        rospy.loginfo("[ar_perception] Subscribing to '/{}' topic...".format(self.rgb_camera_info_topic))
        self.camera_info_subscriber = rospy.Subscriber(self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)

        self.ar_pose_marker_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.observation_callback)

        self.ar_nodes = {}

        self.cad_models_search_path = rospy.get_param("~cad_models_search_path", "")

        self.ar_tag_config = rospy.get_param("~ar_tag_config", "")

        if self.ar_tag_config != "":
            with open(self.ar_tag_config, 'r') as config:
                self.ar_config = yaml.safe_load(config)

        for marker in self.ar_config:
            if marker["id"] not in self.ar_nodes:
                node = SceneNode()
                node.id = marker["id"]
                node.label = marker["label"]
                position = marker["position"]
                orientation = marker["orientation"]
                description = marker["description"]
                color = marker["color"]
                shape = Mesh("file://"+self.cad_models_search_path + "/" + marker["file"],
                             x=position["x"], y=position["y"], z=position["z"],
                             rx=orientation["x"], ry=orientation["y"], rz=orientation["z"])
                shape.color[0] = color["r"]
                shape.color[1] = color["g"]
                shape.color[2] = color["b"]
                shape.color[3] = color["a"]
                node.shapes.append(shape)
                node.description = description
                node.state = SceneNodeState.CONFIRMED
                self.ar_nodes[marker["marker_id"]] = node
    def __init__(self):
        """ """
        self.tf_bridge = TfBridge()

        self.n_frame = rospy.get_param("~n_frame", 4)
        self.frame_count = 0

        self.robot_camera = None
        self.camera_info = None
        self.camera_frame_id = None

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")
        self.base_frame_id = rospy.get_param("~base_frame_id", "odom")

        self.use_ar_tags = rospy.get_param("~use_ar_tags", True)
        self.ar_tags_topic = rospy.get_param("ar_tags_topic", "ar_tracks")
        self.ar_tags_topic = "ar_tracks"
        self.use_ar_tags = True

        self.use_motion_capture = rospy.get_param("~use_motion_capture", False)
        self.motion_capture_topic = rospy.get_param("~motion_capture_topic",
                                                    "motion_capture_tracks")
        self.use_motion_capture = True
        self.motion_capture_topic = "mocap_tracks"

        use_simulation_gui = rospy.get_param("~use_simulation_gui", True)
        simulation_config_filename = rospy.get_param(
            "~simulation_config_filename", "")
        cad_models_additional_search_path = rospy.get_param(
            "~cad_models_additional_search_path", "")
        static_entities_config_filename = rospy.get_param(
            "~static_entities_config_filename", "")
        robot_urdf_file_path = rospy.get_param("~robot_urdf_file_path", "")
        self.internal_simulator = InternalSimulator(
            use_simulation_gui, simulation_config_filename,
            cad_models_additional_search_path, static_entities_config_filename,
            robot_urdf_file_path, self.global_frame_id, self.base_frame_id)
        self.physics_monitor = GraphicMonitor(
            internal_simulator=self.internal_simulator)
        if self.use_motion_capture is True:
            self.motion_capture_tracks = []
            self.motion_capture_sub = rospy.Subscriber(
                self.motion_capture_topic,
                WorldStamped,
                self.motion_capture_callback,
                queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        if self.use_ar_tags is True:
            self.ar_tags_tracks = []
            self.ar_tags_sub = rospy.Subscriber(
                self.ar_tags_topic,
                WorldStamped,
                self.ar_tags_callback,
                queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.ar_tags_sub = rospy.Subscriber("/mocap_tracks", rospy.AnyMsg,
                                            self.publish_view)
        self.pick_subsc = rospy.Subscriber("/pr2_tasks_node/pr2_facts",
                                           RobotAction, self.pick_callback)
    def __init__(self):
        """
        """
        self.tf_bridge = TfBridge()

        self.listener = tf.TransformListener()



        # ontologiesManipulator =OntologiesManipulator()
        # self.onto = ontologiesManipulator.get("robot")

        self.global_frame_id = rospy.get_param("~global_frame_id")
        print "global fame id is : " + str(self.global_frame_id)
        self.ontologies_manip = OntologiesManipulator()
        self.ontologies_manip.add("robot")
        self.onto=self.ontologies_manip.get("robot")
        self.onto.close()
        self.global_frame_id = rospy.get_param("~global_frame_id", "map")
        self.publish_tf = rospy.get_param("~publish_tf", False)


        self.world_publisher_global = WorldPublisher("ar_tracks", self.global_frame_id)
        self.world_publisher_local =  WorldPublisher("ar_tracks_local")
        # self.marker_publisher = MarkerPublisher("ar_perception_marker")
        # self.ar_pose_marker_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.observation_callback)

        self.pose_marker_sub = message_filters.Subscriber("ar_pose_marker", AlvarMarkers)
        self.visible_marker_sub = message_filters.Subscriber("ar_pose_visible_marker",AlvarVisibleMarkers)

        self.synchronous_marker_sub = message_filters.TimeSynchronizer([self.visible_marker_sub,self.pose_marker_sub], 10)
        self.synchronous_marker_sub.registerCallback(self.visible_observation_callback)
        self.filtering_y_axis = rospy.get_param("~filtering_y_axis", FILTERING_Y)
        self.filtering_z_axis = rospy.get_param("~filtering_z_axis", FILTERING_Z)
        self.minimum_velocity = rospy.get_param("~minimum_velocity", MIN_VEL)
        self.minimum_angular_velocity = rospy.get_param("~minimum_angular_velocity", MIN_ANG)
        self.ar_nodes = {}
        self.ar_nodes_local={}

        self.blacklist_id = []
        self.id_link = {}  # dictionary mapping AR tag id to ontology individual id
        # self.joint_state_subscriber = rospy.Subscriber("/joint_states", JointState, self.joint_states_callback)
        self.last_head_pose = None
        self.last_time_head_pose = rospy.Time(0)
        print "filtering is " + str(self.filtering_y_axis) +"° x " +str(self.filtering_z_axis)+"°"
Example no. 5
    def __init__(self):
        face_pose_str = rospy.get_param("~face_global_pose", "0 0 0 0 0 0")
        float_list = np.array([float(i) for i in face_pose_str.split()])
        face_pose = Vector6D(x=float_list[0],
                             y=float_list[1],
                             z=float_list[2],
                             rx=float_list[3],
                             ry=float_list[4],
                             rz=float_list[5])
        self.fake_face = SceneNode(label="face", pose=face_pose)
        self.fake_face.shapes.append(Sphere(d=0.15))
        self.fake_face.id = "face"
        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")
        self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic",
                                                     "")
        self.tf_bridge = TfBridge()
        self.world_publisher = WorldPublisher("human_tracks")
        self.image_info_sub = rospy.Subscriber(
            self.rgb_camera_info_topic,
            CameraInfo,
            self.callback,
            queue_size=DEFAULT_SENSOR_QUEUE_SIZE)
Example no. 6
    def __init__(self,
                 use_gui,
                 cad_models_additional_search_path,
                 env_urdf_file_path,
                 static_entities_config_filename,
                 robot_urdf_file_path,
                 global_frame_id,
                 base_frame_id,
                 position_tolerance=0.005):

        self.tf_bridge = TfBridge()

        self.entity_map = {}

        self.entity_id_map = {}
        self.reverse_entity_id_map = {}

        self.joint_id_map = {}
        self.reverse_joint_id_map = {}

        self.constraint_id_map = {}

        self.markers_id_map = {}

        self.position_tolerance = position_tolerance

        self.global_frame_id = global_frame_id
        self.base_frame_id = base_frame_id

        self.robot_urdf_file_path = robot_urdf_file_path

        self.use_gui = use_gui
        if self.use_gui is True:
            self.client_simulator_id = p.connect(p.GUI)
        else:
            self.client_simulator_id = p.connect(p.DIRECT)

        if cad_models_additional_search_path != "":
            p.setAdditionalSearchPath(cad_models_additional_search_path)

        self.static_nodes = []

        if static_entities_config_filename != "":
            with open(static_entities_config_filename, 'r') as stream:
                static_entities = yaml.safe_load(stream)
                for entity in static_entities:
                    start_pose = Vector6D(x=entity["position"]["x"],
                                          y=entity["position"]["y"],
                                          z=entity["position"]["z"],
                                          rx=entity["orientation"]["x"],
                                          ry=entity["orientation"]["x"],
                                          rz=entity["orientation"]["z"])

                    success, static_node = self.load_urdf(
                        entity["id"],
                        entity["file"],
                        start_pose,
                        label=entity["label"],
                        description=entity["description"],
                        static=True)
                    if success:
                        self.static_nodes.append(static_node)

        p.setGravity(0, 0, -10)
        p.setRealTimeSimulation(0)

        self.robot_loaded = False

        self.joint_state_subscriber = rospy.Subscriber(
            "/joint_states",
            JointState,
            self.joint_states_callback,
            queue_size=1)
Example no. 7
class InternalSimulator(object):
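    """ Bullet-based internal simulator that mirrors the robot and the static
    environment, and renders camera views of the simulated scene. """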
    def __init__(self,
                 use_gui,
                 cad_models_additional_search_path,
                 env_urdf_file_path,
                 static_entities_config_filename,
                 robot_urdf_file_path,
                 global_frame_id,
                 base_frame_id,
                 position_tolerance=0.005):

        self.tf_bridge = TfBridge()

        self.entity_map = {}

        self.entity_id_map = {}
        self.reverse_entity_id_map = {}

        self.joint_id_map = {}
        self.reverse_joint_id_map = {}

        self.constraint_id_map = {}

        self.markers_id_map = {}

        self.position_tolerance = position_tolerance

        self.global_frame_id = global_frame_id
        self.base_frame_id = base_frame_id

        self.robot_urdf_file_path = robot_urdf_file_path

        self.use_gui = use_gui
        if self.use_gui is True:
            self.client_simulator_id = p.connect(p.GUI)
        else:
            self.client_simulator_id = p.connect(p.DIRECT)

        if cad_models_additional_search_path != "":
            p.setAdditionalSearchPath(cad_models_additional_search_path)

        self.static_nodes = []

        if static_entities_config_filename != "":
            with open(static_entities_config_filename, 'r') as stream:
                static_entities = yaml.safe_load(stream)
                for entity in static_entities:
                    start_pose = Vector6D(x=entity["position"]["x"],
                                          y=entity["position"]["y"],
                                          z=entity["position"]["z"],
                                          rx=entity["orientation"]["x"],
                                          ry=entity["orientation"]["x"],
                                          rz=entity["orientation"]["z"])

                    success, static_node = self.load_urdf(
                        entity["id"],
                        entity["file"],
                        start_pose,
                        label=entity["label"],
                        description=entity["description"],
                        static=True)
                    if success:
                        self.static_nodes.append(static_node)

        p.setGravity(0, 0, -10)
        p.setRealTimeSimulation(0)

        self.robot_loaded = False

        self.joint_state_subscriber = rospy.Subscriber(
            "/joint_states",
            JointState,
            self.joint_states_callback,
            queue_size=1)

    def load_urdf(self,
                  id,
                  filename,
                  start_pose,
                  remove_friction=False,
                  static=False,
                  label="thing",
                  description=""):
        """ """
        try:
            use_fixed_base = 1 if static is True else 0
            base_link_sim_id = p.loadURDF(
                filename,
                start_pose.position().to_array(),
                start_pose.quaternion(),
                useFixedBase=use_fixed_base,
                flags=p.URDF_ENABLE_SLEEPING
                | p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES)
            # Check that the file was successfully loaded before using the id
            if base_link_sim_id < 0:
                raise RuntimeError("Invalid URDF")

            self.entity_id_map[id] = base_link_sim_id
            # Create a joint map to ease exploration
            self.reverse_entity_id_map[base_link_sim_id] = id
            self.joint_id_map[base_link_sim_id] = {}
            self.reverse_joint_id_map[base_link_sim_id] = {}
            for i in range(0, p.getNumJoints(base_link_sim_id)):
                info = p.getJointInfo(base_link_sim_id, i)
                self.joint_id_map[base_link_sim_id][info[1]] = info[0]
                self.reverse_joint_id_map[base_link_sim_id][info[0]] = info[1]
            scene_node = SceneNode(pose=start_pose, is_static=True)
            scene_node.id = id
            scene_node.label = label
            scene_node.description = description
            sim_id = self.entity_id_map[id]
            visual_shapes = p.getVisualShapeData(sim_id)
            for shape in visual_shapes:
                link_id = shape[1]
                type = shape[2]
                dimensions = shape[3]
                mesh_file_path = shape[4]
                position = shape[5]
                orientation = shape[6]
                rgba_color = shape[7]

                if link_id != -1:
                    link_state = p.getLinkState(sim_id, link_id)
                    t_link = link_state[0]
                    q_link = link_state[1]
                    t_inertial = link_state[2]
                    q_inertial = link_state[3]

                    tf_world_link = np.dot(translation_matrix(t_link),
                                           quaternion_matrix(q_link))
                    tf_inertial_link = np.dot(translation_matrix(t_inertial),
                                              quaternion_matrix(q_inertial))
                    world_transform = np.dot(tf_world_link,
                                             np.linalg.inv(tf_inertial_link))

                else:
                    t_link, q_link = p.getBasePositionAndOrientation(sim_id)
                    world_transform = np.dot(translation_matrix(t_link),
                                             quaternion_matrix(q_link))

                if type == p.GEOM_SPHERE:
                    primitive_shape = Sphere(dimensions[0] * 2.0)
                elif type == p.GEOM_BOX:
                    primitive_shape = Box(dimensions[0], dimensions[1],
                                          dimensions[2])
                elif type == p.GEOM_CYLINDER:
                    primitive_shape = Cylinder(dimensions[1] * 2.0,
                                               dimensions[0])
                elif type == p.GEOM_PLANE:
                    primitive_shape = Box(dimensions[0], dimensions[1], 0.0001)
                elif type == p.GEOM_MESH:
                    primitive_shape = Mesh("file://" + mesh_file_path,
                                           scale_x=dimensions[0],
                                           scale_y=dimensions[1],
                                           scale_z=dimensions[2])
                else:
                    raise NotImplementedError(
                        "Shape type {} not supported at the moment".format(type))

                if link_id != -1:
                    shape_transform = np.dot(translation_matrix(position),
                                             quaternion_matrix(orientation))
                    shape_transform = np.dot(world_transform, shape_transform)
                    shape_transform = np.linalg.inv(
                        np.dot(np.linalg.inv(shape_transform),
                               scene_node.pose.transform()))
                    position = translation_from_matrix(shape_transform)
                    orientation = quaternion_from_matrix(shape_transform)

                    primitive_shape.pose.pos.x = position[0]
                    primitive_shape.pose.pos.y = position[1]
                    primitive_shape.pose.pos.z = position[2]

                    primitive_shape.pose.from_quaternion(
                        orientation[0], orientation[1], orientation[2],
                        orientation[3])
                else:
                    shape_transform = np.dot(translation_matrix(position),
                                             quaternion_matrix(orientation))
                    shape_transform = np.dot(world_transform, shape_transform)
                    position = translation_from_matrix(shape_transform)
                    orientation = quaternion_from_matrix(shape_transform)

                    scene_node.pose.pos.x = position[0]
                    scene_node.pose.pos.y = position[1]
                    scene_node.pose.pos.z = position[2]

                    scene_node.pose.from_quaternion(orientation[0],
                                                    orientation[1],
                                                    orientation[2],
                                                    orientation[3])

                if len(rgba_color) > 0:
                    primitive_shape.color[0] = rgba_color[0]
                    primitive_shape.color[1] = rgba_color[1]
                    primitive_shape.color[2] = rgba_color[2]
                    primitive_shape.color[3] = rgba_color[3]

                scene_node.shapes.append(primitive_shape)
            self.entity_map[id] = scene_node
            rospy.loginfo(
                "[simulation] '{}' file successfully loaded".format(filename))
            return True, scene_node
        except Exception as e:
            rospy.logwarn("[simulation] Error loading URDF '{}': {}".format(
                filename, e))
            return False, None

    def get_myself(self):
        node = self.get_entity("myself")
        node.type = SceneNodeType.MYSELF
        node.label = "robot"
        node.description = "I"
        return node

    def get_static_entities(self):
        return self.static_nodes

    def get_entity(self, id):
        if id not in self.entity_map:
            raise ValueError("Invalid id provided : '{}'".format(id))
        return self.entity_map[id]

    def step_simulation(self):
        p.stepSimulation()

    def get_camera_view(self,
                        camera_pose,
                        camera,
                        target_position=None,
                        occlusion_threshold=0.01,
                        rendering_ratio=1.0):
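        """ Renders the scene from the given camera pose and returns
        (rgb_image, depth_image, visible_tracks). """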
        visible_tracks = []
        rot = quaternion_matrix(camera_pose.quaternion())
        trans = translation_matrix(camera_pose.position().to_array().flatten())
        if target_position is None:
            target = translation_matrix([0.0, 0.0, 1000.0])
            target = translation_from_matrix(np.dot(np.dot(trans, rot),
                                                    target))
        else:
            target = target_position.position().to_array()
        view_matrix = p.computeViewMatrix(camera_pose.position().to_array(),
                                          target, [0, 0, 1])

        width = camera.width
        height = camera.height

        projection_matrix = p.computeProjectionMatrixFOV(
            camera.fov,
            float(width) / height, camera.clipnear, camera.clipfar)

        rendered_width = int(width / rendering_ratio)
        rendered_height = int(height / rendering_ratio)

        width_ratio = width / rendered_width
        height_ratio = height / rendered_height

        if self.use_gui is True:
            camera_image = p.getCameraImage(
                rendered_width,
                rendered_height,
                viewMatrix=view_matrix,
                renderer=p.ER_BULLET_HARDWARE_OPENGL,
                projectionMatrix=projection_matrix)
        else:
            camera_image = p.getCameraImage(rendered_width,
                                            rendered_height,
                                            viewMatrix=view_matrix,
                                            renderer=p.ER_TINY_RENDERER,
                                            projectionMatrix=projection_matrix)

        rgb_image = cv2.resize(np.array(camera_image[2]),
                               (width, height))[:, :, :3]
        depth_image = np.array(camera_image[3], np.float32).reshape(
            (rendered_height, rendered_width))

        far = camera.clipfar
        near = camera.clipnear
        real_depth_image = far * near / (far - (far - near) * depth_image)

        mask_image = camera_image[4]
        unique, counts = np.unique(np.array(mask_image).flatten(),
                                   return_counts=True)

        for sim_id, count in zip(unique, counts):
            if sim_id > 0:
                cv_mask = np.array(mask_image.copy())
                cv_mask[cv_mask != sim_id] = 0
                cv_mask[cv_mask == sim_id] = 255
                xmin, ymin, w, h = cv2.boundingRect(cv_mask.astype(np.uint8))
                mask = cv_mask[ymin:ymin + h, xmin:xmin + w]
                visible_area = w * h + 1
                screen_area = rendered_width * rendered_height + 1
                if screen_area - visible_area == 0:
                    confidence = 1.0
                else:
                    confidence = visible_area / float(screen_area -
                                                      visible_area)
                #TODO compute occlusion score as a ratio between visible 2d bbox and projected 2d bbox areas
                if confidence > occlusion_threshold:

                    depth = real_depth_image[int(ymin + h / 2.0)][int(xmin +
                                                                      w / 2.0)]
                    xmin = int(xmin * width_ratio)
                    ymin = int(ymin * height_ratio)
                    w = int(w * width_ratio)
                    h = int(h * height_ratio)

                    id = self.reverse_entity_id_map[sim_id]
                    scene_node = self.get_entity(id)

                    det = Detection(int(xmin),
                                    int(ymin),
                                    int(xmin + w),
                                    int(ymin + h),
                                    id,
                                    confidence,
                                    depth=depth,
                                    mask=mask)
                    track = SceneNode(detection=det)
                    track.static = scene_node.static
                    track.id = id
                    track.mask = det.mask
                    track.shapes = scene_node.shapes
                    track.pose = scene_node.pose
                    track.label = scene_node.label
                    track.description = scene_node.description
                    visible_tracks.append(track)
        return rgb_image, real_depth_image, visible_tracks

    # def update_entity(self, id, t, q):
    #     if id not in self.entity_id_map:
    #         raise ValueError("Entity <{}> is not loaded into the simulator".format(id))
    #     base_link_sim_id = self.entity_id_map[id]
    #     t_current, q_current = p.getBasePositionAndOrientation(base_link_sim_id)
    #     update_position = not np.allclose(np.array(t_current), t, atol=self.position_tolerance)
    #     update_orientation = not np.allclose(np.array(q_current), q, atol=self.position_tolerance)
    #     if update_position is True or update_orientation is True:
    #         p.resetBasePositionAndOrientation(base_link_sim_id, t, q, physicsClientId=self.client_simulator_id)

    def joint_states_callback(self, joint_states_msg):
        success, pose = self.tf_bridge.get_pose_from_tf(
            self.global_frame_id, self.base_frame_id)
        if success is True:
            if self.robot_loaded is False:
                # load_urdf() already catches its own exceptions and reports
                # success through its return value
                loaded, _ = self.load_urdf("myself",
                                           self.robot_urdf_file_path, pose)
                self.robot_loaded = loaded
            # try:
            #     self.simulator.update_entity("myself", t, q)
            # except Exception as e:
            #     rospy.logwarn("[simulation] Exception occured: {}".format(e))
        if self.robot_loaded is True:
            joint_indices = []
            target_positions = []
            base_link_sim_id = self.entity_id_map["myself"]
            for joint_state_index, joint_name in enumerate(
                    joint_states_msg.name):
                joint_sim_index = self.joint_id_map[base_link_sim_id][
                    joint_name]
                info = p.getJointInfo(base_link_sim_id,
                                      joint_sim_index,
                                      physicsClientId=self.client_simulator_id)
                joint_name_sim = info[1]
                current_position = p.getJointState(base_link_sim_id,
                                                   joint_sim_index)[0]
                assert (joint_name == joint_name_sim)
                joint_position = joint_states_msg.position[joint_state_index]
                if abs(joint_position -
                       current_position) > self.position_tolerance:
                    joint_indices.append(joint_sim_index)
                    target_positions.append(joint_position)
            p.setJointMotorControlArray(
                base_link_sim_id,
                joint_indices,
                controlMode=p.POSITION_CONTROL,
                targetPositions=target_positions,
                physicsClientId=self.client_simulator_id)
Example no. 8
    def __init__(self):
        """ """
        self.tf_bridge = TfBridge()

        self.n_frame = rospy.get_param("~n_frame", 4)
        self.frame_count = 0

        self.robot_camera = None
        self.camera_info = None
        self.camera_frame_id = None

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")
        self.base_frame_id = rospy.get_param("~base_frame_id", "odom")

        self.use_ar_tags = rospy.get_param("use_ar_tags", True)
        self.ar_tags_topic = rospy.get_param("ar_tags_topic", "ar_tracks")
        if self.use_ar_tags is True:
            self.ar_tags_tracks = []
            self.ar_tags_sub = rospy.Subscriber(self.ar_tags_topic, WorldStamped, self.ar_tags_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.use_motion_capture = rospy.get_param("use_motion_capture", True)
        self.motion_capture_topic = rospy.get_param("motion_capture_topic", "motion_capture_tracks")
        if self.use_motion_capture is True:
            self.motion_capture_tracks = []
            self.motion_capture_sub = rospy.Subscriber(self.motion_capture_topic, WorldStamped, self.motion_capture_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.use_object_perception = rospy.get_param("use_object_perception", True)
        self.object_perception_topic = rospy.get_param("object_perception_topic", "object_tracks")
        if self.use_object_perception is True:
            self.object_tracks = []
            self.object_sub = rospy.Subscriber(self.object_perception_topic, WorldStamped, self.object_perception_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.use_human_perception = rospy.get_param("use_human_perception", True)
        self.human_perception_topic = rospy.get_param("human_perception_topic", "human_tracks")
        if self.use_human_perception is True:
            self.human_tracks = []
            self.human_sub = rospy.Subscriber(self.human_perception_topic, WorldStamped, self.human_perception_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.publish_tf = rospy.get_param("~publish_tf", False)
        self.publish_viz = rospy.get_param("~publish_viz", True)
        self.publish_markers = rospy.get_param("~publish_markers", True)

        self.world_publisher = WorldPublisher("corrected_tracks")
        self.other_world_publisher = WorldPublisher("other_view_tracks")
        self.marker_publisher = MarkerPublisher("corrected_markers")

        self.robot_camera_clipnear = rospy.get_param("~robot_camera_clipnear", 0.1)
        self.robot_camera_clipfar = rospy.get_param("~robot_camera_clipfar", 25.0)

        use_simulation_gui = rospy.get_param("~use_simulation_gui", True)
        simulation_config_filename = rospy.get_param("~simulation_config_filename", "")
        cad_models_additional_search_path = rospy.get_param("~cad_models_additional_search_path", "")
        static_entities_config_filename = rospy.get_param("~static_entities_config_filename", "")
        robot_urdf_file_path = rospy.get_param("~robot_urdf_file_path", "")

        self.internal_simulator = InternalSimulator(use_simulation_gui,
                                                    simulation_config_filename,
                                                    cad_models_additional_search_path,
                                                    static_entities_config_filename,
                                                    robot_urdf_file_path,
                                                    self.global_frame_id,
                                                    self.base_frame_id)

        self.other_view_publisher = ViewPublisher("other_view")

        self.robot_perspective_monitor = RobotPerspectiveMonitor(self.internal_simulator)

        self.use_physical_monitoring = rospy.get_param("use_physical_monitoring", True)
        if self.use_physical_monitoring is True:
            self.physics_monitor = PhysicsMonitor(self.internal_simulator)

        self.use_perspective_monitoring = rospy.get_param("use_perspective_monitoring", True)
        if self.use_perspective_monitoring is True:
            self.perspective_monitor = HumanPerspectiveMonitor(self.internal_simulator)

        rospy.Service("/uwds3/get_perspective", GetPerspective, self.handle_perspective_taking)

        self.perspective_facts = []
        self.egocentric_facts = []
        self.physics_facts = []

        self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic", "/camera/rgb/camera_info")
        rospy.loginfo("[internal_simulator] Subscribing to '/{}' topic...".format(self.rgb_camera_info_topic))
        self.camera_info_subscriber = rospy.Subscriber(self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)
Example no. 9
class InternalSimulatorNode(object):
    """ Standalone node for the internal simulator """
    def __init__(self):
        """ """
        self.tf_bridge = TfBridge()

        self.n_frame = rospy.get_param("~n_frame", 4)
        self.frame_count = 0

        self.robot_camera = None
        self.camera_info = None
        self.camera_frame_id = None

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")
        self.base_frame_id = rospy.get_param("~base_frame_id", "odom")

        self.use_ar_tags = rospy.get_param("use_ar_tags", True)
        self.ar_tags_topic = rospy.get_param("ar_tags_topic", "ar_tracks")
        if self.use_ar_tags is True:
            self.ar_tags_tracks = []
            self.ar_tags_sub = rospy.Subscriber(self.ar_tags_topic, WorldStamped, self.ar_tags_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.use_motion_capture = rospy.get_param("use_motion_capture", True)
        self.motion_capture_topic = rospy.get_param("motion_capture_topic", "motion_capture_tracks")
        if self.use_motion_capture is True:
            self.motion_capture_tracks = []
            self.motion_capture_sub = rospy.Subscriber(self.motion_capture_topic, WorldStamped, self.motion_capture_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.use_object_perception = rospy.get_param("use_object_perception", True)
        self.object_perception_topic = rospy.get_param("object_perception_topic", "object_tracks")
        if self.use_object_perception is True:
            self.object_tracks = []
            self.object_sub = rospy.Subscriber(self.object_perception_topic, WorldStamped, self.object_perception_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.use_human_perception = rospy.get_param("use_human_perception", True)
        self.human_perception_topic = rospy.get_param("human_perception_topic", "human_tracks")
        if self.use_human_perception is True:
            self.human_tracks = []
            self.human_sub = rospy.Subscriber(self.human_perception_topic, WorldStamped, self.human_perception_callback, queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.publish_tf = rospy.get_param("~publish_tf", False)
        self.publish_viz = rospy.get_param("~publish_viz", True)
        self.publish_markers = rospy.get_param("~publish_markers", True)

        self.world_publisher = WorldPublisher("corrected_tracks")
        self.other_world_publisher = WorldPublisher("other_view_tracks")
        self.marker_publisher = MarkerPublisher("corrected_markers")

        self.robot_camera_clipnear = rospy.get_param("~robot_camera_clipnear", 0.1)
        self.robot_camera_clipfar = rospy.get_param("~robot_camera_clipfar", 25.0)

        use_simulation_gui = rospy.get_param("~use_simulation_gui", True)
        simulation_config_filename = rospy.get_param("~simulation_config_filename", "")
        cad_models_additional_search_path = rospy.get_param("~cad_models_additional_search_path", "")
        static_entities_config_filename = rospy.get_param("~static_entities_config_filename", "")
        robot_urdf_file_path = rospy.get_param("~robot_urdf_file_path", "")

        self.internal_simulator = InternalSimulator(use_simulation_gui,
                                                    simulation_config_filename,
                                                    cad_models_additional_search_path,
                                                    static_entities_config_filename,
                                                    robot_urdf_file_path,
                                                    self.global_frame_id,
                                                    self.base_frame_id)

        self.other_view_publisher = ViewPublisher("other_view")

        self.robot_perspective_monitor = RobotPerspectiveMonitor(self.internal_simulator)

        self.use_physical_monitoring = rospy.get_param("use_physical_monitoring", True)
        if self.use_physical_monitoring is True:
            self.physics_monitor = PhysicsMonitor(self.internal_simulator)

        self.use_perspective_monitoring = rospy.get_param("use_perspective_monitoring", True)
        if self.use_perspective_monitoring is True:
            self.perspective_monitor = HumanPerspectiveMonitor(self.internal_simulator)

        rospy.Service("/uwds3/get_perspective", GetPerspective, self.handle_perspective_taking)

        self.perspective_facts = []
        self.egocentric_facts = []
        self.physics_facts = []

        self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic", "/camera/rgb/camera_info")
        rospy.loginfo("[internal_simulator] Subscribing to '/{}' topic...".format(self.rgb_camera_info_topic))
        self.camera_info_subscriber = rospy.Subscriber(self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)

    def handle_perspective_taking(self, req):
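        """ Service handler: renders the scene from the requested point of view
        and computes the egocentric relations (right of, left of, behind)
        between the visible nodes. """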
        camera = HumanCamera()
        view_pose = Vector6D().from_msg(req.point_of_view.pose)
        egocentric_relations = []
        if req.use_target is True:
            target_point = Vector3D().from_msg(req.target.point)
            # assuming get_camera_view() returns (rgb_image, depth_image,
            # visible_tracks) as in the InternalSimulator shown above
            _, _, visible_nodes = self.internal_simulator.get_camera_view(
                view_pose, camera, target_position=target_point)
        else:
            _, _, visible_nodes = self.internal_simulator.get_camera_view(
                view_pose, camera)
        for node1 in visible_nodes:
            for node2 in visible_nodes:
                if node1 != node2:
                    bbox1 = node1.bbox
                    bbox2 = node2.bbox
                    if is_right_of(bbox1, bbox2) is True:
                        description = node1.description+"("+node1.id[:6]+") is right of "+node2.description+"("+node2.id[:6]+")"
                        egocentric_relations.append(Fact(node1.id, description, predicate="right_of", object=node2.id))
                    if is_left_of(bbox1, bbox2) is True:
                        description = node1.description+"("+node1.id[:6]+") is left of "+node2.description+"("+node2.id[:6]+")"
                        egocentric_relations.append(Fact(node1.id, description, predicate="left_of", object=node2.id))
                    if is_behind(bbox1, bbox2) is True:
                        description = node1.description+"("+node1.id[:6]+") is behind "+node2.description+"("+node2.id[:6]+")"
                        egocentric_relations.append(Fact(node1.id, description, predicate="behind", object=node2.id))
        return visible_nodes, egocentric_relations, True, ""

    def object_perception_callback(self, world_msg):
        object_tracks = []
        for node in world_msg.world.scene:
            object_tracks.append(SceneNode().from_msg(node))
        self.object_tracks = object_tracks

    def human_perception_callback(self, world_msg):
        human_tracks = []
        for node in world_msg.world.scene:
            human_tracks.append(SceneNode().from_msg(node))
        self.human_tracks = human_tracks

    def ar_tags_callback(self, world_msg):
        ar_tags_tracks = []
        for node in world_msg.world.scene:
            ar_tags_tracks.append(SceneNode().from_msg(node))
        self.ar_tags_tracks = ar_tags_tracks

    def motion_capture_callback(self, world_msg):
        motion_capture_tracks = []
        for node in world_msg.world.scene:
            motion_capture_tracks.append(SceneNode().from_msg(node))
        self.motion_capture_tracks = motion_capture_tracks

    def camera_info_callback(self, msg):
        """ """
        if self.camera_info is None:
            rospy.loginfo("[perception] Camera info received !")
            self.camera_info = msg
            self.camera_frame_id = msg.header.frame_id
            self.robot_camera = Camera().from_msg(msg,
                                                  clipnear=self.robot_camera_clipnear,
                                                  clipfar=self.robot_camera_clipfar)
        if self.internal_simulator.is_robot_loaded() is True:
            success, view_pose = self.tf_bridge.get_pose_from_tf(self.global_frame_id, self.camera_frame_id)

            if success is not True:
                rospy.logwarn("[human_perception] The camera sensor is not localized in world space (frame '{}'), please check if the sensor frame is published in /tf".format(self.global_frame_id))
            else:
                header = msg.header
                header.frame_id = self.global_frame_id
                self.frame_count %= self.n_frame

                object_tracks = self.ar_tags_tracks + self.object_tracks
                person_tracks = [f for f in self.human_tracks if f.label == "person"]

                corrected_object_tracks, self.physics_facts = self.physics_monitor.monitor(object_tracks, person_tracks, header.stamp)

                if self.use_perspective_monitoring is True:
                    if self.frame_count == 3:
                        monitoring_timer = cv2.getTickCount()
                        perspective_facts = []
                        face_tracks = [f for f in self.human_tracks if f.label == "face"]
                        person_tracks = [f for f in self.human_tracks if f.label == "person"]
                        success, other_image, other_visible_tracks, perspective_facts = self.perspective_monitor.monitor(face_tracks, person_tracks, header.stamp)
                        monitoring_fps = cv2.getTickFrequency() / (cv2.getTickCount()-monitoring_timer)
                        if success:
                            self.perspective_facts = [s for s in perspective_facts if s.predicate == "visible_by"]
                            self.other_world_publisher.publish(other_visible_tracks, perspective_facts+self.physics_facts, header)
                            self.other_view_publisher.publish(other_image, other_visible_tracks, header.stamp, fps=monitoring_fps)

                _, self.egocentric_facts = self.robot_perspective_monitor.monitor(object_tracks, person_tracks, self.robot_camera, view_pose, header.stamp)

                corrected_tracks = self.internal_simulator.get_static_entities() + self.human_tracks + corrected_object_tracks

                events = self.physics_facts + self.perspective_facts + self.egocentric_facts

                self.world_publisher.publish(corrected_tracks, events, header)

                if self.publish_tf is True:
                    self.tf_bridge.publish_tf_frames(corrected_tracks, events, header)

                if self.publish_markers is True:
                    self.marker_publisher.publish(corrected_tracks, header)

                self.frame_count += 1

    def run(self):
        # rospy.spin() already blocks until the node is shut down
        rospy.spin()
class ArPerceptionNode(object):
    def __init__(self):
        """
        """
        self.tf_bridge = TfBridge()

        self.listener = tf.TransformListener()



        # ontologiesManipulator =OntologiesManipulator()
        # self.onto = ontologiesManipulator.get("robot")

        self.global_frame_id = rospy.get_param("~global_frame_id")
        print "global fame id is : " + str(self.global_frame_id)
        self.ontologies_manip = OntologiesManipulator()
        self.ontologies_manip.add("robot")
        self.onto=self.ontologies_manip.get("robot")
        self.onto.close()
        self.global_frame_id = rospy.get_param("~global_frame_id", "map")
        self.publish_tf = rospy.get_param("~publish_tf", False)


        self.world_publisher_global = WorldPublisher("ar_tracks", self.global_frame_id)
        self.world_publisher_local =  WorldPublisher("ar_tracks_local")
        # self.marker_publisher = MarkerPublisher("ar_perception_marker")
        # self.ar_pose_marker_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.observation_callback)

        self.pose_marker_sub = message_filters.Subscriber("ar_pose_marker", AlvarMarkers)
        self.visible_marker_sub = message_filters.Subscriber("ar_pose_visible_marker",AlvarVisibleMarkers)

        self.synchronous_marker_sub = message_filters.TimeSynchronizer([self.visible_marker_sub,self.pose_marker_sub], 10)
        self.synchronous_marker_sub.registerCallback(self.visible_observation_callback)
        self.filtering_y_axis = rospy.get_param("~filtering_y_axis", FILTERING_Y)
        self.filtering_z_axis = rospy.get_param("~filtering_z_axis", FILTERING_Z)
        self.minimum_velocity = rospy.get_param("~minimum_velocity", MIN_VEL)
        self.minimum_angular_velocity = rospy.get_param("~minimum_angular_velocity", MIN_ANG)
        self.ar_nodes = {}
        self.ar_nodes_local={}

        self.blacklist_id = []
        self.id_link = {}  # dictionary mapping AR tag id to ontology individual id
        # self.joint_state_subscriber = rospy.Subscriber("/joint_states", JointState, self.joint_states_callback)
        self.last_head_pose = None
        self.last_time_head_pose = rospy.Time(0)
        print "filtering is " + str(self.filtering_y_axis) +"° x " +str(self.filtering_z_axis)+"°"
        # shp1 = Box(2,0.01,1, "shp1",x=0,y=0,z=0,r=1.,a=0,rz=np.radians(self.filtering_y_axis))
        # shp2 = Box(2,0.01,1, "shp2",y=0,x=0,z=0,r=1.,a=0,rz=-np.radians(self.filtering_y_axis))
        # shp3 = Box(2,1,0.01, "shp3",x=0,z=0,y=0,b=1.,a=0,ry=np.radians(self.filtering_z_axis))
        # shp4 = Box(2,1,0.01, "shp4",x=0,y=0,z=0,b=1.,a=0,ry=-np.radians(self.filtering_z_axis))
        # sn1 = SceneNode(pose = Vector6D(x=0,y=0,z=0,rx=0,ry=0,rz=0),label="no_fact")
        # sn2 = SceneNode(pose = Vector6D(x=0,y=0,z=0,rx=0,ry=0,rz=0),label="no_fact")
        # sn3 = SceneNode(pose = Vector6D(x=0,y=0,z=0,rx=0,ry=0,rz=0),label="no_fact")
        # sn4 = SceneNode(pose = Vector6D(x=0,y=0,z=0,rx=0,ry=0,rz=0),label="no_fact")
        # sn1.id="sn1"
        # sn2.id="sn2"
        # sn3.id="sn3"
        # sn4.id="sn4"
        # sn1.shapes=[shp1]
        # sn2.shapes=[shp2]
        # sn3.shapes=[shp3]
        # sn4.shapes=[shp4]
        # self.ar_nodes["sn1"]=sn1
        # self.ar_nodes["sn2"]=sn2
        # self.ar_nodes["sn3"]=sn3
        # self.ar_nodes["sn4"]=sn4
    def observation_callback(self, ar_marker_msgs):
        """
        """

        all_nodes = []
        header = ar_marker_msgs.header

        for marker in ar_marker_msgs.markers:
            if marker.id not in self.blacklist_id:
                if marker.id not in self.id_link:
                    self.new_node(marker)
                # print self.id_link

                # print self.id_link.keys()
                id = self.id_link[marker.id]
                pose = Vector6D().from_msg(marker.pose.pose)
                header = marker.header
                if self.ar_nodes[id].pose is None:
                    self.ar_nodes[id].pose = Vector6DStable(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z,
                                                                   rx=pose.rot.x, ry=pose.rot.y, rz=pose.rot.z, time=header.stamp)
                else:
                    self.ar_nodes[id].pose.pos.update_no_kalmann(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z, time=header.stamp)
                    self.ar_nodes[id].pose.rot.update_no_kalmann(x=pose.rot.x, y=pose.rot.y, z=pose.rot.z, time=header.stamp)

                all_nodes.append(self.ar_nodes[id])



        self.world_publisher.publish(self.ar_nodes.values(), [],header)
        # print("pub")

        if self.publish_tf is True:
            self.tf_bridge.publish_tf_frames(self.ar_nodes.values(), [], header)
        # print self.ar_nodes




    def movement_validity(self,header):
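        """ Returns True if the head moved faster than the configured minimum
        linear or angular velocity since the last observation. """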

        # frame_id = header.frame_id
        # if frame_id[0]=='/':
        #     frame_id = frame_id[1:]
        frame_id = "base_footprint"
# "head_mount_kinect2_rgb_optical_frame"
        bool_,head_pose = self.tf_bridge.get_pose_from_tf("head_mount_kinect2_rgb_link",frame_id ,header.stamp)
        if not bool_:
            return False
        # self.ar_nodes["sn1"].pose= Vector6DStable(x=head_pose.pos.x, y=head_pose.pos.y, z=head_pose.pos.z,
        #                                                rx=head_pose.rot.x, ry=head_pose.rot.y, rz=head_pose.rot.z, time=header.stamp)
        # self.ar_nodes["sn2"].pose= Vector6DStable(x=head_pose.pos.x, y=head_pose.pos.y, z=head_pose.pos.z,
        #                                                rx=head_pose.rot.x, ry=head_pose.rot.y, rz=head_pose.rot.z, time=header.stamp)
        # self.ar_nodes["sn3"].pose= Vector6DStable(x=head_pose.pos.x, y=head_pose.pos.y, z=head_pose.pos.z,
        #                                                rx=head_pose.rot.x, ry=head_pose.rot.y, rz=head_pose.rot.z, time=header.stamp)
        # self.ar_nodes["sn4"].pose= Vector6DStable(x=head_pose.pos.x, y=head_pose.pos.y, z=head_pose.pos.z,
        #                                                rx=head_pose.rot.x, ry=head_pose.rot.y, rz=head_pose.rot.z, time=header.stamp)
        #init for the first time
        if self.last_head_pose is None:
            self.last_head_pose = head_pose

        vel_movement = 0
        ang_movement = 0
        delta= header.stamp-self.last_time_head_pose
        #If we are on a different time frame : (the head might have moved)
        if header.stamp != self.last_time_head_pose:
            vel_movement = np.linalg.norm(
                                    head_pose.pos.to_array() -
                                    self.last_head_pose.pos.to_array() )/delta.to_sec()
            ang_movement = np.linalg.norm(
                                    head_pose.rot.to_array() -
                                    self.last_head_pose.rot.to_array() )/delta.to_sec()



            self.last_time_head_pose = header.stamp
            self.last_head_pose = head_pose
        return ((vel_movement > self.minimum_velocity) or
                (ang_movement > self.minimum_angular_velocity))



    def pos_validity_marker(self,marker):
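        """ Checks whether the marker position lies inside the angular
        filtering cone in front of the head. """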
        header = marker.header
        x =  marker.pose.pose.position.x
        y =  marker.pose.pose.position.y
        z =  marker.pose.pose.position.z
        mpose=np.array([x,y,z])


        return self.pos_validityv2(mpose,header)


    def pos_validityv2(self,mvect,header):
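        """ Checks whether the given position, once expressed in the head
        frame, lies inside the configured y/z angular filtering thresholds. """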
        mpose = Vector6DStable(mvect[0],mvect[1],mvect[2])
        frame_id = header.frame_id
        if frame_id[0]=='/':
            frame_id = frame_id[1:]
        bool_,head_pose = self.tf_bridge.get_pose_from_tf("head_mount_kinect2_rgb_link" ,
                                          frame_id,header.stamp)

        if not bool_:
            # head pose unavailable, cannot validate the marker position
            return False
        # init for the first time
        if self.last_head_pose is None:
            self.last_head_pose = head_pose

        mpose.from_transform(np.dot(head_pose.transform(), mpose.transform()))
        #mpose is now in the head frame
        if mpose.pos.x==0:
            return False
        xy_angle = np.degrees(np.arctan(mpose.pos.y/mpose.pos.x))
        xz_angle = np.degrees(np.arctan(mpose.pos.z/mpose.pos.x))

        return (abs(xy_angle)<self.filtering_y_axis and
               abs(xz_angle)<self.filtering_z_axis)



    def pos_validity(self,mpose,header):
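        """ Older variant of the angular filtering check, based on projections
        of the head-to-marker direction onto the head axes. """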

        frame_id = header.frame_id
        if frame_id[0]=='/':
            frame_id = frame_id[1:]

        x =  mpose[0]
        y =  mpose[1]
        z =  mpose[2]
        bool_,head_pose = self.tf_bridge.get_pose_from_tf(frame_id ,
                                           "head_mount_kinect2_rgb_link",
                                                    header.stamp)


        #init for the first time
        if self.last_head_pose is None:
            self.last_head_pose = head_pose




        direction = np.array([x-self.last_head_pose.pos.x,y-self.last_head_pose.pos.y,z-self.last_head_pose.pos.z])

        rotation_matrix = euler_matrix(self.last_head_pose.rot.x,
                                                            self.last_head_pose.rot.y,
                                                            self.last_head_pose.rot.z)
        un_homogen = lambda x: np.array([i/(x[-1]*1.) for i in x[:-1]])
        xaxis = un_homogen(np.dot(rotation_matrix,[1,0,0,1]))
        yaxis = un_homogen(np.dot(rotation_matrix,[0,1,0,1]))
        zaxis = un_homogen(np.dot(rotation_matrix,[0,0,1,1]))

        proj_on_y=np.dot(direction,yaxis)*yaxis/(np.linalg.norm(yaxis))
        proj_on_z=np.dot(direction,zaxis)*zaxis/(np.linalg.norm(zaxis))
        proj_on_xz_plane = direction-proj_on_y
        proj_on_xy_plane = direction-proj_on_z
        dot_x_xy =np.dot(proj_on_xy_plane,xaxis)/np.linalg.norm(xaxis)/np.linalg.norm(proj_on_xy_plane)
        dot_x_xz =np.dot(proj_on_xz_plane,xaxis)/np.linalg.norm(xaxis)/np.linalg.norm(proj_on_xz_plane)

        return (abs(dot_x_xy)>np.cos(np.radians(self.filtering_y_axis)) and
               abs(dot_x_xz)>np.cos(np.radians(self.filtering_z_axis)))




    def visible_observation_callback(self,visible_ar_marker_msgs, ar_marker_msgs):
        """
        """
        marker_blacklist=[]
        if self.movement_validity(ar_marker_msgs.header):
            self.observation(visible_ar_marker_msgs.header,[],[],is_mov=True)
            return
        for marker in visible_ar_marker_msgs.markers:
            # if marker.header.frame_id!='':
            if not self.pos_validity_marker(marker):
                if marker.main_id not in marker_blacklist:
                    marker_blacklist.append(marker.main_id)


        self.observation( visible_ar_marker_msgs.header,ar_marker_msgs.markers,marker_blacklist,is_mov=False)


    def observation(self,header_, ar_marker_list,marker_blacklist,is_mov=False):
        """
        """
        header = header_
        header_global = rospy.Header(header_.seq,header_.stamp,'/'+self.global_frame_id)
        all_nodes = []
        for marker in ar_marker_list:
            # print marker.id

            if marker.id not in self.blacklist_id and marker.id not in marker_blacklist:
                if marker.id not in self.id_link:
                    # may blacklist the marker if it is unknown to the ontology
                    self.new_node(marker)
            # re-check: new_node() may have just blacklisted the marker
            if marker.id not in self.blacklist_id and marker.id not in marker_blacklist:
                id = self.id_link[marker.id]
                pose = Vector6DStable().from_msg(marker.pose.pose)


                header = marker.header
                header_global.seq = header.seq
                header_global.stamp = header.stamp

                s, pose_map = self.tf_bridge.get_pose_from_tf(self.global_frame_id, header.frame_id.lstrip('/'), header.stamp)
                # debug: robot base pose in the map frame
                s1, pose_map1 = self.tf_bridge.get_pose_from_tf("base_footprint", "map", header.stamp)
                rospy.logdebug(str(pose_map1))
                if self.ar_nodes_local[id].pose is None:
                    self.ar_nodes_local[id].pose = Vector6DStable(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z,
                                                                   rx=pose.rot.x, ry=pose.rot.y, rz=pose.rot.z, time=header.stamp)
                if s:
                    if self.ar_nodes[id].pose is None:
                        self.ar_nodes[id].pose = Vector6DStable(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z,
                                                                      rx=pose.rot.x, ry=pose.rot.y, rz=pose.rot.z, time=header.stamp)
                    else:
                            # if id == "cube_GGTB":
                            #     print pose.pos.z

                            self.ar_nodes[id].pose.pos.update_no_kalmann(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z, time=header.stamp)
                            self.ar_nodes[id].pose.rot.update_no_kalmann(x=pose.rot.x, y=pose.rot.y, z=pose.rot.z, time=header.stamp)

                            self.ar_nodes_local[id].pose.pos.update_no_kalmann(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z, time=header.stamp)
                            self.ar_nodes_local[id].pose.rot.update_no_kalmann(x=pose.rot.x, y=pose.rot.y, z=pose.rot.z, time=header.stamp)
                            # if id == "cube_GGTB":
                            #     print pose.pos.z

                    self.ar_nodes[id].pose.from_transform(np.dot(pose_map.transform(),self.ar_nodes[id].pose.transform()))
                            # if id=="box_C3":
                    #     print self.ar_nodes[id].pose.pos
                    self.ar_nodes[id].last_update = header.stamp
            self.world_publisher_global.publish(self.ar_nodes.values(), [],header_global)
        self.world_publisher_local.publish(self.ar_nodes_local.values(),[],header)


        if self.publish_tf is True and len(header_global.frame_id)>0:
            self.tf_bridge.publish_tf_frames(self.ar_nodes.values(), [], header_global)
        # self.marker_publisher.publish(self.ar_nodes.values(),header_global)
    



    def new_node(self, marker):
        """ Create the scene nodes for a newly seen AR marker using the ontology """
        # Resolve the marker id to an individual and fetch its mesh from the ontology.
        node = SceneNode()
        node_local = SceneNode()
        nodeid = self.onto.individuals.getFrom("hasArId", "real#" + str(marker.id))

        if not nodeid:
            # Unknown marker: ignore it from now on.
            self.blacklist_id.append(marker.id)
        else:
            self.id_link[marker.id] = nodeid[0]
            path = self.onto.individuals.getOn(nodeid[0], "hasMesh")[0].split("#")[-1]

            node.label = "label"
            node_local.label = "label"
            rospy.logdebug(path)
            shape = Mesh(path,
                         x=0, y=0, z=0,
                         rx=0, ry=0, rz=0)
            # Color the shape according to the individual's name.
            r, g, b = 0, 0, 0
            if "ox" in nodeid[0]:
                r, g, b = 0.82, 0.42, 0.12
            if "cube" in nodeid[0]:
                r, g, b = 0, 0, 1
            if "GBTB" in nodeid[0]:
                r, g, b = 1, 0, 0
            if "BGTG" in nodeid[0]:
                r, g, b = 0, 1, 0

            shape.color[0] = r
            shape.color[1] = g
            shape.color[2] = b
            shape.color[3] = 1
            node.shapes.append(shape)
            node.id = nodeid[0]
            node_local.shapes.append(shape)
            node_local.id = nodeid[0]

            self.ar_nodes[nodeid[0]] = node
            self.ar_nodes_local[nodeid[0]] = node_local

    def run(self):
        while not rospy.is_shutdown():
            rospy.spin()
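The movement_validity check above measures how well the head's displacement aligns with the local x axis of the head frame, after projecting it into the head's xz and xy planes, and accepts it only inside the configured angular cones. A minimal standalone sketch of the underlying test, simplified to a single axis (NumPy only; the vectors and the 15-degree threshold are illustrative values, not taken from the node):

import numpy as np

def within_cone(direction, axis, max_angle_deg):
    # Cosine of the angle between the displacement and the axis.
    cos_angle = np.dot(direction, axis) / (np.linalg.norm(direction) *
                                           np.linalg.norm(axis))
    return abs(cos_angle) > np.cos(np.radians(max_angle_deg))

displacement = np.array([0.30, 0.02, 0.05])   # head moved mostly along x
x_axis = np.array([1.0, 0.0, 0.0])
moves_along_x = within_cone(displacement, x_axis, 15.0)  # True for this example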
Esempio n. 12
0
class ObjectPerceptionNode(object):
    def __init__(self):
        """
        """
        self.tf_bridge = TfBridge()
        self.cv_bridge = CvBridge()

        self.n_frame = rospy.get_param("~n_frame", 4)
        self.frame_count = 0

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        object_detector_weights_filename = rospy.get_param(
            "~object_detector_weights_filename", "")
        object_detector_model_filename = rospy.get_param(
            "~object_detector_model_filename", "")
        object_detector_config_filename = rospy.get_param(
            "~object_detector_config_filename", "")

        enable_cuda = rospy.get_param("~enable_cuda", True)
        use_mask_rcnn = rospy.get_param("~use_mask_rcnn", False)

        if use_mask_rcnn is True:
            self.object_detector = MaskRCNNDetector(
                object_detector_weights_filename,
                object_detector_model_filename,
                object_detector_config_filename,
                enable_cuda=enable_cuda)
        else:
            self.object_detector = SSDDetector(
                object_detector_weights_filename,
                object_detector_model_filename,
                object_detector_config_filename,
                enable_cuda=enable_cuda)

        self.n_init = rospy.get_param("~n_init", 1)

        self.max_iou_distance = rospy.get_param("~max_iou_distance", 0.98)
        self.max_appearance_distance = rospy.get_param(
            "~max_appearance_distance", 0.25)

        self.max_lost = rospy.get_param("~max_lost", 4)
        self.max_age = rospy.get_param("~max_age", 12)

        self.robot_camera = None
        self.camera_info = None

        self.table = None
        self.events = []

        self.robot_camera_clipnear = rospy.get_param("~robot_camera_clipnear",
                                                     0.1)
        self.robot_camera_clipfar = rospy.get_param("~robot_camera_clipfar",
                                                    25.0)

        self.object_tracker = MultiObjectTracker(iou_cost,
                                                 appearance_cost,
                                                 self.max_iou_distance,
                                                 self.max_appearance_distance,
                                                 self.n_init,
                                                 self.max_lost,
                                                 self.max_age,
                                                 use_tracker=True)

        appearance_features_weights_filename = rospy.get_param(
            "~appearance_features_weights_filename", "")
        appearance_features_model_filename = rospy.get_param(
            "~appearance_features_model_filename", "")

        self.appearance_features_estimator = AppearanceFeaturesEstimator(
            appearance_features_weights_filename,
            appearance_features_model_filename)

        self.shape_estimator = ShapeEstimator()
        self.color_features_estimator = ColorFeaturesEstimator()
        self.object_pose_estimator = ObjectPoseEstimator()

        self.publish_tf = rospy.get_param("~publish_tf", True)
        self.publish_viz = rospy.get_param("~publish_viz", True)
        self.publish_markers = rospy.get_param("~publish_markers", True)

        self.world_publisher = WorldPublisher("object_tracks")
        self.view_publisher = ViewPublisher("object_perception")
        self.marker_publisher = MarkerPublisher("object_markers")

        self.use_depth = rospy.get_param("~use_depth", False)
        self.rgb_image_topic = rospy.get_param("~rgb_image_topic",
                                               "/camera/rgb/image_raw")
        self.rgb_camera_info_topic = rospy.get_param(
            "~rgb_camera_info_topic", "/camera/rgb/camera_info")

        rospy.loginfo(
            "[object_perception] Subscribing to '/{}' topic...".format(
                self.rgb_camera_info_topic))
        self.camera_info_subscriber = rospy.Subscriber(
            self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)

        if self.use_depth is True:
            self.depth_image_topic = rospy.get_param(
                "~depth_image_topic", "/camera/depth/image_raw")
            self.depth_camera_info_topic = rospy.get_param(
                "~depth_camera_info_topic", "/camera/depth/camera_info")

            rospy.loginfo(
                "[object_perception] Subscribing to '/{}' topic...".format(
                    self.rgb_image_topic))
            self.rgb_image_sub = message_filters.Subscriber(
                self.rgb_image_topic, Image)
            rospy.loginfo(
                "[object_perception] Subscribing to '/{}' topic...".format(
                    self.depth_image_topic))
            self.depth_image_sub = message_filters.Subscriber(
                self.depth_image_topic, Image)

            self.sync = message_filters.ApproximateTimeSynchronizer(
                [self.rgb_image_sub, self.depth_image_sub],
                DEFAULT_SENSOR_QUEUE_SIZE,
                0.1,
                allow_headerless=True)
            self.sync.registerCallback(self.observation_callback)
        else:
            rospy.loginfo(
                "[object_perception] Subscribing to '/{}' topic...".format(
                    self.rgb_image_topic))
            self.rgb_image_sub = rospy.Subscriber(
                self.rgb_image_topic,
                Image,
                self.observation_callback,
                queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

    def camera_info_callback(self, msg):
        """ """
        if self.camera_info is None:
            rospy.loginfo("[perception] Camera info received !")
        self.camera_info = msg
        self.camera_frame_id = msg.header.frame_id
        self.robot_camera = Camera().from_msg(
            msg,
            clipnear=self.robot_camera_clipnear,
            clipfar=self.robot_camera_clipfar)

    def observation_callback(self, bgr_image_msg, depth_image_msg=None):
        """
        """
        if self.robot_camera is not None:
            header = bgr_image_msg.header
            header.frame_id = self.global_frame_id
            bgr_image = self.cv_bridge.imgmsg_to_cv2(bgr_image_msg, "bgr8")
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            if depth_image_msg is not None:
                depth_image = self.cv_bridge.imgmsg_to_cv2(depth_image_msg)
            else:
                depth_image = None

            self.image_height, self.image_width, _ = bgr_image.shape

            success, view_pose = self.tf_bridge.get_pose_from_tf(
                self.global_frame_id, self.camera_frame_id)

            if success is not True:
                rospy.logwarn(
                    "[object_perception] The camera sensor is not localized in world space (frame '{}'), please check if the sensor frame is published in /tf"
                    .format(self.global_frame_id))
            else:
                self.frame_count %= self.n_frame
                all_nodes, events = self.perception_pipeline(
                    view_pose,
                    rgb_image,
                    depth_image=depth_image,
                    time=header.stamp)

                self.world_publisher.publish(all_nodes, events, header)

                if self.publish_markers is True:
                    self.marker_publisher.publish(all_nodes, header)

                if self.publish_tf is True:
                    self.tf_bridge.publish_tf_frames(all_nodes, events, header)

                self.frame_count += 1

    def perception_pipeline(self,
                            view_pose,
                            rgb_image,
                            depth_image=None,
                            time=None):
        ######################################################
        # Detection
        ######################################################
        pipeline_timer = cv2.getTickCount()

        detection_timer = cv2.getTickCount()

        # Run the detector only on one frame out of every n_frame.
        detections = []
        if self.frame_count == 0:
            detections = self.object_detector.detect(rgb_image,
                                                     depth_image=depth_image)

        detection_fps = cv2.getTickFrequency() / (cv2.getTickCount() -
                                                  detection_timer)
        ####################################################################
        # Features estimation
        ####################################################################
        features_timer = cv2.getTickCount()

        if self.frame_count == 0:
            self.appearance_features_estimator.estimate(rgb_image, detections)
            self.color_features_estimator.estimate(rgb_image, detections)

        features_fps = cv2.getTickFrequency() / (cv2.getTickCount() -
                                                 features_timer)
        ######################################################
        # Tracking
        ######################################################
        tracking_timer = cv2.getTickCount()

        if self.frame_count == 0:
            self.object_tracks = self.object_tracker.update(
                rgb_image, detections, depth_image=depth_image, time=time)
        else:
            self.object_tracks = self.object_tracker.update(
                rgb_image, [], depth_image=depth_image, time=time)

        tracks = self.object_tracks

        tracking_fps = cv2.getTickFrequency() / (cv2.getTickCount() -
                                                 tracking_timer)
        ########################################################
        # Pose & Shape estimation
        ########################################################
        pose_timer = cv2.getTickCount()

        self.object_pose_estimator.estimate(tracks, view_pose,
                                            self.robot_camera)
        self.shape_estimator.estimate(rgb_image, tracks, self.robot_camera)

        pose_fps = cv2.getTickFrequency() / (cv2.getTickCount() - pose_timer)

        pipeline_fps = cv2.getTickFrequency() / (cv2.getTickCount() -
                                                 pipeline_timer)
        ########################################################
        # Visualization
        ########################################################
        if self.publish_viz is True:
            self.view_publisher.publish(rgb_image,
                                        tracks,
                                        time,
                                        overlay_image=None,
                                        fps=pipeline_fps,
                                        view_pose=view_pose,
                                        camera=self.robot_camera)

        all_nodes = tracks
        return all_nodes, self.events

    def run(self):
        while not rospy.is_shutdown():
            rospy.spin()
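A minimal entry-point sketch for running this node standalone, following the usual rospy pattern; the node name "object_perception" is an assumption taken from the log prefix used above:

if __name__ == "__main__":
    rospy.init_node("object_perception")  # node name assumed from the log prefix
    ObjectPerceptionNode().run()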
class InternalSimulatorNode(object):
    """ Standalone node for the internal simulator """
    def __init__(self):
        """ """
        self.tf_bridge = TfBridge()

        self.n_frame = rospy.get_param("~n_frame", 4)
        self.frame_count = 0

        self.robot_camera = None
        self.camera_info = None
        self.camera_frame_id = None

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")
        self.base_frame_id = rospy.get_param("~base_frame_id", "odom")

        self.use_ar_tags = rospy.get_param("~use_ar_tags", True)
        self.ar_tags_topic = rospy.get_param("~ar_tags_topic", "ar_tracks")

        self.use_motion_capture = rospy.get_param("~use_motion_capture", True)
        self.motion_capture_topic = rospy.get_param("~motion_capture_topic",
                                                    "mocap_tracks")

        use_simulation_gui = rospy.get_param("~use_simulation_gui", True)
        simulation_config_filename = rospy.get_param(
            "~simulation_config_filename", "")
        cad_models_additional_search_path = rospy.get_param(
            "~cad_models_additional_search_path", "")
        static_entities_config_filename = rospy.get_param(
            "~static_entities_config_filename", "")
        robot_urdf_file_path = rospy.get_param("~robot_urdf_file_path", "")
        self.internal_simulator = InternalSimulator(
            True, simulation_config_filename,
            cad_models_additional_search_path, static_entities_config_filename,
            robot_urdf_file_path, self.global_frame_id, self.base_frame_id)
        self.physics_monitor = GraphicMonitor(
            internal_simulator=self.internal_simulator)
        if self.use_motion_capture is True:
            self.motion_capture_tracks = []
            self.motion_capture_sub = rospy.Subscriber(
                self.motion_capture_topic,
                WorldStamped,
                self.motion_capture_callback,
                queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        if self.use_ar_tags is True:
            self.ar_tags_tracks = []
            self.ar_tags_sub = rospy.Subscriber(
                self.ar_tags_topic,
                WorldStamped,
                self.ar_tags_callback,
                queue_size=DEFAULT_SENSOR_QUEUE_SIZE)

        self.ar_tags_sub = rospy.Subscriber("/mocap_tracks", rospy.AnyMsg,
                                            self.publish_view)
        self.pick_subsc = rospy.Subscriber("/pr2_tasks_node/pr2_facts",
                                           RobotAction, self.pick_callback)

    def publish_view(self, tf):
        self.physics_monitor.publish_view(tf)

    def pick_callback(self, msg):
        self.physics_monitor.pick_callback(msg)

    def ar_tags_callback(self, world_msg):
        """ Forward the received AR tracks to the physics monitor """
        ar_tags_tracks = []
        for node in world_msg.world.scene:
            ar_tags_tracks.append(SceneNode().from_msg(node))
        self.ar_tags_tracks = ar_tags_tracks
        # Look up the sensor pose in the global frame (leading '/' stripped).
        if world_msg.header.frame_id[1:] != '':
            s, pose = self.tf_bridge.get_pose_from_tf(
                self.global_frame_id, world_msg.header.frame_id[1:])
        else:
            pose = None

        self.physics_monitor.monitor(ar_tags_tracks, pose, world_msg.header)

    def motion_capture_callback(self, world_msg):
        motion_capture_tracks = []
        for node in world_msg.world.scene:
            motion_capture_tracks.append(SceneNode().from_msg(node))
        self.physics_monitor.mocap(motion_capture_tracks, world_msg.header)

    def run(self):
        while not rospy.is_shutdown():
            rospy.spin()
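The /mocap_tracks subscription above uses rospy.AnyMsg, which delivers the message without deserializing it; that is sufficient when the callback does not need the decoded contents. A small standalone sketch of the same pattern (the topic and node names here are illustrative, not taken from this package):

import rospy

def on_any_message(raw_msg):
    # raw_msg is a rospy.AnyMsg: the payload stays serialized.
    rospy.loginfo("message received, refreshing the view")

rospy.init_node("any_msg_listener")  # illustrative node name
rospy.Subscriber("/mocap_tracks", rospy.AnyMsg, on_any_message)
rospy.spin()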
Esempio n. 14
0
class ArPerceptionNode(object):
    def __init__(self):
        """
        """
        self.tf_bridge = TfBridge()

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        self.bridge = CvBridge()
        self.robot_camera = None
        self.camera_info = None

        self.events = []

        self.robot_camera_clipnear = rospy.get_param("~robot_camera_clipnear", 0.1)
        self.robot_camera_clipfar = rospy.get_param("~robot_camera_clipfar", 25.0)

        self.publish_tf = rospy.get_param("~publish_tf", False)

        self.publish_viz = rospy.get_param("~publish_viz", True)

        self.world_publisher = WorldPublisher("ar_tracks")

        self.marker_publisher = MarkerPublisher("ar_markers")

        self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic", "/camera/rgb/camera_info")
        rospy.loginfo("[ar_perception] Subscribing to '/{}' topic...".format(self.rgb_camera_info_topic))
        self.camera_info_subscriber = rospy.Subscriber(self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)

        self.ar_pose_marker_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.observation_callback)

        self.ar_nodes = {}

        self.cad_models_search_path = rospy.get_param("~cad_models_search_path", "")

        self.ar_tag_config = rospy.get_param("~ar_tag_config", "")

        if self.ar_tag_config != "":
            with open(self.ar_tag_config, 'r') as config:
                self.ar_config = yaml.safe_load(config)

        for marker in self.ar_config:
            if marker["id"] not in self.ar_nodes:
                node = SceneNode()
                node.id = marker["id"]
                node.label = marker["label"]
                position = marker["position"]
                orientation = marker["orientation"]
                description = marker["description"]
                color = marker["color"]
                shape = Mesh("file://"+self.cad_models_search_path + "/" + marker["file"],
                             x=position["x"], y=position["y"], z=position["z"],
                             rx=orientation["x"], ry=orientation["y"], rz=orientation["z"])
                shape.color[0] = color["r"]
                shape.color[1] = color["g"]
                shape.color[2] = color["b"]
                shape.color[3] = color["a"]
                node.shapes.append(shape)
                node.description = description
                node.state = SceneNodeState.CONFIRMED
                self.ar_nodes[marker["marker_id"]] = node

    def camera_info_callback(self, msg):
        """ """
        if self.camera_info is None:
            rospy.loginfo("[ar_perception] Camera info received !")
        self.camera_info = msg
        self.camera_frame_id = msg.header.frame_id
        self.robot_camera = Camera().from_msg(msg,
                                              clipnear=self.robot_camera_clipnear,
                                              clipfar=self.robot_camera_clipfar)

    def observation_callback(self, ar_marker_msgs):
        """
        """
        if self.robot_camera is not None:
            header = ar_marker_msgs.header
            header.stamp = rospy.Time()
            header.frame_id = self.global_frame_id

            success, view_pose = self.tf_bridge.get_pose_from_tf(self.global_frame_id, self.camera_frame_id)

            if success is not True:
                rospy.logwarn("[ar_perception] The camera sensor is not localized in world space (frame '{}'), please check if the sensor frame is published in /tf".format(self.global_frame_id))
            else:
                all_nodes = []

                for marker in ar_marker_msgs.markers:
                    if marker.id in self.ar_nodes:
                        pose = Vector6D().from_msg(marker.pose.pose)
                        if self.ar_nodes[marker.id].pose is None:
                            self.ar_nodes[marker.id].pose = Vector6DStable(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z,
                                                                           rx=pose.rot.x, ry=pose.rot.y, rz=pose.rot.z, time=header.stamp)
                        else:
                            self.ar_nodes[marker.id].pose.pos.update(x=pose.pos.x, y=pose.pos.y, z=pose.pos.z, time=header.stamp)
                            self.ar_nodes[marker.id].pose.rot.update(x=pose.rot.x, y=pose.rot.y, z=pose.rot.z, time=header.stamp)
                        all_nodes.append(self.ar_nodes[marker.id])

                self.world_publisher.publish(all_nodes, [], header)

                if self.publish_viz is True:
                    self.marker_publisher.publish(all_nodes, header)

                if self.publish_tf is True:
                    self.tf_bridge.publish_tf_frames(all_nodes, [], header)

    def run(self):
        while not rospy.is_shutdown():
            rospy.spin()
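The constructor above expects ~ar_tag_config to point to a YAML file with one entry per marker, containing exactly the fields it reads (id, marker_id, label, description, file, position, orientation, color). A sketch of one such entry, loaded the same way; every value below is an illustrative assumption, not taken from a real configuration:

import yaml

example_config = yaml.safe_load("""
- id: table_1                      # scene node id
  marker_id: 230                   # numeric AR tag id (AlvarMarker.id)
  label: table
  description: small table tagged with an AR marker
  file: table.obj                  # mesh path relative to ~cad_models_search_path
  position: {x: 0.0, y: 0.0, z: 0.05}
  orientation: {x: 0.0, y: 0.0, z: 0.0}
  color: {r: 0.8, g: 0.8, b: 0.8, a: 1.0}
""")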
    def __init__(self):
        """
        """

        self.tf_bridge = TfBridge()

        self.n_frame = rospy.get_param("~n_frame", 4)
        self.frame_count = 0

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        self.color = rospy.get_param("~color", "red")

        self.debug_topics = rospy.get_param("~debug_topics", True)

        self.color_detector = ColorDetector(debug_topics=self.debug_topics,
                                            color=self.color)

        self.n_init = rospy.get_param("~n_init", 1)

        self.max_iou_distance = rospy.get_param("~max_iou_distance", 0.98)

        self.max_lost = rospy.get_param("~max_lost", 4)
        self.max_age = rospy.get_param("~max_age", 300)

        self.bridge = CvBridge()
        self.robot_camera = None
        self.camera_info = None

        self.table = None
        self.events = []

        self.robot_camera_clipnear = rospy.get_param("~robot_camera_clipnear",
                                                     0.1)
        self.robot_camera_clipfar = rospy.get_param("~robot_camera_clipfar",
                                                    25.0)

        self.object_tracker = MultiObjectTracker(iou_cost,
                                                 centroid_cost,
                                                 self.max_iou_distance,
                                                 None,
                                                 self.n_init,
                                                 self.max_lost,
                                                 self.max_age,
                                                 use_tracker=True)

        self.shape_estimator = ShapeEstimator()
        self.object_pose_estimator = ObjectPoseEstimator()

        self.publish_tf = rospy.get_param("~publish_tf", False)

        self.publish_viz = rospy.get_param("~publish_viz", True)

        self.world_publisher = WorldPublisher("color_object_tracks")

        self.view_publisher = ViewPublisher("color_object_perception")
        self.marker_publisher = MarkerPublisher("color_object_markers")

        self.use_depth = rospy.get_param("~use_depth", False)
        self.rgb_image_topic = rospy.get_param("~rgb_image_topic",
                                               "/camera/rgb/image_raw")
        self.rgb_camera_info_topic = rospy.get_param(
            "~rgb_camera_info_topic", "/camera/rgb/camera_info")

        rospy.loginfo(
            "[color_perception] Subscribing to '/{}' topic...".format(
                self.rgb_camera_info_topic))
        self.camera_info_subscriber = rospy.Subscriber(
            self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)

        if self.use_depth is True:
            self.depth_image_topic = rospy.get_param(
                "~depth_image_topic", "/camera/depth/image_raw")
            self.depth_camera_info_topic = rospy.get_param(
                "~depth_camera_info_topic", "/camera/depth/camera_info")

            rospy.loginfo(
                "[color_perception] Subscribing to '/{}' topic...".format(
                    self.rgb_image_topic))
            self.rgb_image_sub = message_filters.Subscriber(
                self.rgb_image_topic, Image)
            rospy.loginfo(
                "[color_perception] Subscribing to '/{}' topic...".format(
                    self.depth_image_topic))
            self.depth_image_sub = message_filters.Subscriber(
                self.depth_image_topic, Image)

            self.sync = message_filters.ApproximateTimeSynchronizer(
                [self.rgb_image_sub, self.depth_image_sub],
                DEFAULT_SENSOR_QUEUE_SIZE,
                0.1,
                allow_headerless=True)
            self.sync.registerCallback(self.observation_callback)
        else:
            rospy.loginfo(
                "[color_perception] Subscribing to '/{}' topic...".format(
                    self.rgb_image_topic))
            self.rgb_image_sub = rospy.Subscriber(
                self.rgb_image_topic,
                Image,
                self.observation_callback,
                queue_size=DEFAULT_SENSOR_QUEUE_SIZE)