Example #1
 def start_track(self, rgb_image, detection, depth_image=None):
     """Start tracking a detection and return the new track's index"""
     self.tracks.append(
         SceneNode(detection=detection,
                   n_init=self.n_init,
                   max_lost=self.max_lost,
                   max_age=self.max_age))
     track_index = len(self.tracks) - 1
     if self.use_tracker:
         # Initialize the single-object tracker on the newly created track
         self.tracks[track_index].start_tracker()
         self.tracks[track_index].tracker.update(rgb_image, detection)
     return track_index
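A minimal usage sketch for this method, assuming a multi-object tracker class that owns tracks, n_init, max_lost, max_age and use_tracker; the class name and the rgb_frame and detection variables are hypothetical, not part of the example:

# Hypothetical usage: register a fresh detection as a new track.
tracker = MultiObjectTracker(n_init=3, max_lost=5, max_age=30)
index = tracker.start_track(rgb_frame, detection)
print("started track #{}".format(index))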
Example #2
    def __init__(self):
        """Initialize the AR perception node"""
        self.tf_bridge = TfBridge()

        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        self.bridge = CvBridge()
        self.robot_camera = None
        self.camera_info = None

        self.events = []

        self.robot_camera_clipnear = rospy.get_param("~robot_camera_clipnear", 0.1)
        self.robot_camera_clipfar = rospy.get_param("~robot_camera_clipfar", 25.0)

        self.publish_tf = rospy.get_param("~publish_tf", False)

        self.publish_viz = rospy.get_param("~publish_viz", True)

        self.world_publisher = WorldPublisher("ar_tracks")

        self.marker_publisher = MarkerPublisher("ar_markers")

        self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic", "/camera/rgb/camera_info")
        rospy.loginfo("[ar_perception] Subscribing to '/{}' topic...".format(self.rgb_camera_info_topic))
        self.camera_info_subscriber = rospy.Subscriber(self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)

        self.ar_pose_marker_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.observation_callback)

        self.ar_nodes = {}

        self.cad_models_search_path = rospy.get_param("~cad_models_search_path", "")

        self.ar_tag_config = rospy.get_param("~ar_tag_config", "")

        self.ar_config = []
        if self.ar_tag_config != "":
            with open(self.ar_tag_config, 'r') as config:
                self.ar_config = yaml.safe_load(config)

        for marker in self.ar_config:
            if marker["id"] not in self.ar_nodes:
                node = SceneNode()
                node.id = marker["id"]
                node.label = marker["label"]
                position = marker["position"]
                orientation = marker["orientation"]
                description = marker["description"]
                color = marker["color"]
                shape = Mesh("file://" + self.cad_models_search_path + "/" + marker["file"],
                             x=position["x"], y=position["y"], z=position["z"],
                             rx=orientation["x"], ry=orientation["y"], rz=orientation["z"])
                shape.color[0] = color["r"]
                shape.color[1] = color["g"]
                shape.color[2] = color["b"]
                shape.color[3] = color["a"]
                node.shapes.append(shape)
                node.description = description
                node.state = SceneNodeState.CONFIRMED
                # Key on the same marker id used in the membership test above
                self.ar_nodes[marker["id"]] = node
Example #3
    def new_node(self, marker):
        # Resolve the real id, mesh and label of the marker from the ontology
        node = SceneNode()
        node_local = SceneNode()
        nodeid = self.onto.individuals.getFrom("hasArId", "real#" + str(marker.id))

        if not nodeid:
            # Unknown marker: remember it so it is not queried again
            self.blacklist_id.append(marker.id)
        else:
            self.id_link[marker.id] = nodeid[0]
            path = self.onto.individuals.getOn(nodeid[0], "hasMesh")[0].split("#")[-1]

            node.label = "label"
            node_local.label = "label"
            shape = Mesh(path,
                         x=0, y=0, z=0,
                         rx=0, ry=0, rz=0)
            # Pick a display color from the individual's name
            r, g, b = 0, 0, 0
            if "ox" in nodeid[0]:
                r, g, b = 0.82, 0.42, 0.12
            if "cube" in nodeid[0]:
                r, g, b = 0, 0, 1
            if "GBTB" in nodeid[0]:
                r, g, b = 1, 0, 0
            if "BGTG" in nodeid[0]:
                r, g, b = 0, 1, 0

            shape.color[0] = r
            shape.color[1] = g
            shape.color[2] = b
            shape.color[3] = 1
            node.shapes.append(shape)
            node.id = nodeid[0]
            node_local.shapes.append(shape)
            node_local.id = nodeid[0]

            self.ar_nodes[nodeid[0]] = node
            self.ar_nodes_local[nodeid[0]] = node_local
Example #4
    def ar_tags_callback(self, world_msg):
        ar_tags_tracks = []
        for node in world_msg.world.scene:
            ar_tags_tracks.append(SceneNode().from_msg(node))
        self.ar_tags_tracks = ar_tags_tracks
        # The frame_id may carry a leading '/': strip it before the TF lookup
        if world_msg.header.frame_id[1:] != '':
            _, pose = self.tf_bridge.get_pose_from_tf(
                self.global_frame_id, world_msg.header.frame_id[1:])
        else:
            pose = None

        self.physics_monitor.monitor(ar_tags_tracks, pose, world_msg.header)
Example #5
    def callback(self, world_msg):
        """ World callback """
        scene_nodes = {}
        for node_msg in world_msg.world.scene:
            node = SceneNode().from_msg(node_msg)
            scene_nodes[node.id] = node
            if node.id not in self.created_nodes:
                self.add_scene_node(node)
                self.created_nodes[node.id] = True

        for situation_msg in world_msg.world.timeline:
            situation = TemporalPredicate().from_msg(situation_msg)
            self.learn_affordance(situation)
            self.update_situation(situation)

        self.scene_nodes = scene_nodes
Example #6
 def __init__(self):
     face_pose_str = rospy.get_param("~face_global_pose", "0 0 0 0 0 0")
     float_list = np.array([float(i) for i in face_pose_str.split()])
     face_pose = Vector6D(x=float_list[0],
                          y=float_list[1],
                          z=float_list[2],
                          rx=float_list[3],
                          ry=float_list[4],
                          rz=float_list[5])
     self.fake_face = SceneNode(label="face", pose=face_pose)
     self.fake_face.shapes.append(Sphere(d=0.15))
     self.fake_face.id = "face"
     self.global_frame_id = rospy.get_param("~global_frame_id", "odom")
     self.rgb_camera_info_topic = rospy.get_param("~rgb_camera_info_topic",
                                                  "")
     self.tf_bridge = TfBridge()
     self.world_publisher = WorldPublisher("human_tracks")
     self.image_info_sub = rospy.Subscriber(
         self.rgb_camera_info_topic,
         CameraInfo,
         self.callback,
         queue_size=DEFAULT_SENSOR_QUEUE_SIZE)
Example #7
    def load_urdf(self,
                  id,
                  filename,
                  start_pose,
                  remove_friction=False,
                  static=False,
                  label="thing",
                  description=""):
        """ """
        try:
            use_fixed_base = 1 if static else 0
            base_link_sim_id = p.loadURDF(
                filename,
                start_pose.position().to_array(),
                start_pose.quaternion(),
                useFixedBase=use_fixed_base,
                flags=p.URDF_ENABLE_SLEEPING
                | p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES)
            # loadURDF returns a negative id if the file failed to load
            if base_link_sim_id < 0:
                raise RuntimeError("Invalid URDF")

            self.entity_id_map[id] = base_link_sim_id
            # Create a joint map to ease exploration
            self.reverse_entity_id_map[base_link_sim_id] = id
            self.joint_id_map[base_link_sim_id] = {}
            self.reverse_joint_id_map[base_link_sim_id] = {}
            for i in range(0, p.getNumJoints(base_link_sim_id)):
                info = p.getJointInfo(base_link_sim_id, i)
                self.joint_id_map[base_link_sim_id][info[1]] = info[0]
                self.reverse_joint_id_map[base_link_sim_id][info[0]] = info[1]
            scene_node = SceneNode(pose=start_pose, is_static=True)
            scene_node.id = id
            scene_node.label = label
            scene_node.description = description
            sim_id = self.entity_id_map[id]
            visual_shapes = p.getVisualShapeData(sim_id)
            for shape in visual_shapes:
                # getVisualShapeData tuple layout: (object id, link id,
                # geometry type, dimensions, mesh file, local position,
                # local orientation, rgba color)
                link_id = shape[1]
                shape_type = shape[2]
                dimensions = shape[3]
                mesh_file_path = shape[4]
                position = shape[5]
                orientation = shape[6]
                rgba_color = shape[7]

                if link_id != -1:
                    link_state = p.getLinkState(sim_id, link_id)
                    t_link = link_state[0]
                    q_link = link_state[1]
                    t_inertial = link_state[2]
                    q_inertial = link_state[3]

                    tf_world_link = np.dot(translation_matrix(t_link),
                                           quaternion_matrix(q_link))
                    tf_inertial_link = np.dot(translation_matrix(t_inertial),
                                              quaternion_matrix(q_inertial))
                    world_transform = np.dot(tf_world_link,
                                             np.linalg.inv(tf_inertial_link))

                else:
                    t_link, q_link = p.getBasePositionAndOrientation(sim_id)
                    world_transform = np.dot(translation_matrix(t_link),
                                             quaternion_matrix(q_link))

                if shape_type == p.GEOM_SPHERE:
                    primitive_shape = Sphere(dimensions[0] * 2.0)
                elif shape_type == p.GEOM_BOX:
                    primitive_shape = Box(dimensions[0], dimensions[1],
                                          dimensions[2])
                elif shape_type == p.GEOM_CYLINDER:
                    primitive_shape = Cylinder(dimensions[1] * 2.0,
                                               dimensions[0])
                elif shape_type == p.GEOM_PLANE:
                    primitive_shape = Box(dimensions[0], dimensions[1], 0.0001)
                elif shape_type == p.GEOM_MESH:
                    primitive_shape = Mesh("file://" + mesh_file_path,
                                           scale_x=dimensions[0],
                                           scale_y=dimensions[1],
                                           scale_z=dimensions[2])
                else:
                    raise NotImplementedError(
                        "Capsule shapes are not supported at the moment")

                if link_id != -1:
                    shape_transform = np.dot(translation_matrix(position),
                                             quaternion_matrix(orientation))
                    shape_transform = np.dot(world_transform, shape_transform)
                    shape_transform = np.linalg.inv(
                        np.dot(np.linalg.inv(shape_transform),
                               scene_node.pose.transform()))
                    position = translation_from_matrix(shape_transform)
                    orientation = quaternion_from_matrix(shape_transform)

                    primitive_shape.pose.pos.x = position[0]
                    primitive_shape.pose.pos.y = position[1]
                    primitive_shape.pose.pos.z = position[2]

                    primitive_shape.pose.from_quaternion(
                        orientation[0], orientation[1], orientation[2],
                        orientation[3])
                else:
                    shape_transform = np.dot(translation_matrix(position),
                                             quaternion_matrix(orientation))
                    shape_transform = np.dot(world_transform, shape_transform)
                    position = translation_from_matrix(shape_transform)
                    orientation = quaternion_from_matrix(shape_transform)

                    scene_node.pose.pos.x = position[0]
                    scene_node.pose.pos.y = position[1]
                    scene_node.pose.pos.z = position[2]

                    scene_node.pose.from_quaternion(orientation[0],
                                                    orientation[1],
                                                    orientation[2],
                                                    orientation[3])

                if len(rgba_color) > 0:
                    primitive_shape.color[0] = rgba_color[0]
                    primitive_shape.color[1] = rgba_color[1]
                    primitive_shape.color[2] = rgba_color[2]
                    primitive_shape.color[3] = rgba_color[3]

                scene_node.shapes.append(primitive_shape)
            self.entity_map[id] = scene_node
            rospy.loginfo(
                "[simulation] '{}' file successfully loaded".format(filename))
            return True, scene_node
        except Exception as e:
            rospy.logwarn("[simulation] Error loading URDF '{}': {}".format(
                filename, e))
            return False, None
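A minimal usage sketch for the loader above, assuming a simulator class exposing load_urdf and the Vector6D pose type from the other examples; the entity id and URDF path are hypothetical:

# Hypothetical call: "table_1" and the URDF path are illustrative only.
pose = Vector6D(x=1.0, y=0.0, z=0.75, rx=0.0, ry=0.0, rz=0.0)
success, scene_node = simulator.load_urdf("table_1", "models/table.urdf",
                                          pose, static=True, label="table")
if success:
    print("loaded '{}' with {} shape(s)".format(scene_node.id,
                                                len(scene_node.shapes)))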
Example #8
    def get_camera_view(self,
                        camera_pose,
                        camera,
                        target_position=None,
                        occlusion_threshold=0.01,
                        rendering_ratio=1.0):
        visible_tracks = []
        rot = quaternion_matrix(camera_pose.quaternion())
        trans = translation_matrix(camera_pose.position().to_array().flatten())
        if target_position is None:
            target = translation_matrix([0.0, 0.0, 1000.0])
            target = translation_from_matrix(np.dot(np.dot(trans, rot),
                                                    target))
        else:
            target = target_position.position().to_array()
        view_matrix = p.computeViewMatrix(camera_pose.position().to_array(),
                                          target, [0, 0, 1])

        width = camera.width
        height = camera.height

        projection_matrix = p.computeProjectionMatrixFOV(
            camera.fov,
            float(width) / height, camera.clipnear, camera.clipfar)

        rendered_width = int(width / rendering_ratio)
        rendered_height = int(height / rendering_ratio)

        width_ratio = width / rendered_width
        height_ratio = height / rendered_height

        if self.use_gui:
            camera_image = p.getCameraImage(
                rendered_width,
                rendered_height,
                viewMatrix=view_matrix,
                renderer=p.ER_BULLET_HARDWARE_OPENGL,
                projectionMatrix=projection_matrix)
        else:
            camera_image = p.getCameraImage(rendered_width,
                                            rendered_height,
                                            viewMatrix=view_matrix,
                                            renderer=p.ER_TINY_RENDERER,
                                            projectionMatrix=projection_matrix)

        rgb_image = cv2.resize(np.array(camera_image[2]),
                               (width, height))[:, :, :3]
        depth_image = np.array(camera_image[3], np.float32).reshape(
            (rendered_height, rendered_width))

        far = camera.clipfar
        near = camera.clipnear
        # Linearize the non-linear OpenGL depth buffer into metric depth
        real_depth_image = far * near / (far - (far - near) * depth_image)

        mask_image = camera_image[4]
        unique, counts = np.unique(np.array(mask_image).flatten(),
                                   return_counts=True)

        for sim_id, count in zip(unique, counts):
            if sim_id > 0:
                cv_mask = np.array(mask_image.copy())
                cv_mask[cv_mask != sim_id] = 0
                cv_mask[cv_mask == sim_id] = 255
                xmin, ymin, w, h = cv2.boundingRect(cv_mask.astype(np.uint8))
                mask = cv_mask[ymin:ymin + h, xmin:xmin + w]
                visible_area = w * h + 1
                screen_area = rendered_width * rendered_height + 1
                if screen_area - visible_area == 0:
                    confidence = 1.0
                else:
                    confidence = visible_area / float(screen_area -
                                                      visible_area)
                # TODO: compute the occlusion score as a ratio between the
                # visible 2D bbox area and the projected 2D bbox area
                if confidence > occlusion_threshold:

                    depth = real_depth_image[int(ymin + h / 2.0)][int(xmin +
                                                                      w / 2.0)]
                    xmin = int(xmin * width_ratio)
                    ymin = int(ymin * height_ratio)
                    w = int(w * width_ratio)
                    h = int(h * height_ratio)

                    id = self.reverse_entity_id_map[sim_id]
                    scene_node = self.get_entity(id)

                    det = Detection(int(xmin),
                                    int(ymin),
                                    int(xmin + w),
                                    int(ymin + h),
                                    id,
                                    confidence,
                                    depth=depth,
                                    mask=mask)
                    track = SceneNode(detection=det)
                    track.static = scene_node.static
                    track.id = id
                    track.mask = det.mask
                    track.shapes = scene_node.shapes
                    track.pose = scene_node.pose
                    track.label = scene_node.label
                    track.description = scene_node.description
                    visible_tracks.append(track)
        return rgb_image, real_depth_image, visible_tracks
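A sketch of how this render call might be driven, assuming a simulator instance plus camera and camera_pose objects like those used above; all three names are assumptions about the surrounding code:

# Hypothetical usage: render at half resolution and list visible tracks.
rgb, depth, tracks = simulator.get_camera_view(camera_pose, camera,
                                               rendering_ratio=0.5)
for track in tracks:
    print("visible:", track.id, track.label)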
Example #9
 def motion_capture_callback(self, world_msg):
     motion_capture_tracks = []
     for node in world_msg.world.scene:
         motion_capture_tracks.append(SceneNode().from_msg(node))
     self.motion_capture_tracks = motion_capture_tracks
Example #10
 def ar_tags_callback(self, world_msg):
     ar_tags_tracks = []
     for node in world_msg.world.scene:
         ar_tags_tracks.append(SceneNode().from_msg(node))
     self.ar_tags_tracks = ar_tags_tracks
Example #11
 def human_perception_callback(self, world_msg):
     human_tracks = []
     for node in world_msg.world.scene:
         human_tracks.append(SceneNode().from_msg(node))
     self.human_tracks = human_tracks
Example #12
 def object_perception_callback(self, world_msg):
     object_tracks = []
     for node in world_msg.world.scene:
         object_tracks.append(SceneNode().from_msg(node))
     self.object_tracks = object_tracks
Example #13
 def motion_capture_callback(self, world_msg):
     motion_capture_tracks = []
     for node in world_msg.world.scene:
         motion_capture_tracks.append(SceneNode().from_msg(node))
     self.physics_monitor.mocap(motion_capture_tracks, world_msg.header)
Example #14
 def world_callback(self, world_msg):
     self.header = world_msg.header
     self.scene_nodes = {}
     for node in world_msg.world.scene:
         self.scene_nodes[node.id] = SceneNode().from_msg(node)
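Examples #9 through #14 repeat one conversion pattern: each callback deserializes every node of the incoming world message into SceneNode objects. A generic helper capturing that pattern might look like this; the helper name is hypothetical, not part of the library:

def scene_nodes_from_msg(world_msg):
    # Deserialize every node of a world message into SceneNode objects
    return [SceneNode().from_msg(node) for node in world_msg.world.scene]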