Пример #1
0
    def update_edge(self,
                    node_name,
                    edge_id,
                    new_action=None,
                    new_top_vel=None):
        """Update the action and/or top velocity of one edge of a node.

        Args:
            node_name: name of the node that owns the edge.
            edge_id: id of the edge to modify.
            new_action: replacement action, or None to keep the current one.
            new_top_vel: replacement top velocity, or None to keep the
                current one.
        """
        msg_store = MessageStoreProxy(collection='topological_maps')
        # The query retrieves the node with the given name from the given pointset.
        query = {"name": node_name, "pointset": self.name}
        # The meta-information restricts the search to this pointset/map.
        query_meta = {}
        query_meta["pointset"] = self.name
        query_meta["map"] = self.map
        # query() returns a list of (message, meta) tuples.
        available = msg_store.query(TopologicalNode._type, query, query_meta)
        if len(available) == 1:
            for edge in available[0][0].edges:
                if edge.edge_id == edge_id:
                    # Bug fix: compare against None explicitly. The previous
                    # "new_action or edge.action" idiom silently discarded
                    # falsy-but-valid values such as a top_vel of 0.0.
                    if new_action is not None:
                        edge.action = new_action
                    if new_top_vel is not None:
                        edge.top_vel = new_top_vel

            msg_store.update(available[0][0], query_meta, query, upsert=True)
        else:
            rospy.logerr("Impossible to store in DB " + str(len(available)) +
                         " waypoints found after query")
            rospy.logerr("Available data: " + str(available))
Пример #2
0
 def add_edge(self, or_waypoint, de_waypoint, action):
     """Append an edge from or_waypoint to de_waypoint unless one exists."""
     rospy.loginfo('Adding Edge from ' + or_waypoint + ' to ' +
                   de_waypoint + ' using ' + action)
     node_name = or_waypoint
     msg_store = MessageStoreProxy(collection='topological_maps')
     # Look up the origin node within this pointset/map.
     query = {"name": node_name, "pointset": self.name}
     query_meta = {"pointset": self.name, "map": self.map}
     available = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(available) == 1:
         node = available[0][0]
         # Only add the edge when no edge to the destination exists yet.
         already_linked = any(e.node == de_waypoint for e in node.edges)
         if not already_linked:
             new_edge = Edge()
             new_edge.node = de_waypoint
             new_edge.action = action
             new_edge.top_vel = 0.55
             node.edges.append(new_edge)
             msg_store.update(node, query_meta, query, upsert=True)
         else:
             rospy.logerr("Edge already exist: Try deleting it first")
     else:
         rospy.logerr("Impossible to store in DB " + str(len(available)) +
                      " waypoints found after query")
         rospy.logerr("Available data: " + str(available))
 def add_edge(self, or_waypoint, de_waypoint, action):
     """Create an edge between two waypoints if it does not exist yet."""
     rospy.loginfo('Adding Edge from '+or_waypoint+' to '+de_waypoint+' using '+action)
     node_name = or_waypoint
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"name": node_name, "pointset": self.name}
     query_meta = {"pointset": self.name, "map": self.map}
     available = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(available) == 1:
         node = available[0][0]
         for existing in node.edges:
             if existing.node == de_waypoint:
                 # Destination already linked from this node.
                 rospy.logerr("Edge already exist: Try deleting it first")
                 break
         else:
             # No edge to the destination yet: build and store one.
             edge = Edge()
             edge.node = de_waypoint
             edge.action = action
             edge.top_vel = 0.55
             node.edges.append(edge)
             msg_store.update(node, query_meta, query, upsert=True)
     else:
         rospy.logerr("Impossible to store in DB "+str(len(available))+" waypoints found after query")
         rospy.logerr("Available data: "+str(available))
Пример #4
0
    def update_node_name(self, node_name, new_name):
        """Rename a node and update every edge id that references it.

        Args:
            node_name: current name of the node.
            new_name: name to give the node.
        """
        msg_store = MessageStoreProxy(collection='topological_maps')
        # The query retrieves the node with the given name from the given pointset.
        query = {"name": node_name, "pointset": self.name}
        # The meta-information restricts the search to this pointset/map.
        query_meta = {}
        query_meta["pointset"] = self.name
        query_meta["map"] = self.map
        # query() returns a list of (message, meta) tuples.
        available = msg_store.query(TopologicalNode._type, query, query_meta)

        if len(available) == 1:
            available[0][0].name = new_name
            # Also need to update all edges which involve the renamed node.
            # The meta query alone fetches every node of this pointset/map
            # (the previously built but never used message query dict has
            # been removed).
            allnodes_query_meta = {}
            allnodes_query_meta["pointset"] = self.name
            allnodes_query_meta["map"] = self.map
            # A list of (node, db_meta) tuples covering the whole pointset.
            allnodes_available = msg_store.query(TopologicalNode._type, {},
                                                 allnodes_query_meta)

            # Check the edges of each node for a reference to the node to be
            # renamed, and patch the edge id / destination in place.
            # NOTE(review): "node_name in edge.edge_id" is a substring test,
            # so a node called e.g. "Way1" also matches "Way10" edge ids --
            # confirm node names cannot be prefixes of one another.
            for node_ind, node_tpl in enumerate(allnodes_available):
                for edge_ind, edge in enumerate(node_tpl[0].edges):
                    # change names of the edges in other nodes, and update their values in the database
                    if node_tpl[
                            0].name != node_name and node_name in edge.edge_id:
                        allnodes_available[node_ind][0].edges[
                            edge_ind].edge_id = edge.edge_id.replace(
                                node_name, new_name)
                        # must also update the name of the node this edge goes to
                        allnodes_available[node_ind][0].edges[
                            edge_ind].node = new_name
                        curnode_query = {
                            "name": node_tpl[0].name,
                            "pointset": self.name
                        }
                        msg_store.update(allnodes_available[node_ind][0],
                                         allnodes_query_meta,
                                         curnode_query,
                                         upsert=True)

            # update all edge ids for this node
            for edge_ind, edge in enumerate(available[0][0].edges):
                available[0][0].edges[edge_ind].edge_id = edge.edge_id.replace(
                    node_name, new_name)

            msg_store.update(available[0][0], query_meta, query, upsert=True)
        else:
            rospy.logerr("Impossible to store in DB " + str(len(available)) +
                         " waypoints found after query")
            rospy.logerr("Available data: " + str(available))
Пример #5
0
    def add_edge(self, or_waypoint, de_waypoint, action, edge_id):
        """Add an edge between two waypoints, generating an id if needed.

        Returns True on success, False when the origin node is not found
        exactly once in the database.
        """
        rospy.loginfo('Adding Edge from ' + or_waypoint + ' to ' +
                      de_waypoint + ' using ' + action)
        node_name = or_waypoint

        msg_store = MessageStoreProxy(collection='topological_maps')
        query = {"name": node_name, "pointset": self.nodes.name}
        query_meta = {"pointset": self.nodes.name, "map": self.nodes.map}

        available = msg_store.query(
            strands_navigation_msgs.msg.TopologicalNode._type, query,
            query_meta)
        if len(available) != 1:
            rospy.logerr("Impossible to store in DB " + str(len(available)) +
                         " waypoints found after query")
            rospy.logerr("Available data: " + str(available))
            return False

        node = available[0][0]
        eids = [e.edge_id for e in node.edges]

        if edge_id and edge_id not in eids:
            # The caller supplied a usable, not-yet-taken id.
            eid = edge_id
        else:
            # Derive a unique id from the endpoints, appending a counter
            # until it no longer collides.
            eid = '%s_%s' % (or_waypoint, de_waypoint)
            suffix = 0
            while eid in eids:
                eid = '%s_%s_%03d' % (or_waypoint, de_waypoint, suffix)
                suffix += 1

        edge = strands_navigation_msgs.msg.Edge()
        edge.node = de_waypoint
        edge.action = action
        edge.top_vel = 0.55
        edge.edge_id = eid
        edge.map_2d = node.map

        node.edges.append(edge)
        msg_store.update(node, query_meta, query, upsert=True)
        return True
 def update_node_waypoint(self, node_name, new_pose):
     """Replace the node's pose while keeping its stored z position."""
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"name": node_name, "pointset": self.name}
     query_meta = {"pointset": self.name, "map": self.map}
     available = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(available) == 1:
         node = available[0][0]
         # Preserve the stored height; only x/y/orientation come from new_pose.
         old_z = node.pose.position.z
         node.pose = new_pose
         node.pose.position.z = old_z
         msg_store.update(node, query_meta, query, upsert=True)
     else:
         rospy.logerr("Impossible to store in DB "+str(len(available))+" waypoints found after query")
         rospy.logerr("Available data: "+str(available))
Пример #7
0
 def update_node_waypoint(self, node_name, new_pose):
     """Move a waypoint to new_pose, retaining the previous z coordinate."""
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"name": node_name, "pointset": self.name}
     query_meta = {}
     query_meta["pointset"] = self.name
     query_meta["map"] = self.map
     matches = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(matches) != 1:
         rospy.logerr("Impossible to store in DB " + str(len(matches)) +
                      " waypoints found after query")
         rospy.logerr("Available data: " + str(matches))
         return
     target = matches[0][0]
     # Keep the previously stored z position across the pose replacement.
     kept_z = target.pose.position.z
     target.pose = new_pose
     target.pose.position.z = kept_z
     msg_store.update(target, query_meta, query, upsert=True)
 def update_node_vertex(self, node_name, vertex_index, vertex_pose):
     """Move one vertex of the node's polygon to the given absolute pose."""
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"name": node_name, "pointset": self.name}
     query_meta = {"pointset": self.name, "map": self.map}
     available = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(available) == 1:
         node = available[0][0]
         # Vertices are stored as offsets from the node position, so
         # convert the absolute pose into a relative offset.
         node.verts[vertex_index].x = vertex_pose.position.x - node.pose.position.x
         node.verts[vertex_index].y = vertex_pose.position.y - node.pose.position.y
         msg_store.update(node, query_meta, query, upsert=True)
     else:
         rospy.logerr("Impossible to store in DB "+str(len(available))+" waypoints found after query")
         rospy.logerr("Available data: "+str(available))
Пример #9
0
    def update_node_tolerance(self, name, new_xy, new_yaw):
        """Set the xy and yaw goal tolerances of a node.

        Returns a (success, message) tuple; the message is always empty.
        """
        msg_store = MessageStoreProxy(collection='topological_maps')
        query = {"name": name, "pointset": self.name}
        query_meta = {}
        query_meta["pointset"] = self.name
        # NOTE(review): the map comes from self.nodes.map while the
        # pointset comes from self.name -- confirm the mix is intentional.
        query_meta["map"] = self.nodes.map
        available = msg_store.query(TopologicalNode._type, query, query_meta)
        if len(available) != 1:
            rospy.logerr("Impossible to store in DB "+str(len(available))+" waypoints found after query")
            rospy.logerr("Available data: "+str(available))
            return False, ""
        node = available[0][0]
        node.xy_goal_tolerance = new_xy
        node.yaw_goal_tolerance = new_yaw
        msg_store.update(node, query_meta, query, upsert=True)
        return True, ""
Пример #10
0
    def add_edge(self, or_waypoint, de_waypoint, action, edge_id):
        """Add an edge between two waypoints, generating an id if needed.

        Args:
            or_waypoint: origin waypoint name.
            de_waypoint: destination waypoint name.
            action: navigation action used to traverse the edge.
            edge_id: requested edge id; a new one is generated when this is
                empty or already in use on the origin node.

        Returns:
            True when the edge was stored, False when the origin node was
            not found exactly once in the database.
        """
        rospy.loginfo('Adding Edge from '+or_waypoint+' to '+de_waypoint+' using '+action)
        node_name = or_waypoint

        msg_store = MessageStoreProxy(collection='topological_maps')
        query = {"name" : node_name, "pointset": self.nodes.name}
        query_meta = {}
        query_meta["pointset"] = self.nodes.name
        query_meta["map"] = self.nodes.map

        available = msg_store.query(strands_navigation_msgs.msg.TopologicalNode._type, query, query_meta)
        if len(available) == 1 :
            # Collect existing edge ids to detect collisions.
            eids = []
            for i in available[0][0].edges :
                eids.append(i.edge_id)

            if not edge_id or edge_id in eids:
                test=0
                eid = '%s_%s' %(or_waypoint, de_waypoint)
                while eid in eids:
                    # Bug fix: zero-pad the counter ('%03d'); the previous
                    # '%3d' space-padded it, producing edge ids containing
                    # spaces such as 'A_B_  0'.
                    eid = '%s_%s_%03d' %(or_waypoint, de_waypoint, test)
                    test += 1
            else:
                eid=edge_id

            edge = strands_navigation_msgs.msg.Edge()
            edge.node = de_waypoint
            edge.action = action
            edge.top_vel = 0.55
            edge.edge_id = eid
            edge.map_2d = available[0][0].map

            available[0][0].edges.append(edge)

            msg_store.update(available[0][0], query_meta, query, upsert=True)
            return True
        else :
            rospy.logerr("Impossible to store in DB "+str(len(available))+" waypoints found after query")
            rospy.logerr("Available data: "+str(available))
            return False
Пример #11
0
def rename_node(name, new_name, top_map_name):
    """
    Renames a topological map node. All edges will be updated to reflect the change.

    @param name: the current name of the node to be changed
    @param new_name: the new name to give the node
    @param top_map_name: the name of the topological map to work with (pointset)

    @return (number of nodes changed, number of edges changed)
    """

    maps = queries.get_maps()
    if top_map_name not in maps:
        raise Exception("Unknown topological map.")

    msg_store = MessageStoreProxy(collection='topological_maps')

    # All (node, meta) pairs belonging to the requested pointset.
    nodes = msg_store.query(TopologicalNode._type, {},
                            {'pointset': top_map_name})
    node_names = [node.name for node, meta in nodes]
    node_renames = 0
    edge_changes = 0
    node_changes = 0

    if name not in node_names:
        raise Exception("No such node.")
    if new_name in node_names:
        raise Exception("New node name already in use.")

    old_metas = []
    for node, meta in nodes:
        # Keep a copy of the old meta: the update query below must match
        # on the *old* node name stored in the meta document.
        old_metas.append(copy.deepcopy(meta))
        # assumes every meta document has a "node" key -- TODO confirm
        if meta["node"] == name:
            meta["node"] = new_name
        if node.name == name:
            node.name = new_name
            node_renames += 1
            if node_renames > 1:
                raise Exception("More than one node has the same name!")
        for edge in node.edges:
            if edge.node == name:
                edge.node = new_name
            # NOTE: only edge_id substring replacements are counted in
            # edge_changes; a destination-only rename is not counted.
            if edge.edge_id.find(name) > -1:
                edge.edge_id = edge.edge_id.replace(name, new_name)
                edge_changes += 1

    # Save the changed nodes
    for ((node, meta), old_meta) in zip(nodes, old_metas):
        changed = msg_store.update(node, meta, {
            'name': old_meta['node'],
            'pointset': meta['pointset']
        })
        if changed.success:
            node_changes += 1

    return (node_changes, edge_changes)
Пример #12
0
 def update_node_vertex(self, node_name, vertex_index, vertex_pose):
     """Reposition one vertex of the node's polygon at vertex_pose."""
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"name": node_name, "pointset": self.name}
     query_meta = {}
     query_meta["pointset"] = self.name
     query_meta["map"] = self.map
     matches = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(matches) != 1:
         rospy.logerr("Impossible to store in DB " + str(len(matches)) +
                      " waypoints found after query")
         rospy.logerr("Available data: " + str(matches))
         return
     node = matches[0][0]
     # Store the vertex as an offset from the node's own position.
     offset_x = vertex_pose.position.x - node.pose.position.x
     offset_y = vertex_pose.position.y - node.pose.position.y
     node.verts[vertex_index].x = offset_x
     node.verts[vertex_index].y = offset_y
     msg_store.update(node, query_meta, query, upsert=True)
 def remove_edge(self, edge_name) :
     #print 'removing edge: '+edge_name
     rospy.loginfo('Removing Edge: '+edge_name)
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"edges.edge_id" : edge_name, "pointset": self.nodes.name}
     query_meta = {}
     query_meta["pointset"] = self.nodes.name
     query_meta["map"] = self.nodes.map
     available = msg_store.query(TopologicalNode._type, query, query_meta)
     
     if len(available) >= 1 :
         for i in available :
             print i[0]
             for j in i[0].edges:
                 if j.edge_id == edge_name :
                     i[0].edges.remove(j)
                     msg_store.update(i[0], query_meta, query, upsert=True)
     else :
         rospy.logerr("Impossible to store in DB "+str(len(available))+" waypoints found after query")
         rospy.logerr("Available data: "+str(available)) 
def rename_node(name, new_name, top_map_name):
    """
    Renames a topological map node. All edges will be updated to reflect the change.

    @param name: the current name of the node to be changed
    @param new_name: the new name to give the node
    @param top_map_name: the name of the topological map to work with (pointset)

    @return (number of nodes changed, number of edges changed)
    """

    maps = queries.get_maps()
    if top_map_name not in maps:
        raise Exception("Unknown topological map.")

    msg_store = MessageStoreProxy(collection='topological_maps')
    
    # All (node, meta) pairs belonging to the requested pointset.
    nodes = msg_store.query(TopologicalNode._type, {}, {'pointset':top_map_name})
    node_names = [node.name for node,meta in nodes]
    node_renames = 0
    edge_changes = 0
    node_changes = 0

    if name not in node_names:
        raise Exception("No such node.")
    if new_name in node_names:
        raise Exception("New node name already in use.")

    old_metas = []
    for node, meta in nodes:
        # Preserve the old meta: the update query below matches on the
        # *old* node name held in the meta document.
        old_metas.append(copy.deepcopy(meta))
        # assumes every meta document has a "node" key -- TODO confirm
        if meta["node"] == name:
            meta["node"] = new_name
        if node.name == name:
            node.name = new_name
            node_renames += 1
            if node_renames > 1:
                raise Exception("More than one node has the same name!")
        for edge in node.edges:
            if edge.node == name:
                edge.node = new_name
            # NOTE: only edge_id substring replacements are counted in
            # edge_changes; a destination-only rename is not counted.
            if edge.edge_id.find(name) > -1:
                edge.edge_id = edge.edge_id.replace(name, new_name)
                edge_changes += 1
            
    # Save the changed nodes
    for ((node, meta), old_meta) in zip(nodes, old_metas):
        changed = msg_store.update(node, meta, {'name':old_meta['node'],
                                                'pointset':meta['pointset']})
        if changed.success:
            node_changes += 1

    return (node_changes, edge_changes)
Пример #15
0
    def remove_edge(self, edge_name):
        #print 'removing edge: '+edge_name
        rospy.loginfo('Removing Edge: ' + edge_name)
        msg_store = MessageStoreProxy(collection='topological_maps')
        query = {"edges.edge_id": edge_name, "pointset": self.nodes.name}
        query_meta = {}
        query_meta["pointset"] = self.nodes.name
        query_meta["map"] = self.nodes.map
        available = msg_store.query(TopologicalNode._type, query, query_meta)

        if len(available) >= 1:
            for i in available:
                print i[0]
                for j in i[0].edges:
                    if j.edge_id == edge_name:
                        i[0].edges.remove(j)
                        msg_store.update(i[0], query_meta, query, upsert=True)
        else:
            rospy.logerr("Impossible to store in DB " + str(len(available)) +
                         " waypoints found after query")
            rospy.logerr("Available data: " + str(available))
 def remove_edge(self, edge_name):
     """Remove the edge named 'origin_destination' from its origin node."""
     rospy.loginfo('Removing Edge: '+edge_name)
     # NOTE(review): assumes waypoint names contain no underscores, so the
     # first token is the origin and the second the destination.
     edged = edge_name.split('_')
     node_name = edged[0]
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"name" : node_name, "pointset": self.name}
     query_meta = {}
     query_meta["pointset"] = self.name
     query_meta["map"] = self.map
     available = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(available) == 1:
         # Bug fix: filter the list instead of calling remove() while
         # iterating it, which skips the element after each removal.
         available[0][0].edges = [i for i in available[0][0].edges
                                  if i.node != edged[1]]
         msg_store.update(available[0][0], query_meta, query, upsert=True)
     else:
         rospy.logerr("Impossible to store in DB "+str(len(available))+" waypoints found after query")
         rospy.logerr("Available data: "+str(available))
Пример #17
0
 def remove_edge(self, edge_name):
     """Remove the edge named 'origin_destination' from its origin node."""
     rospy.loginfo('Removing Edge: ' + edge_name)
     # NOTE(review): assumes waypoint names contain no underscores;
     # 'A_B' splits into origin 'A' and destination 'B'.
     edged = edge_name.split('_')
     node_name = edged[0]
     msg_store = MessageStoreProxy(collection='topological_maps')
     query = {"name": node_name, "pointset": self.name}
     query_meta = {}
     query_meta["pointset"] = self.name
     query_meta["map"] = self.map
     available = msg_store.query(TopologicalNode._type, query, query_meta)
     if len(available) == 1:
         # Bug fix: filter the list instead of calling remove() while
         # iterating it, which skips the element after each removal.
         available[0][0].edges = [i for i in available[0][0].edges
                                  if i.node != edged[1]]
         msg_store.update(available[0][0], query_meta, query, upsert=True)
     else:
         rospy.logerr("Impossible to store in DB " + str(len(available)) +
                      " waypoints found after query")
         rospy.logerr("Available data: " + str(available))
Пример #18
0
class SkeletonManager(object):
    """To deal with Skeleton messages once they are published as incremental msgs by OpenNI2."""
    def __init__(self):
        """Set up parameters, data buffers, the output directory, ROS
        subscribers/publishers and (optionally) the mongodb connections."""

        self.record_rgb = rospy.get_param("~rec_rgb", True)
        print "recording RGB images: %s" % self.record_rgb  # to deal with the anonymous setting at tsc

        self.accumulate_data = {}  # accumulates multiple skeleton msg
        self.accumulate_robot = {}  # accumulates multiple skeleton msg
        self.sk_mapping = {}  # per-uuid metadata (date/time/meta dir) used by the image logging

        self.soma_map = rospy.get_param("~soma_map",
                                        "collect_data_map_cleaned")
        # self.soma_config = rospy.get_param("~soma_config", "test")

        self.map_info = "don't know"  # topological map name
        self.current_node = "don't care"  # topological node waypoint
        self.robot_pose = Pose()  # pose of the robot
        self.ptu_pan = self.ptu_tilt = 0.0

        self.reduce_frame_rate_by = rospy.get_param("~frame_rate_reduce",
                                                    8)  # roughly: 3-4Hz
        self.max_num_frames = rospy.get_param("~max_frames",
                                              500)  # roughly 2mins
        self.soma_roi_store = MessageStoreProxy(database='somadata',
                                                collection='roi')

        # directory to store the data, one folder per day
        self.date = str(datetime.datetime.now().date())

        self.dir1 = '/home/' + getpass.getuser(
        ) + '/SkeletonDataset/no_consent/' + self.date + '/'
        if not os.path.exists(self.dir1):
            print '  -create folder:', self.dir1
            os.makedirs(self.dir1)

        # flags to make sure we received every thing
        self._flag_robot = 0
        self._flag_node = 0
        self._flag_rgb = 0
        #self._flag_rgb_sk = 0
        self._flag_depth = 0

        # camera intrinsics -- presumably fx/fy focal lengths and cx/cy
        # principal point in pixels; TODO confirm against the camera used
        self.fx = 525.0
        self.fy = 525.0
        self.cx = 319.5
        self.cy = 239.5
        # depth threshold on recordings
        self.dist_thresh = rospy.get_param("~dist_thresh", 1.5)

        # open cv stuff
        self.cv_bridge = CvBridge()

        # Get rosparams
        # NOTE(review): the default here is the *string* "True"; any
        # non-empty string is truthy, so a param value of "False" would
        # still enable logging -- confirm intended.
        self._with_logging = rospy.get_param("~log_to_mongo", "True")
        self._message_store = rospy.get_param("~message_store",
                                              "people_skeleton")
        self._database = rospy.get_param("~database", "message_store")
        self.camera = "head_xtion"

        self.restrict_to_rois = rospy.get_param("~use_roi", False)

        if self.restrict_to_rois:
            self.roi_config = rospy.get_param("~roi_config", "test")
            # SOMA services
            rospy.loginfo("Wait for soma roi service")
            rospy.wait_for_service('/soma/query_rois')
            self.soma_query = rospy.ServiceProxy('/soma/query_rois',
                                                 SOMAQueryROIs)
            rospy.loginfo("Done")

            self.get_soma_rois()
            print "restricted to soma ROI: %s. %s" % (self.restrict_to_rois,
                                                      self.roi_config)

        # listeners
        rospy.Subscriber("skeleton_data/incremental", skeleton_message,
                         self.incremental_callback)
        rospy.Subscriber('/' + self.camera + '/rgb/image_color',
                         sensor_msgs.msg.Image,
                         callback=self.rgb_callback,
                         queue_size=10)
        # rospy.Subscriber('/'+self.camera+'/rgb/sk_tracks', sensor_msgs.msg.Image, callback=self.rgb_sk_callback, queue_size=10)
        rospy.Subscriber('/' + self.camera + '/depth/image',
                         sensor_msgs.msg.Image,
                         self.depth_callback,
                         queue_size=10)
        rospy.Subscriber("/robot_pose",
                         Pose,
                         callback=self.robot_callback,
                         queue_size=10)
        rospy.Subscriber("/current_node",
                         String,
                         callback=self.node_callback,
                         queue_size=1)
        rospy.Subscriber("/ptu/state",
                         sensor_msgs.msg.JointState,
                         callback=self.ptu_callback,
                         queue_size=1)
        self.topo_listerner = rospy.Subscriber("/topological_map",
                                               TopologicalMap,
                                               self.map_callback,
                                               queue_size=10)
        rospy.Subscriber("skeleton_data/state", skeleton_tracker_state,
                         self.state_callback)

        # publishers:
        # self.publish_incr = rospy.Publisher('skeleton_data/incremental', skeleton_message, queue_size = 10)
        self.publish_comp = rospy.Publisher('skeleton_data/complete',
                                            SkeletonComplete,
                                            queue_size=10)
        self.rate = rospy.Rate(15.0)

        # mongo store
        if self._with_logging:
            rospy.loginfo("Connecting to mongodb...%s" % self._message_store)
            self._store_client = MessageStoreProxy(
                collection=self._message_store, database=self._database)
            self.learning_store_client = MessageStoreProxy(
                collection="activity_learning", database=self._database)

    def get_soma_rois(self):
        """Restrict the logging to certain soma regions only.

        Queries the most recent ROIs for the configured roi_config and
        caches them as Polygon objects keyed by '<type>_<id>'. The ROI is
        logged along with the detection - to be used in the learning.
        """
        self.rois = {}
        request = SOMAQueryROIsRequest(query_type=0,
                                       roiconfigs=[self.roi_config],
                                       returnmostrecent=True)
        for region in self.soma_query(request).rois:
            # Keep only regions belonging to our map and configuration.
            if region.map_name != self.soma_map or region.config != self.roi_config:
                continue
            key = region.type + "_" + region.id
            corners = [(p.position.x, p.position.y)
                       for p in region.posearray.poses]
            self.rois[key] = Polygon(corners)

    def convert_to_world_frame(self, pose, robot_msg):
        """Convert a single camera frame coordinate into a map frame coordinate.

        Args:
            pose: point in the camera frame (axes remapped below).
            robot_msg: robot state holding robot_pose plus PTU_pan/PTU_tilt.

        Returns:
            Point with the person's position in the map frame.
        """
        # Bug fix: removed fx/fy/cx/cy locals -- they were assigned but
        # never used (the same values already live on self in __init__).

        # Remap camera axes: the camera's depth (pose.z) becomes x, etc.
        y, z, x = pose.x, pose.y, pose.z

        xr = robot_msg.robot_pose.position.x
        yr = robot_msg.robot_pose.position.y
        zr = robot_msg.robot_pose.position.z
        ax = robot_msg.robot_pose.orientation.x
        ay = robot_msg.robot_pose.orientation.y
        az = robot_msg.robot_pose.orientation.z
        aw = robot_msg.robot_pose.orientation.w
        roll, pr, yawr = euler_from_quaternion([ax, ay, az, aw])

        # The PTU joint angles add onto the robot's base orientation.
        yawr += robot_msg.PTU_pan
        pr += robot_msg.PTU_tilt

        # transformation from camera to map: pitch about y, then yaw about z
        rot_y = np.matrix([[np.cos(pr), 0, np.sin(pr)], [0, 1, 0],
                           [-np.sin(pr), 0, np.cos(pr)]])
        rot_z = np.matrix([[np.cos(yawr), -np.sin(yawr), 0],
                           [np.sin(yawr), np.cos(yawr), 0], [0, 0, 1]])
        rot = rot_z * rot_y

        # Robot's position in map frame; 1.66 is presumably the camera
        # height above the base in metres -- TODO confirm.
        pos_r = np.matrix([[xr], [yr], [zr + 1.66]])
        # Person's position in camera frame.
        pos_p = np.matrix([[x], [-y], [-z]])

        map_pos = rot * pos_p + pos_r  # person's position in map frame
        x_mf = map_pos[0, 0]
        y_mf = map_pos[1, 0]
        z_mf = map_pos[2, 0]

        return Point(x_mf, y_mf, z_mf)

    def _publish_complete_data(self, subj, uuid, vis=False):
        """when user goes "out of scene" publish their accumulated data

        Publishes a SkeletonComplete message for `uuid`, optionally upserts
        it to mongo (plus a blank HumanActivities doc for later activity
        learning), then drops the user's accumulated state.
        NOTE(review): the `vis` parameter is dead - it is unconditionally
        overwritten with False below before it is read.
        """
        # print ">> publishing these: ", uuid, len(self.accumulate_data[uuid])

        # start/end times of the whole detection window
        st = self.accumulate_data[uuid][0].time
        en = self.accumulate_data[uuid][-1].time
        # print ">>", self.accumulate_data[uuid][0].joints[0].pose

        # First observed joint position, converted to the map frame using the
        # robot pose/PTU state captured alongside the first detection.
        first_pose = self.accumulate_data[uuid][0].joints[0].pose.position
        robot_msg = self.accumulate_robot[uuid][0]
        first_map_point = self.convert_to_world_frame(first_pose, robot_msg)

        if self.restrict_to_rois:
            # Record which SOMA region the person was first seen in.
            for key, polygon in self.rois.items():
                if polygon.contains(
                        Point([first_map_point.x, first_map_point.y])):
                    f1 = open(self.sk_mapping[uuid]['meta'] + '/meta.txt', 'w')
                    f1.write('person_roi: %s' % key)
                    f1.close()

        vis = False
        if vis:
            print ">>>"
            print "storing: ", uuid, type(uuid)
            print "date: ", self.date  #, type(self.date)
            print "number of detectons: ", len(
                self.accumulate_data[uuid]
            )  #, type(len(self.accumulate_data[uuid]))
            print "map name: %s. (x,y) Position: (%s,%s)" % (
                self.soma_map, first_map_point.x, first_map_point.y
            )  #, type(self.map_info)
            print "current node: ", self.current_node, type(self.current_node)
            print "start/end rostime:", st, en  #type(st), en, type(en)

        msg = SkeletonComplete( uuid = uuid, \
                                date = self.sk_mapping[uuid]['date'], \
                                time = self.sk_mapping[uuid]['time'], \
                                skeleton_data = self.accumulate_data[uuid], \
                                number_of_detections = len(self.accumulate_data[uuid]), \
                                map_name = self.soma_map, current_topo_node = self.current_node, \
                                start_time = st, end_time = en, robot_data = self.accumulate_robot[uuid], \
                                human_map_point = first_map_point)

        self.publish_comp.publish(msg)
        rospy.loginfo("User #%s: published %s msgs as %s" %
                      (subj, len(self.accumulate_data[uuid]), msg.uuid))

        if self._with_logging:
            query = {"uuid": msg.uuid}
            #self._store_client.insert(traj_msg, meta)
            # Upsert so a repeated uuid overwrites rather than duplicates.
            self._store_client.update(message=msg,
                                      message_query=query,
                                      upsert=True)

            # add a blank activity learning message here:
            msg = HumanActivities(uuid=uuid, date=self.sk_mapping[uuid]['date'], time=self.sk_mapping[uuid]['time'], \
                                  map_point=first_map_point, cpm=False, start_time=self.accumulate_data[uuid][0].time, \
                                  end_time=self.accumulate_data[uuid][-1].time, qsrs=False, activity=False, topics=[], temporal=False)
            # print "here ", msg
            self.learning_store_client.insert(message=msg)

        # remove the user from the users dictionary and the accumulated data dict.
        del self.accumulate_data[uuid]
        del self.sk_mapping[uuid]

    def _dump_images(self, msg):
        """For each incremental skeleton - dump an rgb/depth image to file"""

        self.inc_sk = msg
        if str(datetime.datetime.now().date()) != self.date:
            print 'new day!'
            self.date = str(datetime.datetime.now().date())
            self.dir1 = '/home/' + getpass.getuser(
            ) + '/SkeletonDataset/no_consent/' + self.date + '/'
            print 'checking if folder exists:', self.dir1
            if not os.path.exists(self.dir1):
                print '  -create folder:', self.dir1
                os.makedirs(self.dir1)

        if self.sk_mapping[
                msg.uuid]["state"] is 'Tracking' and self.sk_mapping[
                    msg.uuid]["frame"] is 1:

            self.sk_mapping[msg.uuid]['time'] = str(
                datetime.datetime.now().time()).split('.')[0]
            t = self.sk_mapping[msg.uuid]['time'] + '_'
            print '  -new skeleton detected with id:', msg.uuid
            new_dir = self.dir1 + self.date + '_' + t + msg.uuid  #+'_'+waypoint
            self.sk_mapping[msg.uuid]['meta'] = new_dir

            if not os.path.exists(new_dir):
                os.makedirs(new_dir)
                os.makedirs(new_dir + '/depth')
                os.makedirs(new_dir + '/robot')
                os.makedirs(new_dir + '/skeleton')
                if self.record_rgb:
                    os.makedirs(new_dir + '/rgb')
                    #os.makedirs(new_dir+'/rgb_sk')

                # create the empty bag file (closed in /skeleton_action)
                # self.bag_file = rosbag.Bag(new_dir+'/detection.bag', 'w')

        t = self.sk_mapping[self.inc_sk.uuid]['time'] + '_'
        new_dir = self.dir1 + self.date + '_' + t + self.inc_sk.uuid  #+'_'+waypoint
        if os.path.exists(new_dir):
            # setup saving dir and frame
            d = new_dir + '/'
            f = self.sk_mapping[self.inc_sk.uuid]['frame']
            if f < 10: f_str = '0000' + str(f)
            elif f < 100: f_str = '000' + str(f)
            elif f < 1000: f_str = '00' + str(f)
            elif f < 10000: f_str = '0' + str(f)
            elif f < 100000: f_str = str(f)

            # save images
            if self.record_rgb:
                cv2.imwrite(d + 'rgb/rgb_' + f_str + '.jpg', self.rgb)
                #cv2.imwrite(d+'rgb_sk/sk_'+f_str+'.jpg', self.rgb_sk)
            cv2.imwrite(d + 'depth/depth_' + f_str + '.jpg',
                        self.xtion_img_d_rgb)

            # try:
            #     self.bag_file.write('rgb', self.rgb_msg)
            #     self.bag_file.write('depth', self.depth_msg)
            #     self.bag_file.write('rgb_sk', self.rgb_sk_msg)
            # except:
            #     rospy.logwarn("Can not write rgb, depth, and rgb_sk to a bag file.")

            x = float(self.robot_pose.position.x)
            y = float(self.robot_pose.position.y)
            z = float(self.robot_pose.position.z)
            xo = float(self.robot_pose.orientation.x)
            yo = float(self.robot_pose.orientation.y)
            zo = float(self.robot_pose.orientation.z)
            wo = float(self.robot_pose.orientation.w)
            p = Point(x, y, z)
            q = Quaternion(xo, yo, zo, wo)
            robot = Pose(p, q)

            pan = float(self.ptu_pan)
            tilt = float(self.ptu_tilt)

            # try:
            #     self.bag_file.write('robot', robot)
            # except:
            #    rospy.logwarn("cannot write the robot position to bag. Has the bag been closed already?")

            # save robot data  in text file
            f1 = open(d + 'robot/robot_' + f_str + '.txt', 'w')
            f1.write('position\n')
            f1.write('x:' + str(x) + '\n')
            f1.write('y:' + str(y) + '\n')
            f1.write('z:' + str(z) + '\n')
            f1.write('orientation\n')
            f1.write('x:' + str(xo) + '\n')
            f1.write('y:' + str(yo) + '\n')
            f1.write('z:' + str(zo) + '\n')
            f1.write('w:' + str(wo) + '\n')
            f1.write('ptu_state\n')
            f1.write('ptu_pan:' + str(pan) + '\n')
            f1.write('ptu_tilt:' + str(tilt) + '\n')
            f1.close()

            #save the SOMA roi to file: Now uses the person map frame roi
            # if self.restrict_to_rois:
            #     f1 = open(d+'/meta.txt','w')
            #     f1.write('robot_roi: %s' % self.roi)
            #     f1.close()

            # save skeleton data in bag file
            #x=float(self.robot_pose.position.x)
            #y=float(self.robot_pose.position.y)
            #z=float(self.robot_pose.position.z)
            #xo=float(self.robot_pose.orientation.x)
            #yo=float(self.robot_pose.orientation.y)
            #zo=float(self.robot_pose.orientation.z)
            #wo=float(self.robot_pose.orientation.w)
            #p = Point(x, y, z)
            #q = Quaternion(xo, yo, zo, wo)
            #skel = Pose(p,q)
            #bag.write('skeleton', skel)

            # save skeleton data in text file
            f1 = open(d + 'skeleton/skl_' + f_str + '.txt', 'w')
            f1.write('time:' + str(self.inc_sk.time.secs) + '.' +
                     str(self.inc_sk.time.nsecs) + '\n')

            #%todo: fix the 2d positions later - they're not used atm
            for i in self.inc_sk.joints:
                p = i.pose.position

                x2d = int(p.x * self.fx / p.z + self.cx)
                y2d = int(p.y * self.fy / p.z + self.cy)
                f1.write(i.name + "," + str(x2d) + "," + str(y2d) + "," +
                         str(p.x) + "," + str(p.y) + "," + str(p.z) + "," +
                         str(i.confidence) + '\n')
                # f1.write('position\n')
                # f1.write('x:'+str(i.pose.position.x)+'\n')
                # f1.write('y:'+str(i.pose.position.y)+'\n')
                # f1.write('z:'+str(i.pose.position.z)+'\n')
                # f1.write('orientation\n')
                # f1.write('x:'+str(i.pose.orientation.x)+'\n')
                # f1.write('y:'+str(i.pose.orientation.y)+'\n')
                # f1.write('z:'+str(i.pose.orientation.z)+'\n')
                # f1.write('w:'+str(i.pose.orientation.w)+'\n')
                # f1.write('confidence:'+str(i.confidence)+'\n')
            f1.close()

            # update frame number
            if self.inc_sk.uuid in self.sk_mapping:
                self.sk_mapping[self.inc_sk.uuid]['frame'] += 1

            #publish the gaze request of person on every detection:
            # if self.inc_sk.joints[0].name == 'head':
            #     head = Header(frame_id='head_xtion_depth_optical_frame')
            #     look_at_pose = PoseStamped(header = head, pose=self.inc_sk.joints[0].pose)
            #     self.publish_consent_pose.publish(look_at_pose)
            # #self.gazeClient.send_goal(self.gazegoal)

    def incremental_callback(self, msg):
        """accumulate the multiple skeleton messages until user goes out of scene

        Counts every message for a tracked user, and every
        `reduce_frame_rate_by`-th message is stored (skeleton + robot state)
        and dumped to disk.
        """

        # Only log once the robot pose and both camera streams have arrived.
        if self._flag_robot and self._flag_rgb and self._flag_depth:

            if msg.uuid in self.sk_mapping:
                # FIX: was `is 'Tracking'` - identity comparison on a string
                # literal only works via CPython interning; use equality.
                if self.sk_mapping[msg.uuid]["state"] == 'Tracking' and len(self.accumulate_data[msg.uuid]) < self.max_num_frames \
                and msg.joints[0].pose.position.z > self.dist_thresh:

                    self.sk_mapping[msg.uuid]["msgs_recieved"] += 1
                    # sub-sample the incoming stream to reduce the frame rate
                    if self.sk_mapping[msg.uuid][
                            "msgs_recieved"] % self.reduce_frame_rate_by == 0:
                        self.accumulate_data[msg.uuid].append(msg)
                        robot_msg = robot_message(robot_pose=self.robot_pose,
                                                  PTU_pan=self.ptu_pan,
                                                  PTU_tilt=self.ptu_tilt)
                        self.accumulate_robot[msg.uuid].append(robot_msg)
                        self._dump_images(msg)
                        # print msg.userID, msg.uuid, len(self.accumulate_data[msg.uuid])

    def new_user_detected(self, msg):
        """Initialise tracking state and empty data buffers for a new user."""
        today = str(datetime.datetime.now().date())
        fresh_state = {
            "state": 'Tracking',
            "frame": 1,
            "msgs_recieved": 1,
            "date": today
        }
        self.sk_mapping[msg.uuid] = fresh_state
        self.accumulate_data[msg.uuid] = []
        self.accumulate_robot[msg.uuid] = []

    def state_callback(self, msg):
        """Reads the state messages from the openNi tracker"""
        # print msg.uuid, msg.userID, msg.message
        message = msg.message
        if message == "Tracking":
            self.new_user_detected(msg)
            return
        known = msg.uuid in self.sk_mapping
        if message == "Out of Scene" and known:
            self.sk_mapping[msg.uuid]["state"] = "Out of Scene"
        elif message == "Visible" and known:
            self.sk_mapping[msg.uuid]["state"] = "Tracking"
        elif message == "Stopped tracking" and msg.uuid in self.accumulate_data:
            # only publish if any data was captured for this user
            if self.accumulate_data[msg.uuid]:
                self._publish_complete_data(msg.userID, msg.uuid)

    def robot_callback(self, msg):
        """Cache the robot pose; when restricted to ROIs, only set the robot
        flag while the robot sits inside one of the known SOMA polygons."""
        self.robot_pose = msg
        if not self.restrict_to_rois:
            if self._flag_robot == 0:
                self._flag_robot = 1
            return

        in_a_roi = 0
        for key, polygon in self.rois.items():
            if polygon.contains(Point([msg.position.x, msg.position.y])):
                in_a_roi = 1
                self.roi = key
                self._flag_robot = 1
        if in_a_roi == 0:
            self._flag_robot = 0

    def ptu_callback(self, msg):
        """Cache the latest PTU pan/tilt joint positions."""
        pan, tilt = msg.position
        self.ptu_pan = pan
        self.ptu_tilt = tilt

    def node_callback(self, msg):
        """Cache the current topological node name from /current_node."""
        self.current_node = msg.data
        # announce only the first message received
        if self._flag_node == 0:
            print ' >current node received'
            self._flag_node = 1

    def map_callback(self, msg):
        """One-shot: record the topological map name, then stop listening."""
        self.map_info = msg.map
        self.topo_listerner.unregister()

    def rgb_callback(self, msg):
        # self.rgb_msg = msg
        rgb = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
        self.rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        if self._flag_rgb is 0:
            print ' >rgb image received'
            self._flag_rgb = 1

    #def rgb_sk_callback(self, msg):
    #    # self.rgb_sk_msg = msg
    #    rgb_sk = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
    #    self.rgb_sk = cv2.cvtColor(rgb_sk, cv2.COLOR_RGB2BGR)
    #    if self._flag_rgb_sk is 0:
    #        print ' >rgb skel image received'
    #        self._flag_rgb_sk = 1

    def depth_callback(self, msg):
        # self.depth_msg = msg
        depth_image = self.cv_bridge.imgmsg_to_cv2(
            msg, desired_encoding="passthrough")
        depth_array = np.array(depth_image, dtype=np.float32)
        ind = np.argwhere(np.isnan(depth_array))
        depth_array[ind[:, 0], ind[:, 1]] = 4.0
        depth_array *= 255 / 4.0
        self.xtion_img_d_rgb = depth_array.astype(np.uint8)

        if self._flag_depth is 0:
            print ' >depth image received'
            self._flag_depth = 1
Пример #19
0
class Learning_server(object):
    """Actionlib server that incrementally learns human activity models.

    Within a caller-supplied time budget it repeatedly: queries mongo for
    un-processed skeleton recordings, encodes them into QSRs, builds
    histograms against the last known code book and updates an online-LDA
    model, writing the learned topic distributions back to mongo.
    """

    def __init__(self, name="learn_human_activities"):

        # Start server
        rospy.loginfo("%s action server" % name)
        self._action_name = name
        self._as = actionlib.SimpleActionServer(self._action_name,
                                                LearningActivitiesAction,
                                                execute_cb=self.execute,
                                                auto_start=False)
        self._as.start()

        self.recordings = "no_consent"
        # self.recordings = "ecai_Recorded_Data"
        self.path = '/home/' + getpass.getuser() + '/SkeletonDataset/'
        self.ol = Offline_ActivityLearning(self.path, self.recordings)
        self.batch_size = self.ol.config['events']['batch_size']
        # One HumanActivities document per recording lives in this collection.
        self.learn_store = MessageStoreProxy(database='message_store',
                                             collection='activity_learning')
        # Bookkeeping messages (QSRProgress, LastKnownLearningPoint).
        self.msg_store = MessageStoreProxy(
            database='message_store', collection='activity_learning_stats')
        #self.cpm_flag = rospy.get_param("~use_cpm", False)

        self.last_learn_date = ""
        self.last_learn_success = bool()

    def cond(self):
        # True when this goal should stop: preempted, or time budget spent.
        if self._as.is_preempt_requested() or (
                rospy.Time.now() - self.start).secs > self.duration.secs:
            return True
        return False

    def query_msg_store(self):
        """Queries the database and gets some data to learn on."""
        # Recordings are processed once: every pipeline flag still False.
        query = {
            "cpm": False,
            "qsrs": False,
            "activity": False,
            "temporal": False
        }
        # With CPM enabled, only learn from recordings that have CPM output.
        if self.ol.config["events"]["use_cpm"] == True:
            query["cpm"] = True

        #query = {"cpm":True}
        result = self.learn_store.query(type=HumanActivities._type,
                                        message_query=query,
                                        limit=self.batch_size)
        uuids = []
        for (ret, meta) in result:
            uuids.append(ret)
        print "query = %s. Ret: %s:%s" % (query, len(result), len(uuids))
        return uuids

    def execute(self, goal):
        """Action callback: loop learning batches until done/preempted/timed out."""

        print "\nLearning Goal: %s seconds." % goal.duration.secs
        print "batch size max: %s" % self.ol.config['events']['batch_size']

        self.duration = goal.duration
        self.start = rospy.Time.now()
        self.end = rospy.Time.now()

        self.ol.get_soma_rois()  #get SOMA ROI Info
        self.ol.get_soma_objects()  #get SOMA Objects Info
        #self.get_last_learn_date()
        self.get_last_oLDA_msg()

        learnt_anything = False

        while not self.cond():
            uuids_to_process = self.query_msg_store()
            if len(uuids_to_process) == 0:
                print ">> no uuids remaining to process"
                break

            dates = set([r.date for r in uuids_to_process])

            #to get the folder names vs uuid
            uuids_dict = {}
            for date in dates:
                for file_name in sorted(os.listdir(
                        os.path.join(self.path, self.recordings, date)),
                                        reverse=False):
                    # recording folder names end in "..._<uuid>"
                    k = file_name.split("_")[-1]
                    uuids_dict[k] = file_name

            batch = []
            for ret in uuids_to_process:
                if self.cond(): break

                try:
                    file_name = uuids_dict[ret.uuid]
                except KeyError:
                    print "--no folder for: %s" % ret.uuid
                    continue
                if self.ol.get_events(
                        ret.date,
                        file_name):  #convert skeleton into world frame coords
                    self.ol.encode_qsrs_sequentially(
                        ret.date, file_name)  #encode the observation into QSRs
                    #ret.qsrs = True
                    #self.learn_store.update(message=ret, message_query={"uuid":ret.uuid}, upsert=True)
                    batch.append(file_name)
                    learn_date = ret.date
                self.end = rospy.Time.now()
            if self.cond(): break

            #restrict the learning to only use this batch of uuids
            if len(batch) == 0: continue  #break
            self.ol.batch = batch
            # print "batch uuids: ", batch
            print "learning date:", learn_date

            if self.cond(): break
            # self.ol.make_temp_histograms_online(learn_date, self.last_learn_date)  # create histograms with local code book
            new_olda_ret, ret = self.ol.make_histograms_online(
                learn_date,
                self.last_olda_ret)  # create histograms with local code book
            gamma_uuids = ret[0]
            feature_space = (ret[1], ret[2])

            # if self.cond(): break
            # gamma_uuids, feature_space = self.ol.make_term_doc_matrix(learn_date)  # create histograms with gloabl code book

            if self.cond(): break
            new_olda_ret, gamma = self.ol.online_lda_activities(
                learn_date, feature_space,
                new_olda_ret)  # run the new feature space into oLDA
            #self.update_last_learning(learn_date, True)

            self.update_learned_uuid_topics(uuids_to_process, gamma_uuids,
                                            gamma)
            self.last_olda_ret = new_olda_ret
            rospy.loginfo("completed learning loop: %s" % learn_date)
            learnt_anything = True
            self.end = rospy.Time.now()

        if self._as.is_preempt_requested():
            rospy.loginfo('%s: Preempted' % self._action_name)
            self._as.set_preempted(LearningActivitiesResult())

        elif (rospy.Time.now() - self.start).secs > self.duration.secs:
            rospy.loginfo('%s: Timed out' % self._action_name)
            self._as.set_preempted(LearningActivitiesResult())

        else:
            rospy.loginfo('%s: Completed' % self._action_name)
            self._as.set_succeeded(LearningActivitiesResult())

        # Persist the updated oLDA model only if at least one loop completed.
        if learnt_anything:
            #print type(self.last_olda_ret.code_book)
            self.ol.olda_msg_store.insert(message=self.last_olda_ret)
            print "oLDA output pushed to msg store"
        return

    def update_learned_uuid_topics(self, uuids_to_process, uuids, gamma):
        """Write learned topic distributions back to each recording's mongo doc."""
        for ret in uuids_to_process:
            #print ret.uuid, ret.topics #uuids.index(ret.uuid)
            try:
                ind = uuids.index(ret.uuid)
                ret.topics = gamma[ind]
                ret.activity = True
            except ValueError:
                pass
                #print "--not learned on"
            # qsrs is flagged True for every queried uuid (learned or not)
            # so it is not re-queried on the next loop.
            ret.qsrs = True
            self.learn_store.update(message=ret,
                                    message_query={"uuid": ret.uuid},
                                    upsert=True)

    def get_uuids_to_process(self, folder):
        """Return all recording folder names (sorted) under one date folder."""
        return [
            uuid for uuid in sorted(os.listdir(
                os.path.join(self.path, self.recordings, folder)),
                                    reverse=False)
        ]

    def get_dates_to_process(self):
        """ Find the sequence of date folders (on disc) which have not been processed into QSRs.
        ret: self.not_processed_dates - List of date folders to use
        """
        # NOTE(review): relies on self.qsr_progress_date/_uuid being set
        # elsewhere - they are not initialised in __init__ here; confirm.
        for (ret, meta) in self.msg_store.query(QSRProgress._type):
            if ret.type != "QSRProgress": continue
            if ret.last_date_used >= self.qsr_progress_date:
                self.qsr_progress_date = ret.last_date_used
                self.qsr_progress_uuid = ret.uuid

        print "qsr progress date: ", self.qsr_progress_date, self.qsr_progress_uuid
        self.not_processed_dates = []
        for date in sorted(os.listdir(os.path.join(self.path,
                                                   self.recordings)),
                           reverse=False):
            # folder names are date strings, so >= compares chronologically
            if date >= self.qsr_progress_date:
                self.not_processed_dates.append(date)
        print "dates to process:", self.not_processed_dates

    def update_qsr_progress(self, date, uuid):
        """Upsert the single QSRProgress doc with the latest processed point."""
        query = {"type": "QSRProgress"}
        date_ran = str(datetime.datetime.now().date())
        msg = QSRProgress(type="QSRProgress",
                          date_ran=date_ran,
                          last_date_used=date,
                          uuid=uuid)
        self.msg_store.update(message=msg, message_query=query, upsert=True)

    def update_last_learning(self, date, success):
        """Upsert the LastKnownLearningPoint doc for today's run."""
        #%todo: add success rate?
        self.last_learn_date = date
        date_ran = str(datetime.datetime.now().date())
        msg = LastKnownLearningPoint(type="oLDA",
                                     date_ran=date_ran,
                                     last_date_used=self.last_learn_date,
                                     success=success)
        query = {"type": "oLDA", "date_ran": date_ran}
        self.msg_store.update(message=msg, message_query=query, upsert=True)

    def get_last_learn_date(self):
        """ Find the last time the learning was run - i.e. where the oLDA is to update
        """
        for (ret, meta) in self.msg_store.query(LastKnownLearningPoint._type):
            if ret.type != "oLDA": continue
            if ret.last_date_used > self.last_learn_date:
                self.last_learn_date = ret.last_date_used
                self.last_learn_success = ret.success
        print "Last learned date: ", self.last_learn_date

    def get_last_oLDA_msg(self):
        """ Find the last learning msg on mongo
        """
        # Falls back to a fresh/empty oLDA message when nothing is stored yet.
        self.last_olda_ret = oLDA()
        last_date, last_time = "", ""
        for (ret, meta) in self.ol.olda_msg_store.query(oLDA._type):
            if ret.date > last_date and ret.time > last_time:
                last_date = ret.date
                last_time = ret.time
                self.last_olda_ret = ret
        print "Last learned date: ", last_date
class SkeletonManager(object):
    """Re-publish OpenNI tracker /tf joint frames as skeleton messages.

    Polls /tf for tracked subjects 1..8 and publishes each complete frame
    on skeleton_data/incremental; when the tracker reports a user
    "Out of Scene", their accumulated frames are published on
    skeleton_data/complete and optionally logged to mongo.
    """

    def __init__(self):
        self.baseFrame = '/tracker_depth_frame'
        # The 15 OpenNI joints published as tracker/user_<n>/<joint> tf frames.
        self.joints = [
            'head',
            'neck',
            'torso',
            'left_shoulder',
            'left_elbow',
            'left_hand',
            'left_hip',
            'left_knee',
            'left_foot',
            'right_shoulder',
            'right_elbow',
            'right_hand',
            'right_hip',
            'right_knee',
            'right_foot',
        ]

        self.data = {}  # current tf frame data for 15 joints
        self.accumulate_data = {}  # accumulates multiple tf frames
        self.users = {}  # keeps the tracker state message, timepoint and UUID
        self.map_info = "don't know"  # topological map name
        self.current_node = "don't care"  # topological node waypoint
        self.robot_pose = Pose()  # pose of the robot

        # logging to mongo:
        # NOTE(review): the default "false" is a non-empty string and thus
        # truthy - confirm the param is always set as a real boolean.
        self._with_logging = rospy.get_param("~log_skeleton", "false")
        self._message_store = rospy.get_param("~message_store",
                                              "people_skeleton")

        # listeners:
        self.tf_listener = tf.TransformListener()
        #self.uuid_listener = rospy.Subscriber("/people", user_ID, self.uuid_callback)
        rospy.Subscriber("skeleton_data/state", skeleton_tracker_state,
                         self.tracker_state_callback)
        rospy.Subscriber("/current_node",
                         String,
                         callback=self.node_callback,
                         queue_size=10)
        rospy.Subscriber("/robot_pose",
                         Pose,
                         callback=self.robot_callback,
                         queue_size=10)
        self.topo_listerner = rospy.Subscriber("/topological_map",
                                               TopologicalMap,
                                               self.map_callback,
                                               queue_size=10)

        # publishers:
        self.publish_incr = rospy.Publisher('skeleton_data/incremental',
                                            skeleton_message,
                                            queue_size=10)
        self.publish_comp = rospy.Publisher('skeleton_data/complete',
                                            skeleton_complete,
                                            queue_size=10)
        self.rate = rospy.Rate(15.0)

        # initialise data to recieve tf data
        self._initialise_data()

        # initialise mongodb client
        if self._with_logging:
            rospy.loginfo("Connecting to mongodb...%s" % self._message_store)
            self._store_client = MessageStoreProxy(
                collection=self._message_store)

    def _initialise_data(self):
        """Pre-allocate per-subject joint dictionaries and tracker state."""
        # subjects 1..8 (xrange(1, 9)); the original comment claimed
        # "up to 10 people", which did not match the code
        for subj in xrange(1, 9):
            self.data[subj] = {}
            self.data[subj]['flag'] = 0
            self.users[subj] = {"message": "No message"}

            for i in self.joints:
                self.data[subj][i] = dict()
                #self.data[subj][i]['value'] = [0,0,0]
                #self.data[subj][i]['value'] = [0,0,0]
                # timestamp of the last tf frame seen for this joint
                self.data[subj][i]['t_old'] = 0

    def _get_tf_data(self):
        """Main loop: poll /tf for every subject/joint and publish skeletons.

        Blocks until rospy shutdown, ticking at self.rate (15 Hz).
        """
        while not rospy.is_shutdown():
            for subj in xrange(1, 9):
                joints_found = True
                for i in self.joints:
                    if self.tf_listener.frameExists(
                            self.baseFrame) and joints_found is True:
                        try:
                            # only fetch a transform when this joint has a
                            # newer common timestamp than last time
                            tp = self.tf_listener.getLatestCommonTime(
                                self.baseFrame,
                                "tracker/user_%d/%s" % (subj, i))
                            if tp != self.data[subj][i]['t_old']:
                                self.data[subj][i]['t_old'] = tp
                                self.data[subj]['flag'] = 1
                                (self.data[subj][i]['value'], self.data[subj][i]['rot']) = \
                                    self.tf_listener.lookupTransform(self.baseFrame, "tracker/user_%d/%s" % (subj, i), rospy.Time(0))

                        except (tf.Exception, tf.LookupException,
                                tf.ConnectivityException,
                                tf.ExtrapolationException):
                            joints_found = False
                            self.data[subj][
                                'flag'] = 0  #don't publish part of this Users skeleton
                            continue

                #If the tracker_state is 'Out of Scene' publish the accumulated skeleton
                if self.users[subj][
                        "message"] == "Out of Scene" and subj in self.accumulate_data:
                    self._publish_complete_data(subj)
                    self.data[subj]['flag'] = 0

                #print "h ", self.data[subj]['flag'], self.users[subj]["message"]

            #For all subjects, publish the incremental skeleton and accumulate into self.data also.
            list_of_subs = [
                subj for subj in self.data if self.data[subj]['flag'] == 1
            ]
            #print ">>>", list_of_subs
            for subj in list_of_subs:
                if self.users[subj]["message"] != "New":
                    continue  # this catches cases where a User leaves the scene but they still have /tf data

                #print ">", subj
                incr_msg = skeleton_message()
                incr_msg.userID = subj
                for i in self.joints:
                    joint = joint_message()
                    joint.name = i
                    joint.time = self.data[subj][i]['t_old']

                    # NOTE(review): 'value'/'rot' are only set after a
                    # successful lookupTransform - confirm they always exist
                    # by the time 'flag' is 1.
                    position = Point(x = self.data[subj][i]['value'][0], \
                               y = self.data[subj][i]['value'][1], z = self.data[subj][i]['value'][2])
                    rot = Quaternion(x=self.data[subj][i]['rot'][0],
                                     y=self.data[subj][i]['rot'][1],
                                     z=self.data[subj][i]['rot'][2],
                                     w=self.data[subj][i]['rot'][3])

                    joint.pose.position = position
                    joint.pose.orientation = rot
                    incr_msg.joints.append(joint)
                self.data[subj]['flag'] = 0

                #publish the instant frame message on /incremental topic
                self.publish_incr.publish(incr_msg)

                #accumulate the messages
                if self.users[subj]["message"] == "New":
                    self._accumulate_data(subj, incr_msg)
                elif self.users[subj]["message"] == "No message":
                    print "Just published this user. They are not back yet, get over it."
                else:
                    raise RuntimeError(
                        "this should never have occured; why is message not `New` or `Out of Scene' ??? "
                    )

            self.rate.sleep()

    def _accumulate_data(self, subj, current_msg):
        # accumulate the multiple skeleton messages until user goes out of scene
        if current_msg.userID in self.accumulate_data:
            self.accumulate_data[current_msg.userID].append(current_msg)
        else:
            self.accumulate_data[current_msg.userID] = [current_msg]

    def _publish_complete_data(self, subj):
        # when user goes "out of scene" publish their accumulated data
        st = self.accumulate_data[subj][0].joints[0].time
        en = self.accumulate_data[subj][-1].joints[-1].time

        msg = skeleton_complete(uuid = self.users[subj]["uuid"], \
                                skeleton_data = self.accumulate_data[subj], \
                                number_of_detections = len(self.accumulate_data[subj]), \
                                map_name = self.map_info, current_topo_node = self.current_node, \
                                start_time = st, end_time = en, robot_pose = self.robot_pose )

        self.publish_comp.publish(msg)
        rospy.loginfo("User #%s: published %s msgs as %s" %
                      (subj, len(self.accumulate_data[subj]), msg.uuid))

        # remove the user from the users dictionary and the accumulated data dict.
        self.users[subj]["message"] = "No message"
        del self.accumulate_data[subj]
        del self.users[subj]["uuid"]

        if self._with_logging:
            query = {"uuid": msg.uuid}
            #self._store_client.insert(traj_msg, meta)
            # upsert so a repeated uuid overwrites rather than duplicates
            self._store_client.update(message=msg,
                                      message_query=query,
                                      upsert=True)

    def tracker_state_callback(self, msg):
        # get the tracker state message and UUID of tracker user
        self.users[msg.userID]["uuid"] = msg.uuid
        self.users[msg.userID]["message"] = msg.message
        self.users[msg.userID]["timepoint"] = msg.timepoint

    def robot_callback(self, msg):
        """Cache the latest robot pose from /robot_pose."""
        self.robot_pose = msg

    def node_callback(self, msg):
        """Cache the current topological node name."""
        self.current_node = msg.data

    def map_callback(self, msg):
        # get the topological map name, then stop listening (one-shot)
        self.map_info = msg.map
        self.topo_listerner.unregister()

    def publish_skeleton(self):
        """Public entry point: run the blocking tf polling/publishing loop."""
        self._get_tf_data()
class SkeletonManager(object):
    """To deal with Skeleton messages once they are published as incremental msgs by OpenNI2."""

    def __init__(self, record_rgb=False):

        self.record_rgb = record_rgb  # to deal with the anonymous setting at tsc
        self.accumulate_data = {}  # accumulates multiple skeleton msg
        self.accumulate_robot = {}  # accumulates multiple skeleton msg
        self.sk_mapping = {}  # does something in for the image logging

        self.map_info = "don't know"  # topological map name
        self.current_node = "don't care"  # topological node waypoint
        self.robot_pose = Pose()  # pose of the robot
        self.ptu_pan = self.ptu_tilt = 0.0

        # directory to store the data
        self.date = str(datetime.datetime.now().date())

        self.dir1 = "/home/" + getpass.getuser() + "/SkeletonDataset/no_consent/" + self.date + "/"
        if not os.path.exists(self.dir1):
            print "  -create folder:", self.dir1
            os.makedirs(self.dir1)

        # flags to make sure we received every thing
        self._flag_robot = 0
        self._flag_node = 0
        self._flag_rgb = 0
        # self._flag_rgb_sk = 0
        self._flag_depth = 0

        # open cv stuff
        self.cv_bridge = CvBridge()

        # Get rosparams
        self._with_logging = rospy.get_param("~log_skeleton", "false")
        self._message_store = rospy.get_param("~message_store", "people_skeleton")
        self._database = rospy.get_param("~database", "message_store")
        self.camera = "head_xtion"

        # listeners
        rospy.Subscriber("skeleton_data/incremental", skeleton_message, self.incremental_callback)
        rospy.Subscriber(
            "/" + self.camera + "/rgb/image_color", sensor_msgs.msg.Image, callback=self.rgb_callback, queue_size=10
        )
        # rospy.Subscriber('/'+self.camera+'/rgb/sk_tracks', sensor_msgs.msg.Image, callback=self.rgb_sk_callback, queue_size=10)
        rospy.Subscriber("/" + self.camera + "/depth/image", sensor_msgs.msg.Image, self.depth_callback, queue_size=10)
        rospy.Subscriber("/robot_pose", Pose, callback=self.robot_callback, queue_size=10)
        rospy.Subscriber("/current_node", String, callback=self.node_callback, queue_size=1)
        rospy.Subscriber("/ptu/state", sensor_msgs.msg.JointState, callback=self.ptu_callback, queue_size=1)
        self.topo_listerner = rospy.Subscriber("/topological_map", TopologicalMap, self.map_callback, queue_size=10)
        rospy.Subscriber("skeleton_data/state", skeleton_tracker_state, self.state_callback)

        # publishers:
        # self.publish_incr = rospy.Publisher('skeleton_data/incremental', skeleton_message, queue_size = 10)
        self.publish_comp = rospy.Publisher("skeleton_data/complete", SkeletonComplete, queue_size=10)
        self.rate = rospy.Rate(15.0)

        # only publish the skeleton data when the person is far enough away (distance threshold)
        # maximum number of frames for one detection
        self.max_num_frames = 3000
        self.dist_thresh = 0
        self.dist_flag = 1

        # mongo store
        if self._with_logging:
            rospy.loginfo("Connecting to mongodb...%s" % self._message_store)
            self._store_client = MessageStoreProxy(collection=self._message_store, database=self._database)

    def convert_to_world_frame(self, pose, robot_msg):
        """Convert a single camera frame coordinate into a map frame coordinate"""
        fx = 525.0
        fy = 525.0
        cx = 319.5
        cy = 239.5

        y, z, x = pose.x, pose.y, pose.z

        xr = robot_msg.robot_pose.position.x
        yr = robot_msg.robot_pose.position.y
        zr = robot_msg.robot_pose.position.z
        ax = robot_msg.robot_pose.orientation.x
        ay = robot_msg.robot_pose.orientation.y
        az = robot_msg.robot_pose.orientation.z
        aw = robot_msg.robot_pose.orientation.w
        roll, pr, yawr = euler_from_quaternion([ax, ay, az, aw])

        yawr += robot_msg.PTU_pan
        pr += robot_msg.PTU_tilt

        # transformation from camera to map
        rot_y = np.matrix([[np.cos(pr), 0, np.sin(pr)], [0, 1, 0], [-np.sin(pr), 0, np.cos(pr)]])
        rot_z = np.matrix([[np.cos(yawr), -np.sin(yawr), 0], [np.sin(yawr), np.cos(yawr), 0], [0, 0, 1]])
        rot = rot_z * rot_y

        pos_r = np.matrix([[xr], [yr], [zr + 1.66]])  # robot's position in map frame
        pos_p = np.matrix([[x], [-y], [-z]])  # person's position in camera frame

        map_pos = rot * pos_p + pos_r  # person's position in map frame
        x_mf = map_pos[0, 0]
        y_mf = map_pos[1, 0]
        z_mf = map_pos[2, 0]

        # print ">>" , x_mf, y_mf, z_mf
        return Point(x_mf, y_mf, z_mf)

    def _publish_complete_data(self, subj, uuid, vis=False):
        """when user goes "out of scene" publish their accumulated data"""
        # print ">> publishing these: ", uuid, len(self.accumulate_data[uuid])

        st = self.accumulate_data[uuid][0].time
        en = self.accumulate_data[uuid][-1].time
        # print ">>", self.accumulate_data[uuid][0].joints[0].pose

        first_pose = self.accumulate_data[uuid][0].joints[0].pose.position
        robot_msg = self.accumulate_robot[uuid][0]
        first_map_point = self.convert_to_world_frame(first_pose, robot_msg)

        vis = True
        if vis:
            print ">>>"
            print "storing: ", uuid, type(uuid)
            print "date: ", self.date  # , type(self.date)
            print "number of detectons: ", len(self.accumulate_data[uuid])  # , type(len(self.accumulate_data[uuid]))
            print "map info: %s. (x,y) Position: (%s,%s)" % (
                self.map_info,
                first_map_point.x,
                first_map_point.y,
            )  # , type(self.map_info)
            print "current node: ", self.current_node, type(self.current_node)
            print "start/end rostime:", st, en  # type(st), en, type(en)

        msg = SkeletonComplete(
            uuid=uuid,
            date=self.date,
            time=self.sk_mapping[uuid]["time"],
            skeleton_data=self.accumulate_data[uuid],
            number_of_detections=len(self.accumulate_data[uuid]),
            map_name=self.map_info,
            current_topo_node=self.current_node,
            start_time=st,
            end_time=en,
            robot_data=self.accumulate_robot[uuid],
            human_map_point=first_map_point,
        )

        self.publish_comp.publish(msg)
        rospy.loginfo("User #%s: published %s msgs as %s" % (subj, len(self.accumulate_data[uuid]), msg.uuid))

        if self._with_logging:
            query = {"uuid": msg.uuid}
            # self._store_client.insert(traj_msg, meta)
            self._store_client.update(message=msg, message_query=query, upsert=True)

        # remove the user from the users dictionary and the accumulated data dict.
        del self.accumulate_data[uuid]
        del self.sk_mapping[uuid]

    def _dump_images(self, msg):
        """For each incremental skeleton - dump an rgb/depth image to file"""

        self.inc_sk = msg
        if str(datetime.datetime.now().date()) != self.date:
            print "new day!"
            self.date = str(datetime.datetime.now().date())
            self.dir1 = "/home/" + getpass.getuser() + "/SkeletonDataset/no_consent/" + self.date + "/"
            print "checking if folder exists:", self.dir1
            if not os.path.exists(self.dir1):
                print "  -create folder:", self.dir1
                os.makedirs(self.dir1)

        if self.sk_mapping[msg.uuid]["state"] is "Tracking" and self.sk_mapping[msg.uuid]["frame"] is 1:

            self.sk_mapping[msg.uuid]["time"] = str(datetime.datetime.now().time()).split(".")[0]
            t = self.sk_mapping[msg.uuid]["time"] + "_"
            print "  -new skeleton detected with id:", msg.uuid
            new_dir = self.dir1 + self.date + "_" + t + msg.uuid  # +'_'+waypoint
            # print "new", new_dir

            if not os.path.exists(new_dir):
                os.makedirs(new_dir)
                os.makedirs(new_dir + "/depth")
                os.makedirs(new_dir + "/robot")
                os.makedirs(new_dir + "/skeleton")
                if self.record_rgb:
                    os.makedirs(new_dir + "/rgb")
                    # os.makedirs(new_dir+'/rgb_sk')

                # create the empty bag file (closed in /skeleton_action)
                # self.bag_file = rosbag.Bag(new_dir+'/detection.bag', 'w')

        t = self.sk_mapping[self.inc_sk.uuid]["time"] + "_"
        new_dir = self.dir1 + self.date + "_" + t + self.inc_sk.uuid  # +'_'+waypoint
        if os.path.exists(new_dir):
            # setup saving dir and frame
            d = new_dir + "/"
            f = self.sk_mapping[self.inc_sk.uuid]["frame"]
            if f < 10:
                f_str = "0000" + str(f)
            elif f < 100:
                f_str = "000" + str(f)
            elif f < 1000:
                f_str = "00" + str(f)
            elif f < 10000:
                f_str = "0" + str(f)
            elif f < 100000:
                f_str = str(f)

            # save images
            if self.record_rgb:
                cv2.imwrite(d + "rgb/rgb_" + f_str + ".jpg", self.rgb)
                # cv2.imwrite(d+'rgb_sk/sk_'+f_str+'.jpg', self.rgb_sk)
            cv2.imwrite(d + "depth/depth_" + f_str + ".jpg", self.xtion_img_d_rgb)

            # try:
            #     self.bag_file.write('rgb', self.rgb_msg)
            #     self.bag_file.write('depth', self.depth_msg)
            #     self.bag_file.write('rgb_sk', self.rgb_sk_msg)
            # except:
            #     rospy.logwarn("Can not write rgb, depth, and rgb_sk to a bag file.")

            x = float(self.robot_pose.position.x)
            y = float(self.robot_pose.position.y)
            z = float(self.robot_pose.position.z)
            xo = float(self.robot_pose.orientation.x)
            yo = float(self.robot_pose.orientation.y)
            zo = float(self.robot_pose.orientation.z)
            wo = float(self.robot_pose.orientation.w)
            p = Point(x, y, z)
            q = Quaternion(xo, yo, zo, wo)
            robot = Pose(p, q)

            pan = float(self.ptu_pan)
            tilt = float(self.ptu_tilt)

            # try:
            #     self.bag_file.write('robot', robot)
            # except:
            #    rospy.logwarn("cannot write the robot position to bag. Has the bag been closed already?")

            # save robot data  in text file
            f1 = open(d + "robot/robot_" + f_str + ".txt", "w")
            f1.write("position\n")
            f1.write("x:" + str(x) + "\n")
            f1.write("y:" + str(y) + "\n")
            f1.write("z:" + str(z) + "\n")
            f1.write("orientation\n")
            f1.write("x:" + str(xo) + "\n")
            f1.write("y:" + str(yo) + "\n")
            f1.write("z:" + str(zo) + "\n")
            f1.write("w:" + str(wo) + "\n")
            f1.write("ptu_state\n")
            f1.write("ptu_pan:" + str(pan) + "\n")
            f1.write("ptu_tilt:" + str(tilt) + "\n")
            f1.close()

            # save skeleton data in bag file
            # x=float(self.robot_pose.position.x)
            # y=float(self.robot_pose.position.y)
            # z=float(self.robot_pose.position.z)
            # xo=float(self.robot_pose.orientation.x)
            # yo=float(self.robot_pose.orientation.y)
            # zo=float(self.robot_pose.orientation.z)
            # wo=float(self.robot_pose.orientation.w)
            # p = Point(x, y, z)
            # q = Quaternion(xo, yo, zo, wo)
            # skel = Pose(p,q)
            # bag.write('skeleton', skel)

            # save skeleton data in text file
            f1 = open(d + "skeleton/skl_" + f_str + ".txt", "w")
            f1.write("time:" + str(self.inc_sk.time.secs) + "." + str(self.inc_sk.time.nsecs) + "\n")
            for i in self.inc_sk.joints:
                f1.write(i.name + "\n")
                f1.write("position\n")
                f1.write("x:" + str(i.pose.position.x) + "\n")
                f1.write("y:" + str(i.pose.position.y) + "\n")
                f1.write("z:" + str(i.pose.position.z) + "\n")
                f1.write("orientation\n")
                f1.write("x:" + str(i.pose.orientation.x) + "\n")
                f1.write("y:" + str(i.pose.orientation.y) + "\n")
                f1.write("z:" + str(i.pose.orientation.z) + "\n")
                f1.write("w:" + str(i.pose.orientation.w) + "\n")
                f1.write("confidence:" + str(i.confidence) + "\n")
            f1.close()

            # update frame number
            if self.inc_sk.uuid in self.sk_mapping:
                self.sk_mapping[self.inc_sk.uuid]["frame"] += 1

            # publish the gaze request of person on every detection:
            # if self.inc_sk.joints[0].name == 'head':
            #     head = Header(frame_id='head_xtion_depth_optical_frame')
            #     look_at_pose = PoseStamped(header = head, pose=self.inc_sk.joints[0].pose)
            #     self.publish_consent_pose.publish(look_at_pose)
            # #self.gazeClient.send_goal(self.gazegoal)

    def incremental_callback(self, msg):
        """accumulate the multiple skeleton messages until user goes out of scene"""
        if self._flag_robot and self._flag_rgb and self._flag_depth:
            if msg.uuid in self.sk_mapping:
                if self.sk_mapping[msg.uuid]["state"] is "Tracking":
                    if len(self.accumulate_data[msg.uuid]) < self.max_num_frames:
                        self.accumulate_data[msg.uuid].append(msg)
                        robot_msg = robot_message(
                            robot_pose=self.robot_pose, PTU_pan=self.ptu_pan, PTU_tilt=self.ptu_tilt
                        )
                        self.accumulate_robot[msg.uuid].append(robot_msg)
                        self._dump_images(msg)
                        # print msg.userID, msg.uuid, len(self.accumulate_data[msg.uuid])

    def new_user_detected(self, msg):
        self.sk_mapping[msg.uuid] = {"state": "Tracking", "frame": 1}
        self.accumulate_data[msg.uuid] = []
        self.accumulate_robot[msg.uuid] = []

    def state_callback(self, msg):
        """Reads the state messages from the openNi tracker"""
        # print msg.uuid, msg.userID, msg.message
        if msg.message == "Tracking":
            self.new_user_detected(msg)
        elif msg.message == "Out of Scene" and msg.uuid in self.sk_mapping:
            self.sk_mapping[msg.uuid]["state"] = "Out of Scene"
        elif msg.message == "Visible" and msg.uuid in self.sk_mapping:
            self.sk_mapping[msg.uuid]["state"] = "Tracking"
        elif msg.message == "Stopped tracking" and msg.uuid in self.accumulate_data:
            if len(self.accumulate_data[msg.uuid]) != 0:
                self._publish_complete_data(msg.userID, msg.uuid)  # only publish if data captured

    def robot_callback(self, msg):
        self.robot_pose = msg
        if self._flag_robot == 0:
            print " >robot pose received"
            self._flag_robot = 1

    def ptu_callback(self, msg):
        self.ptu_pan, self.ptu_tilt = msg.position

    def node_callback(self, msg):
        self.current_node = msg.data
        if self._flag_node == 0:
            print " >current node received"
            self._flag_node = 1

    def map_callback(self, msg):
        # get the topological map name
        self.map_info = msg.map
        self.topo_listerner.unregister()

    def rgb_callback(self, msg):
        # self.rgb_msg = msg
        rgb = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
        self.rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        if self._flag_rgb is 0:
            print " >rgb image received"
            self._flag_rgb = 1

    # def rgb_sk_callback(self, msg):
    #    # self.rgb_sk_msg = msg
    #    rgb_sk = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
    #    self.rgb_sk = cv2.cvtColor(rgb_sk, cv2.COLOR_RGB2BGR)
    #    if self._flag_rgb_sk is 0:
    #        print ' >rgb skel image received'
    #        self._flag_rgb_sk = 1

    def depth_callback(self, msg):
        # self.depth_msg = msg
        depth_image = self.cv_bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
        depth_array = np.array(depth_image, dtype=np.float32)
        cv2.normalize(depth_array, depth_array, 0, 1, cv2.NORM_MINMAX)
        self.xtion_img_d_rgb = depth_array * 255
        if self._flag_depth is 0:
            print " >depth image received"
            self._flag_depth = 1
class SkeletonManager(object):
   
    def __init__(self):
        self.baseFrame = '/tracker_depth_frame'
        self.joints = [
                'head',
                'neck',
                'torso',
                'left_shoulder',
                'left_elbow',
                'left_hand',
                'left_hip',
                'left_knee',
                'left_foot',
                'right_shoulder',
                'right_elbow',
                'right_hand',
                'right_hip',
                'right_knee',
                'right_foot',
                ]

        self.data = {} #c urrent tf frame data for 15 joints 
        self.accumulate_data = {} # accumulates multiple tf frames
        self.users = {} # keeps the tracker state message, timepoint and UUID
        self.map_info = "don't know"  # topological map name
        self.current_node = "don't care"  # topological node waypoint
        self.robot_pose = Pose()   # pose of the robot
        
        # logging to mongo:
        self._with_logging = rospy.get_param("~log_skeleton", "false")
        self._message_store = rospy.get_param("~message_store", "people_skeleton")
        
        # listeners:
        self.tf_listener = tf.TransformListener()
        #self.uuid_listener = rospy.Subscriber("/people", user_ID, self.uuid_callback)       
        rospy.Subscriber("skeleton_data/state", skeleton_tracker_state, self.tracker_state_callback)
        rospy.Subscriber("/current_node", String, callback=self.node_callback, queue_size=10)
        rospy.Subscriber("/robot_pose", Pose, callback=self.robot_callback, queue_size=10)
        self.topo_listerner = rospy.Subscriber("/topological_map", TopologicalMap, self.map_callback, queue_size = 10)
        
        # publishers:
        self.publish_incr = rospy.Publisher('skeleton_data/incremental', skeleton_message, queue_size = 10)
        self.publish_comp = rospy.Publisher('skeleton_data/complete', skeleton_complete, queue_size = 10)
        self.rate = rospy.Rate(15.0)

        # initialise data to recieve tf data
        self._initialise_data()
        
        # initialise mongodb client
        if self._with_logging:
            rospy.loginfo("Connecting to mongodb...%s" % self._message_store)
            self._store_client = MessageStoreProxy(collection=self._message_store)

            
    def _initialise_data(self):
        #to cope with upto 10 people in the scene
        for subj in xrange(1,9):
            self.data[subj] = {}
            self.data[subj]['flag'] = 0
            self.users[subj] = {"message": "No message"}
            
            for i in self.joints:
                self.data[subj][i] = dict()
                #self.data[subj][i]['value'] = [0,0,0]
                #self.data[subj][i]['value'] = [0,0,0]
                self.data[subj][i]['t_old'] = 0


    def _get_tf_data(self):
        while not rospy.is_shutdown():
            for subj in xrange(1,9):
                joints_found = True
                for i in self.joints:
                    if self.tf_listener.frameExists(self.baseFrame) and joints_found is True:
                        try:
                            tp = self.tf_listener.getLatestCommonTime(self.baseFrame,  "tracker/user_%d/%s" % (subj, i))
                            if tp != self.data[subj][i]['t_old']:
                                self.data[subj][i]['t_old'] = tp
                                self.data[subj]['flag'] = 1
                                (self.data[subj][i]['value'], self.data[subj][i]['rot']) = \
                                    self.tf_listener.lookupTransform(self.baseFrame, "tracker/user_%d/%s" % (subj, i), rospy.Time(0))
                                                                        
                        except (tf.Exception, tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                            joints_found = False
                            self.data[subj]['flag'] = 0  #don't publish part of this Users skeleton
                            continue
                            
                #If the tracker_state is 'Out of Scene' publish the accumulated skeleton
                if self.users[subj]["message"] == "Out of Scene" and subj in self.accumulate_data:
                    self._publish_complete_data(subj)
                    self.data[subj]['flag'] = 0
                    
                #print "h ", self.data[subj]['flag'], self.users[subj]["message"]
                    
            
            #For all subjects, publish the incremental skeleton and accumulate into self.data also.
            list_of_subs = [subj for subj in self.data if self.data[subj]['flag'] == 1]
            #print ">>>", list_of_subs
            for subj in list_of_subs:
                if self.users[subj]["message"] != "New": 
                    continue  # this catches cases where a User leaves the scene but they still have /tf data
                
                #print ">", subj
                incr_msg = skeleton_message()
                incr_msg.userID = subj
                for i in self.joints:
                    joint = joint_message()
                    joint.name = i
                    joint.time = self.data[subj][i]['t_old']
                    
                    position = Point(x = self.data[subj][i]['value'][0], \
                               y = self.data[subj][i]['value'][1], z = self.data[subj][i]['value'][2]) 
                    rot = Quaternion(x = self.data[subj][i]['rot'][0], y = self.data[subj][i]['rot'][1],
                               z = self.data[subj][i]['rot'][2], w = self.data[subj][i]['rot'][3])

                    joint.pose.position = position
                    joint.pose.orientation = rot
                    incr_msg.joints.append(joint)
                self.data[subj]['flag'] = 0
                
                #publish the instant frame message on /incremental topic
                self.publish_incr.publish(incr_msg)
                
                #accumulate the messages
                if self.users[subj]["message"] == "New":
                    self._accumulate_data(subj, incr_msg)
                elif self.users[subj]["message"] == "No message":
                    print "Just published this user. They are not back yet, get over it."
                else:
                    raise RuntimeError("this should never have occured; why is message not `New` or `Out of Scene' ??? ")

            self.rate.sleep()


    def _accumulate_data(self, subj, current_msg):
        # accumulate the multiple skeleton messages until user goes out of scene
        if current_msg.userID in self.accumulate_data:
            self.accumulate_data[current_msg.userID].append(current_msg)
        else:
            self.accumulate_data[current_msg.userID] = [current_msg]


    def _publish_complete_data(self, subj):
        # when user goes "out of scene" publish their accumulated data
        st = self.accumulate_data[subj][0].joints[0].time
        en = self.accumulate_data[subj][-1].joints[-1].time
        
        msg = skeleton_complete(uuid = self.users[subj]["uuid"], \
                                skeleton_data = self.accumulate_data[subj], \
                                number_of_detections = len(self.accumulate_data[subj]), \
                                map_name = self.map_info, current_topo_node = self.current_node, \
                                start_time = st, end_time = en, robot_pose = self.robot_pose )

        self.publish_comp.publish(msg)
        rospy.loginfo("User #%s: published %s msgs as %s" % (subj, len(self.accumulate_data[subj]), msg.uuid))

        # remove the user from the users dictionary and the accumulated data dict.
        self.users[subj]["message"] = "No message"
        del self.accumulate_data[subj]
        del self.users[subj]["uuid"]
        
        if self._with_logging:
            query = {"uuid" : msg.uuid}
            #self._store_client.insert(traj_msg, meta)
            self._store_client.update(message=msg, message_query=query, upsert=True)


    def tracker_state_callback(self, msg):
        # get the tracker state message and UUID of tracker user
        self.users[msg.userID]["uuid"] = msg.uuid
        self.users[msg.userID]["message"] = msg.message
        self.users[msg.userID]["timepoint"] = msg.timepoint

    def robot_callback(self, msg):
        self.robot_pose = msg

    def node_callback(self, msg):
        self.current_node = msg.data

    def map_callback(self, msg):
        # get the topological map name
        self.map_info = msg.map
        self.topo_listerner.unregister()



    def publish_skeleton(self):
        self._get_tf_data()
Пример #23
0
#!/usr/bin/env python

import rospy
from mongodb_store.message_store import MessageStoreProxy
from ubd_scene_image_comparator.msg import UbdSceneImgLog

if __name__ == '__main__':
    rospy.init_node("ubd_scene_reset_annotator")
    rospy.loginfo("Resetting all annotated logs...")
    db = MessageStoreProxy(collection="ubd_scene_log")
    annotated_query = {"annotated": True}
    # pull annotated entries ten at a time and clear the flag on each,
    # re-querying until nothing annotated remains
    logs = db.query(UbdSceneImgLog._type, annotated_query, limit=10)
    while not rospy.is_shutdown() and logs:
        rospy.loginfo("Resetting %d entries" % len(logs))
        for entry in logs:
            log_msg = entry[0]
            log_msg.annotated = False
            db.update(
                log_msg,
                message_query={"header.stamp.secs": log_msg.header.stamp.secs})
        logs = db.query(UbdSceneImgLog._type, annotated_query, limit=10)
    rospy.loginfo("Done...")
    rospy.loginfo(
        "Please remove entries on ubd_scene_accuracy if you want to have fresh learnt accuracy"
    )
    rospy.spin()
class dataReader(object):

	def __init__(self):

		self.skeleton_data = {}  #keeps the published complete skeleton in a dictionary. key = uuid
		self.rate = rospy.Rate(15.0)

		## listeners:
		rospy.Subscriber("skeleton_data/complete", skeleton_complete, self.skeleton_callback)

		## Logging params:
		self._with_mongodb = rospy.get_param("~with_mongodb", "true")
		self._logging = rospy.get_param("~logging", "false")
		self._message_store = rospy.get_param("~message_store_prefix", "people_skeleton")

		if self._logging:
			msg_store = self._message_store + "_world_state"
			rospy.loginfo("Connecting to mongodb...%s" % msg_store)
			self._store_client_world = MessageStoreProxy(collection=msg_store)

			msg_store = self._message_store + "_qstag"
			self._store_client_qstag = MessageStoreProxy(collection=msg_store)


		## QSR INFO:
		self._which_qsr = rospy.get_param("~which_qsr", "rcc3")
		#self.which_qsr = ["qtcbs", "argd"]

		self.set_params()
		self.cln = QSRlib_ROS_Client()


	def set_params(self):
		quantisation_factor = 0.01
		validate = False
		no_collapse = True
		argd_params = {"Touch": 0.5, "Near": 2, "Far": 3}
		qstag_params = {"min_rows" : 1, "max_rows" : 1, "max_eps" : 5}
		qsrs_for = [("head", "right_hand"), ("head", "left_hand"), ("left_hand", "right_hand")]
		object_types = {"right_hand": "hand", "left_hand": "hand", "head":"head"}
		self.dynamic_args = {
						"qtcbs": {"quantisation_factor": quantisation_factor,
								  "validate": validate,
								  "no_collapse": no_collapse,
							      "qsrs_for": qsrs_for},

						"rcc3" : {"qsrs_for": qsrs_for},
						"argd": {"qsr_relations_and_values" : argd_params},
						"qstag": {"object_types" : object_types, "params" : qstag_params},
						"filters": {"median_filter" : {"window": 3}}
						}


	def _create_qsrs(self):
		while not rospy.is_shutdown():

			for uuid, msg_data in self.skeleton_data.items():
				if msg_data["flag"] != 1: continue
				print ">> recieving worlds for:", uuid
				qsrlib_response_message = self._call_qsrLib(uuid, msg_data)

				if self._logging:
					print msg_data.keys()
					self.upload_qsrs_to_mongo(uuid, qsrlib_response_message.qstag, msg_data)
				print ">>>", qsrlib_response_message.qstag.episodes
				print ">", qsrlib_response_message.qstag.graphlets.histogram
				#print ">>>", qsrlib_response_message.qstag.graph
				del self.skeleton_data[uuid]

			self.rate.sleep()



	def skeleton_callback(self, msg):
		self.skeleton_data[msg.uuid] = {
					"flag": 1,
					"detections" : msg.number_of_detections,
					"map" : msg.map_name,
					"current_node" : msg.current_topo_node,
					"start_time": msg.start_time,
					"end_time": msg.end_time,
					"world": self.convert_skeleton_to_world(msg.skeleton_data)}


	def convert_skeleton_to_world(self, data, use_every=1):
		world = World_Trace()

		joint_states = {'head' : [], 'neck' : [],  'torso' : [], 'left_shoulder' : [],'left_elbow' : [],
				'left_hand' : [],'left_hip' : [], 'left_knee' : [],'left_foot' : [],'right_shoulder' : [],
				'right_elbow' : [],'right_hand' : [],'right_hip' : [],'right_knee' : [],'right_foot' : []}

		for tp, msg in enumerate(data):
			for i in msg.joints:
				o = Object_State(name=i.name, timestamp=tp, x=i.pose.position.x,\
				                y = i.pose.position.y, z=i.pose.position.z, xsize=0.1, ysize=0.1, zsize=0.1)
				joint_states[i.name].append(o)

		for joint_data in joint_states.values():
			world.add_object_state_series(joint_data)
		return world


	def _call_qsrLib(self, uuid, msg_data):

		qsrlib_request_message = QSRlib_Request_Message(which_qsr=self._which_qsr, input_data=msg_data["world"], \
		                        dynamic_args=self.dynamic_args)
		req = self.cln.make_ros_request_message(qsrlib_request_message)
		if self._logging:
			print "logging the world trace"
			msg = QSRlibMongo(uuid = uuid, data = pickle.dumps(msg_data["world"]),
				start_time = msg_data["start_time"], end_time = msg_data["end_time"])
			query = {"uuid" : uuid}
			self._store_client_world.update(message=msg, message_query=query, upsert=True)

		res = self.cln.request_qsrs(req)
		qsrlib_response_message = pickle.loads(res.data)
		return qsrlib_response_message


	def upload_qsrs_to_mongo(self, uuid, qstag, msg_data):
		print "logging the QSTAG"
		eps = pickle.dumps(qstag.episodes)
		graph = pickle.dumps(qstag.graph)
		obs = [node['name'] for node in qstag.object_nodes]

		print ">>", qstag.graphlets.graphlets

		msg = QsrsToMongo(uuid = uuid, which_qsr = self._which_qsr,
			episodes=eps, igraph=graph, histogram=qstag.graphlets.histogram,
			code_book = qstag.graphlets.code_book, start_time= msg_data["start_time"],
			map_name = msg_data["map"], current_node = msg_data["current_node"],
			number_of_detections = msg_data["detections"], objects = obs,
			end_time = msg_data["end_time"]
			)

		query = {"uuid" : uuid}
		self._store_client_qstag.update(message=msg, message_query=query, upsert=True)
class TRAN(object):
    """Interactive annotator that labels stored trajectories as human or
    non-human.

    Logged upper-body detections are matched against offline trajectories
    (successively filtered by timestamp, robot position, and human position).
    Each match is presented in a Tkinter window with "Human"/"Non-Human"
    buttons and the vote is stored as a TrajectoryType message in the
    "trajectory_types" collection.
    """

    def __init__(self, name):
        # NOTE(review): `name` is currently unused; kept for interface
        # compatibility with existing callers.
        # uuids labelled during this session
        self.stored_uuids = list()
        # uuids labelled while several trajectories matched one detection
        self._uncertain_uuids = list()
        # uuids that already have an entry in the trajectory_types collection
        self.have_stored_uuids = list()
        self.trajectories = OfflineTrajectories()
        # all logged upper-body detections, in insertion order
        self.upbods = MessageStoreProxy(collection='upper_bodies').query(
            LoggingUBD._type, sort_query=[("$natural", 1)]
        )
        self._store_client = MessageStoreProxy(collection="trajectory_types")
        self.bridge = CvBridge()
        self.window = None
        # -1: no vote yet, 0: non-human, 1: human
        self._traj_type = -1

    # create window for the image to show, and the button to click
    def _create_window(self, img):
        """Show *img* in a blocking Tkinter window with Human/Non-Human
        buttons; returns once a button callback destroys the window."""
        # main window
        self.window = Tkinter.Tk()
        self.window.title("Trajectory Annotator")

        # image frame in the top part of the main window
        imgtk = ImageTk.PhotoImage(image=Image.fromarray(img))
        Tkinter.LabelFrame(self.window).pack()
        Tkinter.Label(self.window, image=imgtk).pack()

        # button frame in the bottom part of the main window
        buttons_frame = Tkinter.LabelFrame(self.window).pack(side=Tkinter.BOTTOM, expand="yes")
        # human
        human_button = Tkinter.Button(
            buttons_frame, text="Human", fg="black", command=self._human_button_cb
        )
        human_button.pack(side=Tkinter.LEFT)
        # non-human (was shadowing `human_button`)
        nonhuman_button = Tkinter.Button(
            buttons_frame, text="Non-Human", fg="black", command=self._nonhuman_button_cb
        )
        nonhuman_button.pack(side=Tkinter.RIGHT)

        # window main loop; blocks until the window is destroyed
        self.window.mainloop()

    # call back for human button
    def _human_button_cb(self):
        """Record a "human" vote and close the annotation window."""
        self.window.destroy()
        self._traj_type = 1

    # call back for non-human button
    def _nonhuman_button_cb(self):
        """Record a "non-human" vote and close the annotation window."""
        self.window.destroy()
        self._traj_type = 0

    # check in database whether the uuid exists
    def _check_mongo_for_uuid(self, uuids):
        """Remember which of *uuids* already have a TrajectoryType entry."""
        for uuid in uuids:
            logs = self._store_client.query(
                TrajectoryType._type, message_query={"uuid": uuid}
            )
            if len(logs) and uuid not in self.have_stored_uuids:
                self.have_stored_uuids.append(uuid)

    # hidden annotation function for presenting image and get the vote
    def _annotate(self, ubd, index, uuids, annotation):
        """Show detection *index* of *ubd* and collect a vote for *uuids*.

        Returns (stop, annotation): stop is True when the window was closed
        without a vote; annotation maps uuid -> 0/1.
        """
        stop = False
        self._check_mongo_for_uuid(uuids)

        # only ask the user if at least one matched uuid is still unlabelled
        if len(uuids) > 0 and not (set(uuids).issubset(self.stored_uuids) or set(uuids).issubset(self.have_stored_uuids)):
            if len(ubd[0].ubd_rgb) == len(ubd[0].ubd_pos):
                # OpenCV images are BGR; Tkinter/PIL expect RGB
                b, g, r = cv2.split(
                    self.bridge.imgmsg_to_cv2(ubd[0].ubd_rgb[index])
                )
                self._create_window(cv2.merge((r, g, b)))

                if int(self._traj_type) in [0, 1]:
                    for uuid in [i for i in uuids if i not in self.stored_uuids]:
                        rospy.loginfo("%s is stored..." % uuid)
                        annotation.update({uuid: int(self._traj_type)})
                        self.stored_uuids.append(uuid)
                        # several trajectories matched one detection, so the
                        # individual labels are uncertain
                        if len(uuids) > 1:
                            self._uncertain_uuids.append(uuid)

                    self._uncertain_uuids = [
                        i for i in self._uncertain_uuids if i not in uuids
                    ]
                else:
                    stop = True
            else:
                rospy.logwarn("UBD_RGB has %d data, but UBD_POS has %d data" % (len(ubd[0].ubd_rgb), len(ubd[0].ubd_pos)))
        # if all uuids have been classified before, then this removes
        # all doubts about those uuids
        elif set(uuids).issubset(self.stored_uuids):
            self._uncertain_uuids = [
                i for i in self._uncertain_uuids if i not in uuids
            ]

        return stop, annotation

    # annotation function
    def annotate(self):
        """Iterate over all upper-body detections, collect votes, and store
        the resulting TrajectoryType messages."""
        stop = False
        annotation = dict()
        for i in self.upbods:
            for j in range(len(i[0].ubd_pos)):
                uuids = self._find_traj_frm_pos(
                    i[0].header, i[0].ubd_pos[j], i[0].robot
                )
                stop, annotation = self._annotate(i, j, uuids, annotation)
                # reset the vote for the next detection
                self._traj_type = -1
            if stop:
                break

        # storing the data
        counter = 1
        for uuid, value in annotation.iteritems():
            header = Header(counter, rospy.Time.now(), '')
            traj_type = 'human'
            if not value:
                traj_type = 'non-human'
            anno_msg = TrajectoryType(header, uuid, traj_type)
            if uuid in self.have_stored_uuids:
                # message_query must be a dict (it was previously passed as a
                # JSON-formatted string, which never matches any document)
                self._store_client.update(
                    anno_msg, message_query={"uuid": uuid}
                )
            else:
                self._store_client.insert(anno_msg)
            counter += 1

    # function to provide corresponding uuids based on time, human position, and
    # robot's position from UBD
    def _find_traj_frm_pos(self, header, point, robot):
        """Return uuids of trajectories matching a detection, filtering
        successively by timestamp, robot position and human position."""
        uuids = list()
        for uuid, traj in self.trajectories.traj.iteritems():
            stamps = [i[0].header.stamp for i in traj.humrobpose]
            index = self._header_index(header.stamp, stamps)
            points = [i[1].position for i in traj.humrobpose]
            index = self._point_index(robot.position, points, index)
            points = [i[0].pose.position for i in traj.humrobpose]
            index = self._point_index(point, points, index)
            if len(index) != 0:
                uuids.append(uuid.encode('ascii', 'ignore'))
        return uuids

    # function that returns indexes of time stamps whenever time stamp from UBD
    # matches time stamps from trajectories
    def _header_index(self, stamp, stamps):
        """Return indexes of entries in *stamps* within ~1 second of *stamp*."""
        index = list()
        for i in range(len(stamps)):
            # NOTE(review): this guard re-checks stamps[0] every iteration; it
            # looks like it was meant to be stamps[i] (early exit once the
            # trajectory stamps pass the detection stamp). Behaviour kept
            # as-is -- confirm before changing.
            if stamps[0] > stamp:
                break
            if (stamps[i] - stamp).secs in [0, -1]:
                index.append(i)
        return index

    # function that returns indexes of human positions whenever the position
    # provided by UBD matches the positions from trajectories
    def _point_index(self, point, points, index=None):
        """Return the subset of *index* where points[i] lies within 0.1 m
        (per axis) of *point*.

        *index* defaults to an empty list; the original mutable default
        argument (``index=list()``) was shared between calls.
        """
        if index is None:
            index = list()
        index2 = list()
        dist = 0.1
        for i in index:
            if abs(point.x-points[i].x) < dist and abs(point.y-points[i].y) < dist:
                index2.append(i)
        return index2
class TrajectoryOccurrenceFrequencies(object):
    # Periodic model of how often trajectories occur per SOMA region, kept as
    # one Lambda rate object per (region, day, "HH:MM-HH:MM" window) and
    # persisted in the "occurrence_rates" mongo collection.
    #
    # NOTE(review): this code relies on Python 2 integer division in its
    # hour/minute arithmetic (e.g. `minute / 60`); it is not Python 3 safe.
    def __init__(self,
                 soma_map,
                 soma_config,
                 minute_interval=1,
                 window_interval=10,
                 periodic_type="weekly"):
        """
            Initialize a set of trajectory occurrence frequency classified by regions, days, hours, minute intervals respectively.
            Hours will be set by default to [0-23]. Days are set as days of a week [0-6] where Monday is 0.
            Argument periodic_type can be set either 'weekly' or 'monthly'.
        """
        self.soma = soma_map
        self.soma_config = soma_config
        self.periodic_type = periodic_type
        if self.periodic_type == "weekly":
            self.periodic_days = [i for i in range(7)]
        else:
            # NOTE(review): monthly days are indexed 0-30 here, while
            # update_tof_daily keys by datetime.day (1-31) -- confirm whether
            # day 31 is reachable before relying on monthly mode.
            self.periodic_days = [i for i in range(31)]
        # minute_interval: bin width in minutes; window_interval: length in
        # minutes of the sliding window each occurrence rate describes
        self.minute_interval = minute_interval
        self.window_interval = window_interval
        self.ms = MessageStoreProxy(collection="occurrence_rates")
        self.reinit()

    def reinit(self):
        """
            Reinitialising tof to empty.
        """
        # tof structure: {region: {day: {"HH:MM-HH:MM": Lambda}}}
        self.tof = dict()

    def load_tof(self):
        """
            Load trajectory occurrence frequency from mongodb occurrence_rates collection.
        """
        rospy.loginfo(
            "Retrieving trajectory occurrence frequencies from database...")
        query = {
            "soma": self.soma,
            "soma_config": self.soma_config,
            "periodic_type": self.periodic_type,
            "duration.secs": self.window_interval * 60
        }
        logs = self.ms.query(OccurrenceRate._type, message_query=query)
        if len(logs) == 0:
            rospy.logwarn(
                "No data for %s with config %s and periodicity type %s" %
                (self.soma, self.soma_config, self.periodic_type))
            return

        for i in logs:
            if i[0].region_id not in self.tof:
                self.init_region_tof(i[0].region_id)
            # reconstruct the "HH:MM-HH:MM" window key from the stored
            # hour/minute plus the window length; [:-3] strips the seconds
            # from the HH:MM:SS isoformat
            end_hour = (i[0].hour +
                        ((i[0].minute + self.window_interval) / 60)) % 24
            end_minute = (i[0].minute + self.window_interval) % 60
            key = "%s-%s" % (
                datetime.time(i[0].hour, i[0].minute).isoformat()[:-3],
                datetime.time(end_hour, end_minute).isoformat()[:-3])
            # only windows pre-created by init_region_tof are filled; stored
            # entries with a non-matching key are silently ignored
            if key in self.tof[i[0].region_id][i[0].day]:
                self.tof[i[0].region_id][
                    i[0].day][key].occurrence_shape = i[0].occurrence_shape
                self.tof[i[0].region_id][
                    i[0].day][key].occurrence_scale = i[0].occurrence_scale
                self.tof[i[0].region_id][i[0].day][key].set_occurrence_rate(
                    i[0].occurrence_rate)
        rospy.loginfo("Retrieving is complete...")

    def update_tof_daily(self, curr_day_data, prev_day_data, curr_date):
        """
            Update trajectory occurrence frequency for one day. Updating the current day,
            tof needs information regarding the number of trajectory from the previous day as well.
            The form for curr_day_data and prev_day_data is {reg{date[hour{minute}]}}.
        """
        rospy.loginfo("Daily update for trajectory occurrence frequency...")
        for reg, hourly_traj in curr_day_data.iteritems():
            date = curr_date.day
            if self.periodic_type == "weekly":
                date = curr_date.weekday()
            # seed the sliding window with the tail of the previous day so the
            # first windows of this day are complete
            prev_day_n_min_traj = previous_n_minutes_trajs(
                prev_day_data[reg], self.window_interval, self.minute_interval)
            self._update_tof(reg, date, hourly_traj, prev_day_n_min_traj)
        rospy.loginfo("Daily update is complete...")

    def _update_tof(self, reg, date, hourly_traj, prev_n_min_traj):
        # Slide a ring buffer of the last (window/interval) per-interval
        # trajectory counts over the day and feed each complete window's total
        # to the matching Lambda; -1 marks "no observation" for an interval.
        length = (self.window_interval / self.minute_interval)
        temp_data = prev_n_min_traj + [-1]
        pointer = length - 1
        for hour, mins_traj in enumerate(hourly_traj):
            minutes = sorted(mins_traj)
            for mins in minutes:
                traj = mins_traj[mins]
                temp_data[pointer % length] = traj
                pointer += 1
                if reg not in self.tof:
                    self.init_region_tof(reg)
                # skip windows with no observed interval at all
                if sum(temp_data) == (-1 * length):
                    continue
                else:
                    # scale the observed count up to compensate for the
                    # unobserved (-1) intervals within the window
                    total_traj = length / float(
                        length + sum([i for i in temp_data if i == -1]))
                    total_traj = math.ceil(
                        total_traj * sum([i for i in temp_data if i != -1]))
                # build the "HH:MM-HH:MM" key for the window ending at
                # (hour, mins); Python 2 integer division keeps values whole
                temp = [
                    hour + (mins - self.window_interval) / 60,
                    (mins - self.window_interval) % 60
                ]
                hour = (hour + (mins / 60)) % 24
                key = "%s-%s" % (
                    datetime.time(temp[0] % 24, temp[1]).isoformat()[:-3],
                    datetime.time(hour, mins % 60).isoformat()[:-3])
                self.tof[reg][date][key].update_lambda([total_traj])

    def init_region_tof(self, reg):
        """
            Initialize trajectory occurrence frequency for one whole region.
            {region: daily_tof}
        """
        daily_tof = dict()
        for j in self.periodic_days:
            hourly_tof = dict()
            # one window key per minute_interval step across 24 hours
            for i in range(24 * (60 / self.minute_interval) + 1):
                hour = i / (60 / self.minute_interval)
                minute = (self.minute_interval * i) % 60
                # window start: window_interval minutes before (hour, minute)
                temp = [
                    hour + (minute - self.window_interval) / 60,
                    (minute - self.window_interval) % 60
                ]
                key = "%s-%s" % (
                    datetime.time(temp[0] % 24, temp[1]).isoformat()[:-3],
                    datetime.time(hour % 24, minute).isoformat()[:-3])
                hourly_tof.update(
                    # {key: Lambda(self.window_interval / float(60))}
                    {key: Lambda()})

            daily_tof.update({j: hourly_tof})
        self.tof.update({reg: daily_tof})

    def store_tof(self):
        """
            Store self.tof into mongodb in occurrence_rates collection
        """
        rospy.loginfo("Storing to database...")
        for reg, daily_tof in self.tof.iteritems():
            for day, hourly_tof in daily_tof.iteritems():
                for window_time, lmbd in hourly_tof.iteritems():
                    self._store_tof(reg, day, window_time, lmbd)
        rospy.loginfo("Storing is complete...")

    # helper function of store_tof: upsert one (region, day, window) rate
    def _store_tof(self, reg, day, window_time, lmbd):
        # window_time has the form "HH:MM-HH:MM"
        start_time, end_time = string.split(window_time, "-")
        start_hour, start_min = string.split(start_time, ":")
        end_hour, end_min = string.split(end_time, ":")
        occu_msg = OccurrenceRate(self.soma, self.soma_config,
                                  reg.encode("ascii"), day, int(start_hour),
                                  int(start_min),
                                  rospy.Duration(self.window_interval * 60),
                                  lmbd.occurrence_shape, lmbd.occurrence_scale,
                                  lmbd.get_occurrence_rate(),
                                  self.periodic_type)
        query = {
            "soma": self.soma,
            "soma_config": self.soma_config,
            "region_id": reg,
            "day": day,
            "hour": int(start_hour),
            "minute": int(start_min),
            "duration.secs": rospy.Duration(self.window_interval * 60).secs,
            "periodic_type": self.periodic_type
        }
        # as we use MAP, then the posterior probability mode (gamma mode) is the
        # one we save. However, if gamma map is less than default (initial value) or -1
        # (result from an update to gamma where occurrence_shape < 1), we decide to ignore
        # them.
        temp = Lambda()
        if lmbd.get_occurrence_rate() > temp.get_occurrence_rate():
            # upsert behaviour: update an existing entry, insert otherwise
            if len(self.ms.query(OccurrenceRate._type,
                                 message_query=query)) > 0:
                self.ms.update(occu_msg, message_query=query)
            else:
                self.ms.insert(occu_msg)
class RegionObservationTimeManager(object):
    """Computes and persists how long the robot has observed each SOMA
    region, binned per day / hour / minute interval.

    Durations are either loaded from the "region_observation_time" mongo
    collection or recomputed from raw robot poses logged in
    roslog.robot_pose.
    """

    def __init__(self, soma_map, soma_config):
        self.soma_map = soma_map
        self.soma_config = soma_config
        self.ms = MessageStoreProxy(collection="region_observation_time")
        self.gs = GeoSpatialStoreProxy('geospatial_store', 'soma')
        # raw robot poses logged by mongodb (roslog database)
        self.roslog = pymongo.MongoClient(
            rospy.get_param("mongodb_host", "localhost"),
            rospy.get_param("mongodb_port", 62345)
        ).roslog.robot_pose
        self.reinit()

    def reinit(self):
        """Reset cached durations and the day -> (month, year) bookkeeping."""
        # structure: {day: {region: [24 x {minute_bin: seconds}]}}
        self.region_observation_duration = dict()
        self._month_year_observation_taken = dict()

    def load_from_mongo(self, days, minute_interval=60):
        """
            Load robot-region observation time from database (mongodb) and store them in
            self.region_observation_duration.
            Returning (region observation time, total duration of observation)
        """
        roi_region_daily = dict()
        total_duration = 0
        self.minute_interval = minute_interval
        for i in days:
            end_date = i + datetime.timedelta(hours=24)
            rospy.loginfo(
                "Load region observation time from %s to %s..." % (str(i), str(end_date))
            )
            query = {
                "soma": self.soma_map, "soma_config": self.soma_config,
                "start_from.secs": {
                    "$gte": time.mktime(i.timetuple()),
                    "$lt": time.mktime(end_date.timetuple())
                }
            }
            roi_reg_hourly = dict()
            for log in self.ms.query(RegionObservationTime._type, message_query=query):
                end = datetime.datetime.fromtimestamp(log[0].until.secs)
                if log[0].region_id not in roi_reg_hourly:
                    temp = list()
                    start = datetime.datetime.fromtimestamp(log[0].start_from.secs)
                    # stored entries whose length does not match the requested
                    # interval are skipped entirely
                    interval = (end.minute + 1) - start.minute
                    if interval != minute_interval:
                        continue
                    for j in range(24):
                        group_mins = {k*interval: 0 for k in range(1, (60/interval)+1)}
                        temp.append(group_mins)
                    # register once the 24-hour structure is complete (the
                    # original updated redundantly inside the loop above)
                    roi_reg_hourly.update({log[0].region_id: temp})
                roi_reg_hourly[log[0].region_id][end.hour][end.minute+1] = log[0].duration.secs
                roi_reg_hourly[log[0].region_id][end.hour][end.minute+1] += 0.000000001 * log[0].duration.nsecs
                total_duration += log[0].duration.secs
            roi_region_daily.update({i.day: roi_reg_hourly})
        self.region_observation_duration = roi_region_daily
        return roi_region_daily, total_duration

    def store_to_mongo(self):
        """
            Store region observation time from self.region_observation_duration to mongodb.
            It will store soma map, soma configuration, region_id, the starting and end time where
            robot sees a region in some interval, and the duration of how long the robot during
            the interval.
        """
        rospy.loginfo("Storing region observation time to region_observation_time collection...")
        data = self.region_observation_duration
        try:
            minute_interval = self.minute_interval
        # minute_interval is only set by load_from_mongo /
        # calculate_region_observation_duration (was a bare except:)
        except AttributeError:
            rospy.logwarn("Minute interval is not defined, please call either load_from_mongo or calculate_region_observation_duration first")
            return

        for day, daily_data in data.iteritems():
            for reg, reg_data in daily_data.iteritems():
                for hour, hourly_data in enumerate(reg_data):
                    for minute, duration in hourly_data.iteritems():
                        # each minute bin marks the *end* of an interval; the
                        # stored record covers [until - interval + 1s, until]
                        date_until = datetime.datetime(
                            self._month_year_observation_taken[day][1],
                            self._month_year_observation_taken[day][0],
                            day, hour, minute-1,
                            59
                        )
                        until = time.mktime(date_until.timetuple())
                        start_from = until - (60 * minute_interval) + 1
                        msg = RegionObservationTime(
                            self.soma_map, self.soma_config, reg,
                            rospy.Time(start_from), rospy.Time(until),
                            rospy.Duration(duration)
                        )
                        self._store_to_mongo(msg)

    def _store_to_mongo(self, msg):
        """Upsert *msg* keyed by (soma, config, region, start, until)."""
        query = {
            "soma": msg.soma, "soma_config": msg.soma_config,
            "region_id": msg.region_id, "start_from.secs": msg.start_from.secs,
            "until.secs": msg.until.secs
        }
        # NOTE(review): only the nanosecond part is checked, so whole-second
        # durations with nsecs == 0 are never stored -- confirm this is
        # intended before changing; behaviour kept as-is.
        if msg.duration.nsecs > 0:
            if len(self.ms.query(RegionObservationTime._type, message_query=query)) > 0:
                self.ms.update(msg, message_query=query)
            else:
                self.ms.insert(msg)

    def calculate_region_observation_duration(self, days, minute_interval=60):
        """
            Calculating the region observation duration for particular days, splitted by
            minute_interval.
            Returns the ROIs the robot has monitored at each logged robot pose
            for particular days specified up to minutes interval.
        """
        rospy.loginfo('Calculation region observation duration...')
        roi_region_daily = dict()
        self.minute_interval = minute_interval

        for i in days:
            # prefer previously stored observation times; fall back to
            # recomputing from raw robot poses when nothing was stored
            loaded_roi_reg_day = self.load_from_mongo([i], minute_interval)
            if loaded_roi_reg_day[1] == 0:
                end_date = i + datetime.timedelta(hours=24)
                roi_region_hourly = self._get_robot_region_stay_duration(i, end_date, minute_interval)
                roi_region_daily.update({i.day: roi_region_hourly})
            else:
                roi_region_daily.update({i.day: loaded_roi_reg_day[0][i.day]})
            self._month_year_observation_taken.update({i.day: (i.month, i.year)})
        self.region_observation_duration = roi_region_daily
        return roi_region_daily

    # hidden function for get_robot_region_stay_duration
    def _get_robot_region_stay_duration(self, start_date, end_date, minute_interval=60):
        """Count logged robot poses (every `sampling_rate`-th pose) per
        observed region / hour / minute bin, then normalise the counts."""
        sampling_rate = 10
        roi_temp_list = dict()
        rospy.loginfo("Querying from " + str(start_date) + " to " + str(end_date))

        query = {
            "_id": {"$exists": "true"},
            "_meta.inserted_at": {"$gte": start_date, "$lt": end_date}
        }

        for i, pose in enumerate(self.roslog.find(query)):
            if i % sampling_rate == 0:
                _, _, yaw = euler_from_quaternion(
                    [0, 0, pose['orientation']['z'], pose['orientation']['w']]
                )
                # polygon of the area the robot can see from this pose
                coords = robot_view_cone(pose['position']['x'], pose['position']['y'], yaw)
                langitude_latitude = list()
                for pt in coords:
                    langitude_latitude.append(self.gs.coords_to_lnglat(pt[0], pt[1]))
                # close the polygon with the first vertex
                langitude_latitude.append(self.gs.coords_to_lnglat(coords[0][0], coords[0][1]))

                for j in self.gs.observed_roi(langitude_latitude, self.soma_map, self.soma_config):
                    region = str(j['soma_roi_id'])
                    hour = pose['_meta']['inserted_at'].time().hour
                    minute = pose['_meta']['inserted_at'].time().minute

                    # Region Knowledge per hour. Bin them by hour and minute_interval.
                    if region not in roi_temp_list:
                        temp = list()
                        # was "for i in range(24)", which clobbered the outer
                        # enumerate index in Python 2
                        for _ in range(24):
                            group_mins = {
                                k*minute_interval: 0 for k in range(1, (60/minute_interval)+1)
                            }
                            temp.append(group_mins)
                        roi_temp_list[region] = temp
                    index = ((minute / minute_interval) + 1) * minute_interval
                    roi_temp_list[region][hour][index] += 1

        roi_temp_list = self._normalizing(roi_temp_list, sampling_rate)
        rospy.loginfo("Region observation duration for the query is " + str(roi_temp_list))
        return roi_temp_list

    def _normalizing(self, roi_temp_list, sampling_rate):
        """
            Checking if all robot region relation based on its stay duration is capped
            by minute_interval * 60 (total seconds). If it is not, then normalization is applied
        """
        regions = roi_temp_list.keys()
        _hourly_poses = [0] * 24
        for i in range(24):
            for region in regions:
                _hourly_poses[i] += sum(roi_temp_list[region][i].values())

        # normalise only if any hour accumulated more than 3601 pose-counts
        normalizing = sum([1 for i in _hourly_poses if i > 3601]) > 0
        max_hourly_poses = max(_hourly_poses)

        for reg, hourly_poses in roi_temp_list.iteritems():
            if normalizing:
                for ind, val in enumerate(hourly_poses):
                    for minute, seconds in val.iteritems():
                        roi_temp_list[reg][ind][minute] = 3600 / float(max_hourly_poses) * seconds
        return roi_temp_list
# Пример #28
# 0
class SkeletonImageLogger(object):
    """Used to store rgb images/skeleton data recorded during the deployment
       Also needs to request consent from the person who was stored, before keeping it.
    """

    def __init__(self, detection_threshold = 1000, camera='head_xtion', database='message_store', collection='consent_images'):
        self.nav_goal_waypoint = None
        self.camera = camera
        self.baseFrame = '/'+self.camera+'_depth_optical_frame'
        self.joints = ['head', 'neck', 'torso', 'left_shoulder', 'left_elbow', 'left_hand',
                'left_hip', 'left_knee', 'left_foot', 'right_shoulder', 'right_elbow',
                'right_hand', 'right_hip', 'right_knee', 'right_foot']

        # directory to store the data
        self.date = str(datetime.datetime.now().date())
        # self.dir1 = '/home/lucie02/Datasets/Lucie/'+self.date+'/'

        self.dir1 = '/home/' + getpass.getuser() + '/SkeletonDataset/pre_consent/' + self.date+'/'
        print 'checking if folder exists:', self.dir1
        if not os.path.exists(self.dir1):
            print '  -create folder:',self.dir1
            os.makedirs(self.dir1)

        self.filepath = os.path.join(roslib.packages.get_pkg_dir("skeleton_tracker"), "config")
        try:
            self.config = yaml.load(open(os.path.join(self.filepath, 'config.ini'), 'r'))
        except:
            print "no config file found"

        # PTU state - based upon current_node callback
        self.ptu_action_client = actionlib.SimpleActionClient('/SetPTUState', PtuGotoAction)
        self.ptu_action_client.wait_for_server()

        # get the last skeleton recorded
        self.sk_mapping = {}

        # flags to make sure we recived every thing
        self._flag_robot = 0
        self._flag_rgb = 0
        self._flag_rgb_sk = 0
        self._flag_depth = 0
        self.request_sent_flag = 0
        self.after_a_number_of_frames = detection_threshold
        self.consent_ret = None

        # opencv stuff
        self.cv_bridge = CvBridge()

        # mongo store
        self.msg_store = MessageStoreProxy(collection=collection, database=database)

        # publishers
        self.publish_consent_req = rospy.Publisher('skeleton_data/consent_req', String, queue_size = 10)
        self.publish_consent_req.publish("init")
        self.publish_consent_pose = rospy.Publisher('skeleton_data/consent_pose', PoseStamped, queue_size = 10, latch=True)

        # listeners
        # rospy.Subscriber("/current_node", String, callback=self.curr_node_callback, queue_size=1)
        # rospy.Subscriber("/closest_node", String, callback=self.close_node_callback, queue_size=1)
        rospy.Subscriber("/robot_pose", Pose, callback=self.robot_callback, queue_size=10)
        # rospy.Subscriber('skeleton_data/incremental', skeleton_message,callback=self.incremental_callback, queue_size = 10)
        rospy.Subscriber('skeleton_data/complete', skeleton_complete,callback=self.complete_callback, queue_size = 10)
        rospy.Subscriber("/skeleton_data/consent_ret", String, callback=self.consent_ret_callback, queue_size=1)
        rospy.Subscriber('/'+self.camera+'/rgb/image_color', sensor_msgs.msg.Image, callback=self.rgb_callback, queue_size=10)
        rospy.Subscriber('/'+self.camera+'/rgb/sk_tracks', sensor_msgs.msg.Image, callback=self.rgb_sk_callback, queue_size=10)
        rospy.Subscriber('/'+self.camera+'/rgb/white_sk_tracks', sensor_msgs.msg.Image, callback=self.white_sk_callback, queue_size=10)
        rospy.Subscriber('/'+self.camera+'/depth/image' , sensor_msgs.msg.Image, self.depth_callback, queue_size=10)

		# PTU state
        self.ptu_action_client = actionlib.SimpleActionClient('/SetPTUState', PtuGotoAction)
        self.ptu_action_client.wait_for_server()

        # gazing action server
        self.gaze_client()

        # topo nav move
        self.nav_client()

        # speak
        self.speak()


    def robot_callback(self, msg):
        """Cache the latest robot pose; log once on the first message."""
        self.robot_pose = msg
        if not self._flag_robot:
            print('robot pose recived')
            self._flag_robot = 1


    def callback(self, msg, waypoint):
        self.inc_sk = msg
        if str(datetime.datetime.now().date()) != self.date:
            print 'new day!'
            self.date = str(datetime.datetime.now().date())
            self.dir1 = '/home/' + getpass.getuser() + '/SkeletonDataset/pre_consent/'+self.date+'/'
            print 'checking if folder exists:',self.dir1
            if not os.path.exists(self.dir1):
                print '  -create folder:',self.dir1
                os.makedirs(self.dir1)
        # print self.inc_sk.uuid
        if self._flag_robot and self._flag_rgb and self._flag_rgb_sk:
            if self.inc_sk.uuid not in self.sk_mapping:
                self.sk_mapping[self.inc_sk.uuid] = {}
                self.sk_mapping[self.inc_sk.uuid]['frame'] = 1
                self.sk_mapping[self.inc_sk.uuid]['time'] = str(datetime.datetime.now().time()).split('.')[0]+'_'
                t = self.sk_mapping[self.inc_sk.uuid]['time']
                print '  -new skeleton detected with id:', self.inc_sk.uuid
                # print '  -creating folder:',t+self.inc_sk.uuid
                if not os.path.exists(self.dir1+t+self.inc_sk.uuid):
                    os.makedirs(self.dir1+t+self.inc_sk.uuid)
                    os.makedirs(self.dir1+t+self.inc_sk.uuid+'/rgb')
                    os.makedirs(self.dir1+t+self.inc_sk.uuid+'/depth')
                    os.makedirs(self.dir1+t+self.inc_sk.uuid+'/rgb_sk')
                    os.makedirs(self.dir1+t+self.inc_sk.uuid+'/robot')
                    os.makedirs(self.dir1+t+self.inc_sk.uuid+'/skeleton')

                    # create the empty bag file (closed in /skeleton_action)
                    self.bag_file = rosbag.Bag(self.dir1+t+self.inc_sk.uuid+'/detection.bag', 'w')

            t = self.sk_mapping[self.inc_sk.uuid]['time']
            if os.path.exists(self.dir1+t+self.inc_sk.uuid):
                # setup saving dir and frame
                d = self.dir1+t+self.inc_sk.uuid+'/'
                f = self.sk_mapping[self.inc_sk.uuid]['frame']
                if f < 10:          f_str = '0000'+str(f)
                elif f < 100:          f_str = '000'+str(f)
                elif f < 1000:          f_str = '00'+str(f)
                elif f < 10000:          f_str = '0'+str(f)
                elif f < 100000:          f_str = str(f)

                # save rgb image
                # todo: make these rosbags sometime in the future
                cv2.imwrite(d+'rgb/rgb_'+f_str+'.jpg', self.rgb)
                cv2.imwrite(d+'depth/depth_'+f_str+'.jpg', self.xtion_img_d_rgb)
                cv2.imwrite(d+'rgb_sk/sk_'+f_str+'.jpg', self.rgb_sk)

                try:
                    self.bag_file.write('rgb', self.rgb_msg)
                    self.bag_file.write('depth', self.depth_msg)
                    self.bag_file.write('rgb_sk', self.rgb_sk_msg)
                except:
                    rospy.logwarn("Can not write rgb, depth, and rgb_sk to a bag file.")

                # save robot_pose in bag file
                x=float(self.robot_pose.position.x)
                y=float(self.robot_pose.position.y)
                z=float(self.robot_pose.position.z)
                xo=float(self.robot_pose.orientation.x)
                yo=float(self.robot_pose.orientation.y)
                zo=float(self.robot_pose.orientation.z)
                wo=float(self.robot_pose.orientation.w)
                p = Point(x, y, z)
                q = Quaternion(xo, yo, zo, wo)
                robot = Pose(p,q)
                self.bag_file.write('robot', robot)

                # save robot_pose in text file
                f1 = open(d+'robot/robot_'+f_str+'.txt','w')
                f1.write('position\n')
                f1.write('x:'+str(x)+'\n')
                f1.write('y:'+str(y)+'\n')
                f1.write('z:'+str(z)+'\n')
                f1.write('orientation\n')
                f1.write('x:'+str(xo)+'\n')
                f1.write('y:'+str(yo)+'\n')
                f1.write('z:'+str(zo)+'\n')
                f1.write('w:'+str(wo)+'\n')
                f1.close()

                # save skeleton data in bag file
                #x=float(self.robot_pose.position.x)
                #y=float(self.robot_pose.position.y)
                #z=float(self.robot_pose.position.z)
                #xo=float(self.robot_pose.orientation.x)
                #yo=float(self.robot_pose.orientation.y)
                #zo=float(self.robot_pose.orientation.z)
                #wo=float(self.robot_pose.orientation.w)
                #p = Point(x, y, z)
                #q = Quaternion(xo, yo, zo, wo)
                #skel = Pose(p,q)
                #bag.write('skeleton', skel)


                # save skeleton datain text file
                f1 = open(d+'skeleton/skl_'+f_str+'.txt','w')
                # print self.inc_sk.joints[0]
                f1.write('time:'+str(self.inc_sk.joints[0].time.secs)+'.'+str(self.inc_sk.joints[0].time.nsecs)+'\n')
                for i in self.inc_sk.joints:
                    f1.write(i.name+'\n')
                    f1.write('position\n')
                    f1.write('x:'+str(i.pose.position.x)+'\n')
                    f1.write('y:'+str(i.pose.position.y)+'\n')
                    f1.write('z:'+str(i.pose.position.z)+'\n')
                    f1.write('orientation\n')
                    f1.write('x:'+str(i.pose.orientation.x)+'\n')
                    f1.write('y:'+str(i.pose.orientation.y)+'\n')
                    f1.write('z:'+str(i.pose.orientation.z)+'\n')
                    f1.write('w:'+str(i.pose.orientation.w)+'\n')
                f1.close()

                # update frame number
                if self.inc_sk.uuid in self.sk_mapping:
                    self.sk_mapping[self.inc_sk.uuid]['frame'] += 1

                #publish the gaze request of person on every detection:
                if self.inc_sk.joints[0].name == 'head':
                    head = Header(frame_id='head_xtion_depth_optical_frame')
                    look_at_pose = PoseStamped(header = head, pose=self.inc_sk.joints[0].pose)
                    self.publish_consent_pose.publish(look_at_pose)
                #self.gazeClient.send_goal(self.gazegoal)

                # all this should happen given a good number of detections:
                print "%s out of %d frames are obtained" % (self.sk_mapping[self.inc_sk.uuid]['frame'], self.after_a_number_of_frames)
                if self.sk_mapping[self.inc_sk.uuid]['frame'] == self.after_a_number_of_frames and self.request_sent_flag == 0:
                    print "storing the %sth image to mongo for the webserver..." % self.after_a_number_of_frames
                    # Skeleton on white background
                    query = {"_meta.image_type": "white_sk_image"}
                    white_sk_to_mongo =  self.msg_store.update(message=self.white_sk_msg, meta={'image_type':"white_sk_image"}, message_query=query, upsert=True)
                    # Skeleton on rgb background
                    query = {"_meta.image_type": "rgb_sk_image"}
                    rgb_sk_img_to_mongo = self.msg_store.update(message=self.rgb_sk_msg, meta={'image_type':"rgb_sk_image"}, message_query=query, upsert=True)
                    # Skeleton on depth background
                    query = {"_meta.image_type": "depth_image"}
                    depth_img_to_mongo = self.msg_store.update(message=self.depth_msg, meta={'image_type':"depth_image"}, message_query=query, upsert=True)

                    consent_msg = "Check_consent_%s" % (t)
                    print consent_msg
                    # I think this should be a service call - so it definetly returns a value.
                    self.request_sent_flag = 1
                    # move and speak: (if no target waypoint, go to original waypoint)
                    # self.reset_ptu()
                    try:
                        self.navgoal.target = self.config[waypoint]['target']
                    except:
                        self.navgoal.target = waypoint
                    if self.navgoal.target != waypoint:
                        self.nav_goal_waypoint = waypoint  #to return to after consent
                        self.navClient.send_goal(self.navgoal)
                        result = self.navClient.wait_for_result()
                        if not result:
                            self.go_back_to_where_I_came_from()

                    self.publish_consent_req.publish(consent_msg)
                    rospy.sleep(0.1)
                    if self.request_sent_flag:
                        self.speaker.send_goal(maryttsGoal(text=self.speech))
                    while self.consent_ret is None:
                        rospy.sleep(0.1)

                    # Move Eyes - look up and down to draw attension.
        return self.consent_ret

    def go_back_to_where_I_came_from(self):
        """Navigate back to the waypoint recorded before the consent detour.

        Does nothing when no waypoint was recorded, or when the configured
        target for that waypoint is the waypoint itself.
        """
        if self.nav_goal_waypoint is None:
            return
        try:
            # configured 'target' node for the waypoint we came from
            target = self.config[self.nav_goal_waypoint]['target']
        except (KeyError, TypeError):
            # The original evaluated this lookup in the guard expression, so an
            # unknown waypoint raised an unhandled KeyError; stay put instead.
            print("nav goal not set - staying at %s" % self.navgoal.target)
            return
        if self.nav_goal_waypoint != target:
            self.navgoal.target = target
            self.navClient.send_goal(self.navgoal)
            self.navClient.wait_for_result()

    def consent_ret_callback(self, msg):
        """Store the consent reply, thank the user, and drive back to the
        waypoint we came from. Replies are ignored while no request is pending."""
        if not self.request_sent_flag:
            return
        print("got consent ret callback, %s" % msg)
        self.consent_ret = msg
        # Acknowledge the answer, then return to the previous waypoint.
        self.speaker.send_goal(maryttsGoal(text="Thank you"))
        self.go_back_to_where_I_came_from()

    def reset_ptu(self):
        """Drive the pan-tilt unit back to its neutral (0, 0) pose."""
        goal = PtuGotoGoal()
        goal.pan, goal.tilt = 0, 0
        goal.pan_vel, goal.tilt_vel = 30, 30
        self.ptu_action_client.send_goal(goal)

    def gaze_client(self):
        """Ask the gaze action server to track the consent pose topic for 60s."""
        rospy.loginfo("Creating gaze client")
        client = actionlib.SimpleActionClient('gaze_at_pose',
                                              strands_gazing.msg.GazeAtPoseAction)
        client.wait_for_server()
        goal = strands_gazing.msg.GazeAtPoseGoal()
        goal.topic_name = '/skeleton_data/consent_pose'
        goal.runtime_sec = 60
        client.send_goal(goal)

    def nav_client(self):
        """Create the topological-navigation action client and a reusable,
        initially empty, goal object (filled in by the callers)."""
        rospy.loginfo("Creating nav client")
        self.navClient = actionlib.SimpleActionClient(
            'topological_navigation',
            topological_navigation.msg.GotoNodeAction)
        self.navClient.wait_for_server()
        self.navgoal = topological_navigation.msg.GotoNodeGoal()

    def speak(self):
        """Create the marytts speech client, blocking (in 1s retries) until the
        server appears, and prepare the consent sentence."""
        self.speaker = actionlib.SimpleActionClient('/speak', maryttsAction)
        one_second = rospy.Duration(1)
        connected = self.speaker.wait_for_server(one_second)
        while not connected:
            rospy.loginfo("Data Consent is waiting for marytts action...")
            connected = self.speaker.wait_for_server(one_second)
            # bail out on shutdown; self.speech is deliberately left unset then
            if rospy.is_shutdown():
                return
        self.speech = "Please may I get your consent to store data I just recorded."

    def complete_callback(self, msg):
        # NOTE(review): this function looks garbled in the source - the name
        # says "complete" but the body only raises the rgb-received flag, and
        # the comment below contains a '******' redaction artefact. It appears
        # an rgb_callback body was merged/redacted into this def - confirm
        # against the original repository before relying on it.
        # print '  -stopped logging user:'******'rgb recived'
            self._flag_rgb = 1

    def rgb_sk_callback(self, msg1):
        """Cache the skeleton-overlay RGB message and a BGR copy for OpenCV."""
        self.rgb_sk_msg = msg1
        frame = self.cv_bridge.imgmsg_to_cv2(msg1, desired_encoding="passthrough")
        self.rgb_sk = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        if not self._flag_rgb_sk:
            print('rgb+sk recived')
            self._flag_rgb_sk = 1

    def white_sk_callback(self, msg1):
        """Cache the latest skeleton-on-white-background image message."""
        self.white_sk_msg = msg1

    def depth_callback(self, imgmsg):
        """Cache the depth message and derive a 0-255 visualisation image."""
        self.depth_msg = imgmsg
        raw = self.cv_bridge.imgmsg_to_cv2(imgmsg, desired_encoding="passthrough")
        scaled = np.array(raw, dtype=np.float32)
        # min-max normalise in place to [0, 1], then stretch to [0, 255]
        cv2.normalize(scaled, scaled, 0, 1, cv2.NORM_MINMAX)
        self.xtion_img_d_rgb = scaled * 255
        if not self._flag_depth:
            print('depth recived')
            self._flag_depth = 1
#! /usr/bin/env python
"""Reset the derived-data flags on every HumanActivities record in the
message store so the learning pipeline will process them again."""
import os, sys
import rospy
from mongodb_store.message_store import MessageStoreProxy
from activity_data.msg import HumanActivities

if __name__ == "__main__":
    rospy.init_node('insert_msgs_to_mongo')
    store = MessageStoreProxy(database='message_store',
                              collection='activity_learning')

    for msg, meta in store.query(type=HumanActivities._type):
        print(msg.uuid)
        # clear every per-stage flag so each stage runs again
        msg.qsrs = False
        msg.activity = False
        msg.topics = []
        msg.hier_topics = []
        # msg.cpm = False
        store.update(message_query={"uuid": msg.uuid}, message=msg)
Пример #30
0
class dataReader(object):
    def __init__(self):

        self.skeleton_data = {
        }  #keeps the published complete skeleton in a dictionary. key = uuid
        self.rate = rospy.Rate(15.0)

        ## listeners:
        rospy.Subscriber("skeleton_data/complete", skeleton_complete,
                         self.skeleton_callback)

        ## Logging params:
        self._with_mongodb = rospy.get_param("~with_mongodb", "true")
        self._logging = rospy.get_param("~logging", "false")
        self._message_store = rospy.get_param("~message_store_prefix",
                                              "people_skeleton")

        if self._logging:
            msg_store = self._message_store + "_world_state"
            rospy.loginfo("Connecting to mongodb...%s" % msg_store)
            self._store_client_world = MessageStoreProxy(collection=msg_store)

            msg_store = self._message_store + "_qstag"
            self._store_client_qstag = MessageStoreProxy(collection=msg_store)

        ## QSR INFO:
        self._which_qsr = rospy.get_param("~which_qsr", "rcc3")
        #self.which_qsr = ["qtcbs", "argd"]

        self.set_params()
        self.cln = QSRlib_ROS_Client()

    def set_params(self):
        quantisation_factor = 0.01
        validate = False
        no_collapse = True
        argd_params = {"Touch": 0.5, "Near": 2, "Far": 3}
        qstag_params = {"min_rows": 1, "max_rows": 1, "max_eps": 5}
        qsrs_for = [("head", "right_hand"), ("head", "left_hand"),
                    ("left_hand", "right_hand")]
        object_types = {
            "right_hand": "hand",
            "left_hand": "hand",
            "head": "head"
        }
        self.dynamic_args = {
            "qtcbs": {
                "quantisation_factor": quantisation_factor,
                "validate": validate,
                "no_collapse": no_collapse,
                "qsrs_for": qsrs_for
            },
            "rcc3": {
                "qsrs_for": qsrs_for
            },
            "argd": {
                "qsr_relations_and_values": argd_params
            },
            "qstag": {
                "object_types": object_types,
                "params": qstag_params
            },
            "filters": {
                "median_filter": {
                    "window": 3
                }
            }
        }

    def _create_qsrs(self):
        while not rospy.is_shutdown():

            for uuid, msg_data in self.skeleton_data.items():
                if msg_data["flag"] != 1: continue
                print ">> recieving worlds for:", uuid
                qsrlib_response_message = self._call_qsrLib(uuid, msg_data)

                if self._logging:
                    print msg_data.keys()
                    self.upload_qsrs_to_mongo(uuid,
                                              qsrlib_response_message.qstag,
                                              msg_data)
                print ">>>", qsrlib_response_message.qstag.episodes
                print ">", qsrlib_response_message.qstag.graphlets.histogram
                #print ">>>", qsrlib_response_message.qstag.graph
                del self.skeleton_data[uuid]

            self.rate.sleep()

    def skeleton_callback(self, msg):
        self.skeleton_data[msg.uuid] = {
            "flag": 1,
            "detections": msg.number_of_detections,
            "map": msg.map_name,
            "current_node": msg.current_topo_node,
            "start_time": msg.start_time,
            "end_time": msg.end_time,
            "world": self.convert_skeleton_to_world(msg.skeleton_data)
        }

    def convert_skeleton_to_world(self, data, use_every=1):
        world = World_Trace()

        joint_states = {
            'head': [],
            'neck': [],
            'torso': [],
            'left_shoulder': [],
            'left_elbow': [],
            'left_hand': [],
            'left_hip': [],
            'left_knee': [],
            'left_foot': [],
            'right_shoulder': [],
            'right_elbow': [],
            'right_hand': [],
            'right_hip': [],
            'right_knee': [],
            'right_foot': []
        }

        for tp, msg in enumerate(data):
            for i in msg.joints:
                o = Object_State(name=i.name, timestamp=tp, x=i.pose.position.x,\
                                y = i.pose.position.y, z=i.pose.position.z, xsize=0.1, ysize=0.1, zsize=0.1)
                joint_states[i.name].append(o)

        for joint_data in joint_states.values():
            world.add_object_state_series(joint_data)
        return world

    def _call_qsrLib(self, uuid, msg_data):

        qsrlib_request_message = QSRlib_Request_Message(which_qsr=self._which_qsr, input_data=msg_data["world"], \
                                dynamic_args=self.dynamic_args)
        req = self.cln.make_ros_request_message(qsrlib_request_message)
        if self._logging:
            print "logging the world trace"
            msg = QSRlibMongo(uuid=uuid,
                              data=pickle.dumps(msg_data["world"]),
                              start_time=msg_data["start_time"],
                              end_time=msg_data["end_time"])
            query = {"uuid": uuid}
            self._store_client_world.update(message=msg,
                                            message_query=query,
                                            upsert=True)

        res = self.cln.request_qsrs(req)
        qsrlib_response_message = pickle.loads(res.data)
        return qsrlib_response_message

    def upload_qsrs_to_mongo(self, uuid, qstag, msg_data):
        print "logging the QSTAG"
        eps = pickle.dumps(qstag.episodes)
        graph = pickle.dumps(qstag.graph)
        obs = [node['name'] for node in qstag.object_nodes]

        print ">>", qstag.graphlets.graphlets

        msg = QsrsToMongo(uuid=uuid,
                          which_qsr=self._which_qsr,
                          episodes=eps,
                          igraph=graph,
                          histogram=qstag.graphlets.histogram,
                          code_book=qstag.graphlets.code_book,
                          start_time=msg_data["start_time"],
                          map_name=msg_data["map"],
                          current_node=msg_data["current_node"],
                          number_of_detections=msg_data["detections"],
                          objects=obs,
                          end_time=msg_data["end_time"])

        query = {"uuid": uuid}
        self._store_client_qstag.update(message=msg,
                                        message_query=query,
                                        upsert=True)
Пример #31
0
class skeleton_cpm():
    """docstring for cpm"""
    def __init__(self, cam, rem, pub, sav):
        """Batch CPM processor over the recorded SkeletonDataset folders.

        cam -- path to the camera calibration yaml file.
        rem -- when true, delete each processed folder's rgb images.
        sav -- when true, save annotated cpm images to disc.
        pub -- when true, publish annotated cpm images on /cpm_skeleton_image.
        Exits the process when the dataset directory is missing.
        """

        # read camera calib
        #print '>>>>>>>>>',cam
        self.camera_calib = util.read_yaml_calib(cam)

        # remove rgb images
        self.rgb_remove = rem
        if self.rgb_remove:
            rospy.loginfo("remove rgb images from the dataset.")

        # save cpm images
        self.save_cpm_img = sav
        if self.save_cpm_img:
            rospy.loginfo("save cpm images.")

        # initialize published
        self.pub = pub
        if self.pub:
            rospy.loginfo("publish cpm images")
            self.image_pub = rospy.Publisher("/cpm_skeleton_image",
                                             Image,
                                             queue_size=1)
            self.bridge = CvBridge()
        else:
            rospy.loginfo("don't publish cpm images")

        # mongo stuff: run stats go to 'cpm_stats', per-uuid progress flags
        # to 'activity_learning'
        self.msg_store = MessageStoreProxy(database='message_store',
                                           collection='cpm_stats')
        self.msg_store_learning = MessageStoreProxy(
            database='message_store', collection='activity_learning')

        # open dataset folder
        self.directory = '/home/' + getpass.getuser(
        ) + '/SkeletonDataset/no_consent/'
        if not os.path.isdir(self.directory):
            rospy.loginfo(
                self.directory +
                " does not exist. Please make sure there is a dataset on this pc"
            )
            sys.exit(1)
        self.dates = sorted(os.listdir(self.directory))
        #self.delete_empty_dates()
        self.get_dates_to_process()
        self.empty_date = 0  # just a flag for empty date folder
        # no progress pointer found in mongo -> start from the first folder
        if not self.read_mongo_success:
            self.folder = 0
            self.userid = 0
            self._read_files()

        # cpm init stuff
        self.rospack = rospkg.RosPack()
        self.cpm_path = self.rospack.get_path('cpm_skeleton')
        self.conf = 1  # using config file 1, for the full body detector
        self.param, self.model = config_reader(self.conf)
        self.boxsize = self.model['boxsize']
        self.npart = self.model['np']
        self.limbs_names = [
            'head', 'neck', 'right_shoulder', 'right_elbow', 'right_hand',
            'left_shoulder', 'left_elbow', 'left_hand', 'right_hip',
            'right_knee', 'right_foot', 'left_hip', 'left_knee', 'left_foot'
        ]
        self.colors = [[0, 0, 255], [0, 170, 255], [0, 255, 170], [0, 255, 0],
                       [170, 255, 0], [255, 170, 0], [255, 0, 0],
                       [255, 0, 170], [170, 0, 255]]  # note BGR ...
        self.stickwidth = 6
        self.dist_threshold = 1.5  # less than 1.5 meters ignore the skeleton
        self.depth_thresh = .35  # any more different in depth than this with openni, use openni
        self.finished_processing = 0  # a flag to indicate that we finished processing allavailable  data
        self.threshold = 10  # remove any folder <= 10 detections
        self.cpm_stats_file = '/home/' + getpass.getuser(
        ) + '/SkeletonDataset/cpm_stats.txt'

        # action server
        self._as = actionlib.SimpleActionServer("cpm_action", cpmAction, \
                    execute_cb=self.execute_cb, auto_start=False)
        self._as.start()

    def _initiliase_cpm(self):
        """Build the caffe person-detector and pose networks, muting caffe's
        verbose stdout chatter while the models load."""
        sys.stdout = open(os.devnull, "w")  # caffe prints a lot while loading
        if self.param['use_gpu']:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()
        caffe.set_device(self.param['GPUdeviceNumber'])  # set to your device!
        self.person_net = caffe.Net(self.model['deployFile_person'],
                                    self.model['caffemodel_person'], caffe.TEST)
        self.pose_net = caffe.Net(self.model['deployFile'],
                                  self.model['caffemodel'], caffe.TEST)
        sys.stdout = sys.__stdout__  # restore normal printing

    def _cpm_stats(self, start, duration, stop_flag_pre, stop_flag_dur):
        f1 = open(self.cpm_stats_file, 'a')
        f1.write('date=' + start.split(' ')[0])
        f1.write(', start=' + start.split(' ')[1])
        f1.write(', end=' + time.strftime("%H:%M:%S"))
        f1.write(', duration=' + str(duration))
        f1.write(', processed=' + str(self.processed))
        f1.write(', removed=' + str(self.removed))
        f1.write(', images=' + str(self.img_processed))
        if self.finished_processing:
            f1.write(', stopped=finisehd all data\n')
        elif stop_flag_pre:
            f1.write(', stopped=preempted\n')
        elif stop_flag_dur:
            f1.write(', stopped=duration\n')

    def execute_cb(self, goal):
        """Action callback: run CPM over the recorded folders until all data is
        processed, the action is preempted, or goal.duration seconds elapse.
        Appends a stats line for the run and always reports success.
        """
        #print '>>-',self.empty_date
        self.processed = 0
        self.removed = 0
        self.img_processed = 0  # stats counter
        self._initiliase_cpm()
        stats_start = time.strftime("%d-%b-%Y %H:%M:%S")
        start = rospy.Time.now()
        end = rospy.Time.now()
        stop_flag_pre = 0
        stop_flag_dur = 0  # stop flags preempt and duration
        duration = goal.duration.secs
        while not self.finished_processing and not stop_flag_pre and not stop_flag_dur:
            #print self.empty_date
            if self.empty_date or self.rgb_files == []:
                rospy.loginfo("found an empty date folder")
                self.next()
            else:
                # detection counter for this folder; presumably incremented by
                # _process_images - confirm (its tail is outside this chunk)
                self.person_found_flag = 0
                for rgb, depth, skl in zip(self.rgb_files, self.dpt_files,
                                           self.skl_files):
                    if self._as.is_preempt_requested():
                        stop_flag_pre = 1
                        break
                    if (end - start).secs > duration:
                        stop_flag_dur = 1
                        break
                    self._process_images(rgb, depth, skl)
                    end = rospy.Time.now()
                    self.img_processed += 1  # counts the number of processed images
                # only folders processed to completion are counted / pruned
                if not stop_flag_pre and not stop_flag_dur:
                    self.processed += 1  # stats counter
                    if self.person_found_flag > self.threshold:
                        # enough detections: record progress, optionally free disc
                        self.update_last_learning()
                        self.update_last_cpm_date()
                        if self.rgb_remove:
                            self._remove_rgb_images(
                            )  # remove rgb images from directory
                        self.next()  # stats counter
                    else:
                        rospy.loginfo(
                            'nothing interesting was detected, I will delete this folder!'
                        )
                        self.delete_last_learning()
                        self.remove_uuid_folder()
                        self.removed += 1
                        self.next()

        # after the action reset everything
        self._cpm_stats(stats_start, duration, stop_flag_pre, stop_flag_dur)
        self._as.set_succeeded(cpmActionResult())

    def _remove_rgb_images(self):
        """Delete the rgb image folder of the current user (frees disc space)."""
        rgb_path = (self.directory + self.dates[self.folder] + '/' +
                    self.files[self.userid] + '/rgb')
        rospy.loginfo('removing: ' + rgb_path)
        shutil.rmtree(rgb_path)

    def remove_uuid_folder(self):
        """Delete the whole recording folder of the current user, refresh the
        file list, and step userid back so next() advances correctly."""
        date_dir = self.directory + self.dates[self.folder]
        uuid_dir = date_dir + '/' + self.files[self.userid]
        rospy.loginfo('removing: ' + uuid_dir)
        shutil.rmtree(uuid_dir)
        self.files = sorted(os.listdir(date_dir))
        self.userid -= 1

    def delete_last_learning(self):
        """Remove the HumanActivities record(s) for the current uuid from the
        activity-learning message store."""
        uuid = self.files[self.userid].split('_')[-1]
        matches = self.msg_store_learning.query(type=HumanActivities._type,
                                                message_query={"uuid": uuid})
        rospy.loginfo("I removed id from mongodb: " + uuid)
        for _, meta in matches:
            self.msg_store_learning.delete(message_id=str(meta['_id']))

    def update_last_cpm_date(self):
        """Upsert the single cpm progress pointer (last processed date/uuid)
        so a later run can resume where this one stopped."""
        pointer = cpm_pointer()
        pointer.type = "cpm_skeleton"
        pointer.date_ran = time.strftime("%Y-%m-%d")
        pointer.last_date_used = self.dates[self.folder]
        pointer.uuid = self.files[self.userid]
        print("adding %s to cpm stats store" % pointer.uuid)
        self.msg_store.update(message=pointer,
                              message_query={"type": pointer.type},
                              upsert=True)

    def update_last_learning(self):
        """Mark the current uuid as cpm-processed in the activity-learning
        store, inserting a fresh record when none exists yet."""
        uuid = self.files[self.userid].split('_')[-1]
        query = {"uuid": uuid}
        result = self.msg_store_learning.query(type=HumanActivities._type,
                                               message_query=query)

        # existing record(s): just flip the cpm flag
        for msg, meta in result:
            msg.cpm = True
            print("updating %s to activity learning store" % msg.uuid)
            self.msg_store_learning.update(message=msg,
                                           message_query=query,
                                           upsert=True)

        # no record yet: create one from the folder name (<date>/<time>_<uuid>)
        if not result:
            msg = HumanActivities()
            msg.date = self.dates[self.folder]
            msg.uuid = uuid
            msg.time = self.files[self.userid].split('_')[-2]
            msg.cpm = True
            self.msg_store_learning.insert(message=msg)
            print("adding %s to activity learning store" % msg.uuid)

    #def update_last_learning(self):
    #    msg = HumanActivities()
    #    msg.date = self.dates[self.folder]
    #    msg.uuid = self.files[self.userid].split('_')[-1]
    #    msg.time = self.files[self.userid].split('_')[-2]
    #    msg.cpm = True
    #    print "adding %s to activity learning store" % msg.uuid
    #    query = {"uuid" : msg.uuid}
    #    self.msg_store_learning.update(message=msg, message_query=query, upsert=True)

    def get_dates_to_process(self):
        """ Find the sequence of date folders (on disc) which have not been processed into QSRs.
        ret: self.not_processed_dates - List of date folders to use
        """
        self.read_mongo_success = 0
        for pointer, meta in self.msg_store.query(cpm_pointer._type):
            if pointer.type != "cpm_skeleton":
                continue
            self.read_mongo_success = 1
            # resume exactly after the last (date, uuid) recorded in mongo
            self.folder = self.dates.index(pointer.last_date_used)
            self.files = sorted(
                os.listdir(self.directory + self.dates[self.folder]))
            self.userid = self.files.index(pointer.uuid)
            rospy.loginfo("cpm progress date: " + pointer.last_date_used +
                          "," + pointer.uuid)
            self.next()

    def _read_files(self):
        """Load the rgb/depth/skeleton file lists for the current user folder
        and create the cpm output folders when missing."""
        self.empty_date = 0
        self.rgb_files = []
        date_dir = self.directory + self.dates[self.folder]
        self.files = sorted(os.listdir(date_dir))
        if not self.files:  # empty date folder
            self.empty_date = 1
            rospy.loginfo(
                "I found an empty date folder, this should not happen.")
            #sys.exit(1)
            return
        user_dir = date_dir + '/' + self.files[self.userid]
        self.rgb_dir = user_dir + '/rgb/'
        self.dpt_dir = user_dir + '/depth/'
        self.skl_dir = user_dir + '/skeleton/'
        self.cpm_dir = user_dir + '/cpm_skeleton/'
        self.cpm_img_dir = user_dir + '/cpm_images/'
        self.rgb_files = sorted(glob.glob(self.rgb_dir + "*.jpg"))
        self.dpt_files = sorted(glob.glob(self.dpt_dir + "*.jpg"))
        self.skl_files = sorted(glob.glob(self.skl_dir + "*.txt"))
        rospy.loginfo('Processing userid: ' + self.files[self.userid])
        if not os.path.isdir(self.cpm_dir):
            os.mkdir(self.cpm_dir)
        if not os.path.isdir(self.cpm_img_dir) and self.save_cpm_img:
            os.mkdir(self.cpm_img_dir)

    def next(self):
        """Advance to the next user folder, rolling over to the next date
        folder; flags finished_processing when every date is consumed."""
        self.userid += 1
        if self.userid >= len(self.files):
            # exhausted this date folder - start at the first user of the next
            self.userid = 0
            self.folder += 1
        if self.folder >= len(self.dates):
            rospy.loginfo("cpm finished processing all folders")
            self.finished_processing = 1
        else:
            self._read_files()

    def _process_images(self, test_image, test_depth, test_skl):
        # block 1
        self.test_skl = test_skl
        self.name = test_image.split('.')[0].split('/')[-1]
        start_time = time.time()
        img = cv.imread(test_image)
        depth = cv.imread(test_depth)
        self.scale = self.boxsize / (img.shape[0] * 1.0)
        imageToTest = cv.resize(img, (0, 0),
                                fx=self.scale,
                                fy=self.scale,
                                interpolation=cv.INTER_CUBIC)
        depthToTest = cv.resize(depth, (0, 0),
                                fx=self.scale,
                                fy=self.scale,
                                interpolation=cv.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(imageToTest)

        # check distance threshold
        f1 = open(self.test_skl, 'r')
        self.openni_values, self.openni_time = util.get_openni_values(f1)
        x = []
        y = []
        if self.openni_values['torso']['z'] >= self.dist_threshold:
            # block 2
            self.person_net.blobs['image'].reshape(
                *(1, 3, imageToTest_padded.shape[0],
                  imageToTest_padded.shape[1]))
            self.person_net.reshape()
            #person_net.forward(); # dry run to avoid GPU synchronization later in caffe

            # block 3
            self.person_net.blobs['image'].data[...] = np.transpose(
                np.float32(imageToTest_padded[:, :, :, np.newaxis]),
                (3, 2, 0, 1)) / 256 - 0.5
            output_blobs = self.person_net.forward()
            person_map = np.squeeze(
                self.person_net.blobs[output_blobs.keys()[0]].data)

            # block 4
            person_map_resized = cv.resize(person_map, (0, 0),
                                           fx=8,
                                           fy=8,
                                           interpolation=cv.INTER_CUBIC)
            data_max = scipy.ndimage.filters.maximum_filter(
                person_map_resized, 3)
            maxima = (person_map_resized == data_max)
            diff = (data_max > 0.5)
            maxima[diff == 0] = 0
            x = np.nonzero(maxima)[1]
            y = np.nonzero(maxima)[0]
            # get the right person from openni
            x, y = util.get_correct_person(self.openni_values, self.scale,
                                           self.camera_calib, x, y)
            self.x = x
            self.y = y

        # block 5
        num_people = len(x)
        person_image = np.ones((self.model['boxsize'], self.model['boxsize'],
                                3, num_people)) * 128
        for p in range(num_people):
            x_max = x[p] + self.model['boxsize'] / 2
            x_min = x[p] - self.model['boxsize'] / 2
            y_max = y[p] + self.model['boxsize'] / 2
            y_min = y[p] - self.model['boxsize'] / 2
            if x_min < 0:
                xn_min = x_min * -1
                x_min = 0
            else:
                xn_min = 0
            if x_max > imageToTest.shape[1]:
                xn_max = self.model['boxsize'] - (x_max - imageToTest.shape[1])
                x_max = imageToTest.shape[1]
            else:
                xn_max = self.model['boxsize']
            if y_min < 0:
                yn_min = y_min * -1
                y_min = 0
            else:
                yn_min = 0
            if y_max > imageToTest.shape[0]:
                yn_max = self.model['boxsize'] - (y_max - imageToTest.shape[0])
                y_max = imageToTest.shape[0]
            else:
                yn_max = self.model['boxsize']
            person_image[yn_min:yn_max, xn_min:xn_max, :,
                         p] = imageToTest[y_min:y_max, x_min:x_max, :]

        # block 6
        gaussian_map = np.zeros((self.model['boxsize'], self.model['boxsize']))
        x_p = np.arange(self.model['boxsize'])
        y_p = np.arange(self.model['boxsize'])
        part1 = [(x_p - self.model['boxsize'] / 2) *
                 (x_p - self.model['boxsize'] / 2),
                 np.ones(self.model['boxsize'])]
        part2 = [
            np.ones(self.model['boxsize']), (y_p - self.model['boxsize'] / 2) *
            (y_p - self.model['boxsize'] / 2)
        ]
        dist_sq = np.transpose(np.matrix(part1)) * np.matrix(part2)
        exponent = dist_sq / 2.0 / self.model['sigma'] / self.model['sigma']
        gaussian_map = np.exp(-exponent)

        # block 7
        #pose_net.forward() # dry run to avoid GPU synchronization later in caffe
        output_blobs_array = [dict() for dummy in range(num_people)]
        for p in range(num_people):
            input_4ch = np.ones(
                (self.model['boxsize'], self.model['boxsize'], 4))
            input_4ch[:, :, 0:
                      3] = person_image[:, :, :,
                                        p] / 256.0 - 0.5  # normalize to [-0.5, 0.5]
            input_4ch[:, :, 3] = gaussian_map
            self.pose_net.blobs['data'].data[...] = np.transpose(
                np.float32(input_4ch[:, :, :, np.newaxis]), (3, 2, 0, 1))
            if self.conf == 4:
                output_blobs_array[p] = copy.deepcopy(
                    self.pose_net.forward()['Mconv5_stage4'])
            else:
                output_blobs_array[p] = copy.deepcopy(
                    self.pose_net.forward()['Mconv7_stage6'])

        # block 8
        for p in range(num_people):
            for part in [
                    0, 3, 7, 10, 12
            ]:  # sample 5 body parts: [head, right elbow, left wrist, right ankle, left knee]
                part_map = output_blobs_array[p][0, part, :, :]
                part_map_resized = cv.resize(
                    part_map, (0, 0), fx=4, fy=4,
                    interpolation=cv.INTER_CUBIC)  #only for displaying

        # block 9
        prediction = np.zeros((14, 2, num_people))
        for p in range(num_people):
            for part in range(14):
                part_map = output_blobs_array[p][0, part, :, :]
                part_map_resized = cv.resize(part_map, (0, 0),
                                             fx=8,
                                             fy=8,
                                             interpolation=cv.INTER_CUBIC)
                prediction[part, :,
                           p] = np.unravel_index(part_map_resized.argmax(),
                                                 part_map_resized.shape)
            # mapped back on full image
            prediction[:, 0,
                       p] = prediction[:, 0,
                                       p] - (self.model['boxsize'] / 2) + y[p]
            prediction[:, 1,
                       p] = prediction[:, 1,
                                       p] - (self.model['boxsize'] / 2) + x[p]

        # block 10
        limbs = self.model['limbs']
        canvas = imageToTest.copy()
        #canvas *= .5 # for transparency
        canvas = np.multiply(canvas, 0.2, casting="unsafe")
        if num_people:
            self.person_found_flag += 1  # this is used to prevent the deletion of the entire folder if noe person is found
            self._get_depth_data(prediction, depthToTest)
            for p in range(num_people):
                cur_canvas = imageToTest.copy(
                )  #np.zeros(canvas.shape,dtype=np.uint8)
                for l in range(limbs.shape[0]):
                    X = prediction[limbs[l, :] - 1, 0, p]
                    Y = prediction[limbs[l, :] - 1, 1, p]
                    mX = np.mean(X)
                    mY = np.mean(Y)
                    length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
                    angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                    polygon = cv.ellipse2Poly(
                        (int(mY), int(mX)), (int(length / 2), self.stickwidth),
                        int(angle), 0, 360, 1)
                    cv.fillConvexPoly(cur_canvas, polygon, self.colors[l])
                    cv.fillConvexPoly(depthToTest, polygon, self.colors[l])
                cv.circle(cur_canvas, (int(self.x[0]), int(self.y[0])), 3,
                          (250, 0, 210), -1)
                canvas = np.add(canvas,
                                np.multiply(cur_canvas, 0.8, casting="unsafe"),
                                casting="unsafe")  # for transparency
            print 'image ' + self.name + ' processed in: %2.3f' % (
                time.time() - start_time), "person found"
        else:
            print 'image ' + self.name + ' processed in: %2.3f' % (
                time.time() - start_time), "person not found"
        vis = np.concatenate((canvas.astype(np.uint8), depthToTest), axis=1)
        # saving cpm images
        if self.save_cpm_img:
            cv.imwrite(self.cpm_img_dir + self.name + '.jpg', vis)

        # publishing cpm images
        if self.pub:
            sys.stdout = open(os.devnull, "w")
            msg = self.bridge.cv2_to_imgmsg(vis, "bgr8")
            sys.stdout = sys.__stdout__
            self.image_pub.publish(msg)
            #cv.imwrite('/home/strands/SkeletonDataset/cpm_images/cpm_'+self.name+'.jpg',vis)
            #### Create CompressedIamge ####
            #msg = CompressedImage()
            #msg.header.stamp = rospy.Time.now()
            #msg.format = "jpeg"
            #msg.data = vis  # np.array(cv.imencode('.jpg', vis)[1]).tostring()
            #### Publish new image
            #self.image_pub.publish(msg)

    def _get_depth_data(self, prediction, depthToTest):
        """Write estimated 3D joint positions for the first detected person
        to a cpm_* skeleton file.

        prediction -- (14, 2, num_people) array of [row, col] image
            coordinates per body part; only person 0 is used here.
        depthToTest -- depth image aligned with the RGB input; channel 0
            holds the quantised depth value.
        """
        [fx, fy, cx, cy] = self.camera_calib
        cpm_file = self.cpm_dir + 'cpm_' + self.test_skl.split('/')[-1]
        # 'with' guarantees the file is closed even if indexing below raises.
        with open(cpm_file, 'w') as f1:
            f1.write(self.openni_time)
            for part, jname in enumerate(self.limbs_names):
                # Clamp coordinates to the image bounds.
                x2d = np.min([int(prediction[part, 0, 0]), 367])
                y2d = np.min([int(prediction[part, 1, 0]), 490])
                depth_val = depthToTest[x2d, y2d, 0]
                # Linear mapping from the quantised depth value to metres.
                z = (.4) / (20.0) * (depth_val - 60.0) + 2.7
                # Fall back to the OpenNI depth when the estimate deviates
                # too much from it.
                if np.abs(z - self.openni_values[jname]['z']) > self.depth_thresh:
                    z = self.openni_values[jname]['z']
                # Back-project pixel coordinates to camera space.
                x = (y2d / self.scale - cx) * z / fx
                y = (x2d / self.scale - cy) * z / fy
                f1.write(jname + ',' + str(x2d) + ',' + str(y2d) + ',' + str(x) +
                         ',' + str(y) + ',' + str(z) + '\n')
            # add the torso position
            x2d = np.min([int(self.y[0]), 367])
            y2d = np.min([int(self.x[0]), 490])
            depth_val = depthToTest[x2d, y2d, 0]
            z = (.4) / (20.0) * (depth_val - 60.0) + 2.7
            # NOTE(review): 'jname' here is the last joint left over from the
            # loop above, so the torso depth is validated against that
            # joint's OpenNI z -- confirm this is intended rather than a
            # leaked loop variable.
            if np.abs(z - self.openni_values[jname]['z']) > self.depth_thresh:
                z = self.openni_values[jname]['z']
            x = (y2d - cx) * z / fx
            y = (x2d - cy) * z / fy
            # NOTE(review): unlike the per-joint lines above, this writes y2d
            # before x2d -- confirm the column order against the file readers.
            f1.write('torso' + ',' + str(y2d) + ',' + str(x2d) + ',' + str(x) +
                     ',' + str(y) + ',' + str(z) + '\n')
class TrajectoryOccurrenceFrequencies(object):
    """
        Trajectory occurrence frequencies (tof) per region, periodic day, and
        time window, persisted in the mongodb 'occurrence_rates' collection.
        self.tof has the form {region: {day: {"HH:MM-HH:MM": Lambda}}}.
    """

    def __init__(
        self, soma_map, soma_config, minute_interval=1,
        window_interval=10, periodic_type="weekly"
    ):
        """
            Initialize a set of trajectory occurrence frequency classified by regions, days, hours, minute intervals respectively.
            Hours will be set by default to [0-23]. Days are set as days of a week [0-6] where Monday is 0.
            Argument periodic_type can be set either 'weekly' or 'monthly'.
        """
        self.soma = soma_map
        self.soma_config = soma_config
        self.periodic_type = periodic_type
        if self.periodic_type == "weekly":
            self.periodic_days = [i for i in range(7)]
        else:
            # monthly: one slot for each possible day of the month
            self.periodic_days = [i for i in range(31)]
        self.minute_interval = minute_interval
        self.window_interval = window_interval
        self.ms = MessageStoreProxy(collection="occurrence_rates")
        self.reinit()

    def reinit(self):
        # {region: {day: {window_key: Lambda}}}, filled lazily by
        # init_region_tof / load_tof.
        self.tof = dict()

    def load_tof(self):
        """
            Load trajectory occurrence frequency from mongodb occurrence_rates collection.
        """
        rospy.loginfo("Retrieving trajectory occurrence frequencies from database...")
        query = {
            "soma": self.soma, "soma_config": self.soma_config,
            "periodic_type": self.periodic_type
        }
        logs = self.ms.query(OccurrenceRate._type, message_query=query)
        if len(logs) == 0:
            rospy.logwarn(
                "No data for %s with config %s and periodicity type %s" % (
                    self.soma, self.soma_config, self.periodic_type
                )
            )
            return

        for i in logs:
            # Accept only windows spanning exactly one interval: either
            # within the same hour, or crossing into the next hour with a
            # window_interval-minute span.
            same_hour = (i[0].end_hour == i[0].start_hour)
            within_interval = (i[0].end_hour == (i[0].start_hour+1) % 24) and (i[0].end_minute - i[0].start_minute) % 60 == self.window_interval
            if same_hour or within_interval:
                if i[0].region_id not in self.tof:
                    self.init_region_tof(i[0].region_id)
                key = "%s-%s" % (
                    datetime.time(i[0].start_hour, i[0].start_minute).isoformat()[:-3],
                    datetime.time(i[0].end_hour, i[0].end_minute).isoformat()[:-3]
                )
                # Windows not created by init_region_tof are ignored.
                if key in self.tof[i[0].region_id][i[0].day]:
                    self.tof[i[0].region_id][i[0].day][key].occurrence_shape = i[0].occurrence_shape
                    self.tof[i[0].region_id][i[0].day][key].occurrence_scale = i[0].occurrence_scale
                    self.tof[i[0].region_id][i[0].day][key].set_occurrence_rate(i[0].occurrence_rate)
        rospy.loginfo("Retrieving is complete...")

    def update_tof_daily(self, curr_day_data, prev_day_data, curr_date):
        """
            Update trajectory occurrence frequency for one day. Updating the current day,
            tof needs information regarding the number of trajectory from the previous day as well.
            The form for curr_day_data and prev_day_data is {reg{date[hour{minute}]}}.
        """
        rospy.loginfo("Daily update for trajectory occurrence frequency...")
        for reg, hourly_traj in curr_day_data.iteritems():
            date = curr_date.day
            if self.periodic_type == "weekly":
                date = curr_date.weekday()
            # Seed the sliding window with the tail of the previous day.
            prev_day_n_min_traj = previous_n_minutes_trajs(
                prev_day_data[reg], self.window_interval, self.minute_interval
            )
            self._update_tof(reg, date, hourly_traj, prev_day_n_min_traj)
        rospy.loginfo("Daily update is complete...")

    def _update_tof(self, reg, date, hourly_traj, prev_n_min_traj):
        # Sliding window of trajectory counts; -1 marks an unobserved slot.
        length = (self.window_interval / self.minute_interval)
        temp_data = prev_n_min_traj + [-1]
        pointer = length - 1
        for hour, mins_traj in enumerate(hourly_traj):
            minutes = sorted(mins_traj)
            for mins in minutes:
                traj = mins_traj[mins]
                temp_data[pointer % length] = traj
                pointer += 1
                if reg not in self.tof:
                    self.init_region_tof(reg)
                if sum(temp_data) == (-1 * length):
                    # the whole window is unobserved, nothing to update
                    continue
                else:
                    # Scale the observed counts up to compensate for the
                    # unobserved (-1) slots in the window.
                    total_traj = length / float(length + sum([i for i in temp_data if i == -1]))
                    total_traj = math.ceil(total_traj * sum([i for i in temp_data if i != -1]))
                temp = [
                    hour + (mins - self.window_interval) / 60,
                    (mins - self.window_interval) % 60
                ]
                hour = (hour + (mins/60)) % 24
                key = "%s-%s" % (
                    datetime.time(temp[0] % 24, temp[1]).isoformat()[:-3],
                    datetime.time(hour, mins % 60).isoformat()[:-3]
                )
                self.tof[reg][date][key].update_lambda([total_traj])

    def init_region_tof(self, reg):
        """
            Initialize trajectory occurrence frequency for one whole region.
            {region: daily_tof}
        """
        daily_tof = dict()
        for j in self.periodic_days:
            hourly_tof = dict()
            # One Lambda per window_interval-long window, stepping by
            # minute_interval across the whole day.
            for i in range(24 * (60 / self.minute_interval) + 1):
                hour = i / (60 / self.minute_interval)
                minute = (self.minute_interval * i) % 60
                temp = [
                    hour + (minute - self.window_interval) / 60,
                    (minute - self.window_interval) % 60
                ]
                key = "%s-%s" % (
                    datetime.time(temp[0] % 24, temp[1]).isoformat()[:-3],
                    datetime.time(hour % 24, minute).isoformat()[:-3]
                )
                hourly_tof.update({key: Lambda()})

            daily_tof.update({j: hourly_tof})
        self.tof.update({reg: daily_tof})

    def store_tof(self):
        """
            Store self.tof into mongodb in occurrence_rates collection
        """
        rospy.loginfo("Storing to database...")
        for reg, daily_tof in self.tof.iteritems():
            for day, hourly_tof in daily_tof.iteritems():
                for window_time, lmbd in hourly_tof.iteritems():
                    self._store_tof(reg, day, window_time, lmbd)
        rospy.loginfo("Storing is complete...")

    # helper function of store_tof
    def _store_tof(self, reg, day, window_time, lmbd):
        # window_time has the form "HH:MM-HH:MM". str.split replaces the
        # deprecated string.split (removed in Python 3).
        start_time, end_time = window_time.split("-")
        start_hour, start_min = start_time.split(":")
        end_hour, end_min = end_time.split(":")
        occu_msg = OccurrenceRate(
            self.soma, self.soma_config, reg.encode("ascii"), day,
            int(start_hour), int(end_hour), int(start_min), int(end_min),
            lmbd.occurrence_shape, lmbd.occurrence_scale, lmbd.get_occurrence_rate(), self.periodic_type
        )
        query = {
            "soma": self.soma, "soma_config": self.soma_config,
            "region_id": reg, "day": day, "start_hour": int(start_hour),
            "end_hour": int(end_hour), "start_minute": int(start_min), "end_minute": int(end_min),
            "periodic_type": self.periodic_type
        }
        # as we use MAP, then the posterior probability mode (gamma mode) is the
        # one we save. However, if gamma map is less than default (initial value) or -1
        # (result from an update to gamma where occurrence_shape < 1), we decide to ignore
        # them.
        temp = Lambda()
        if lmbd.get_occurrence_rate() > temp.get_occurrence_rate():
            if len(self.ms.query(OccurrenceRate._type, message_query=query)) > 0:
                self.ms.update(occu_msg, message_query=query)
            else:
                self.ms.insert(occu_msg)
class RegionObservationTimeManager(object):
    """
        Tracks how long the robot has observed each soma region, binned by
        day / hour / minute interval, persisted in the
        region_observation_time mongodb collection.
    """

    def __init__(self, soma_map, soma_config):
        self.soma_map = soma_map
        self.soma_config = soma_config
        self.ms = MessageStoreProxy(collection="region_observation_time")
        self.gs = GeoSpatialStoreProxy('geospatial_store', 'soma')
        # Direct handle on logged robot poses, used to reconstruct
        # observation durations that are not in the database yet.
        self.roslog = pymongo.MongoClient(
            rospy.get_param("mongodb_host", "localhost"),
            rospy.get_param("mongodb_port", 62345)
        ).roslog.robot_pose
        self.reinit()

    def reinit(self):
        """
            Reinitialising region_observation_duration to an empty dictionary
        """
        self.region_observation_duration = dict()
        self._month_year_observation_taken = dict()

    def load_from_mongo(self, days, minute_interval=60):
        """
            Load robot-region observation time from database (mongodb) and store them in
            self.region_observation_duration.
            Returning (region observation time, total duration of observation)
        """
        roi_region_daily = dict()
        total_duration = 0
        self.minute_interval = minute_interval
        for i in days:
            end_date = i + datetime.timedelta(hours=24)
            rospy.loginfo(
                "Load region observation time from %s to %s..." % (str(i), str(end_date))
            )
            query = {
                "soma": self.soma_map, "soma_config": self.soma_config,
                "start_from.secs": {
                    "$gte": time.mktime(i.timetuple()),
                    "$lt": time.mktime(end_date.timetuple())
                }
            }
            roi_reg_hourly = dict()
            for log in self.ms.query(RegionObservationTime._type, message_query=query):
                end = datetime.datetime.fromtimestamp(log[0].until.secs)
                if log[0].region_id not in roi_reg_hourly:
                    temp = list()
                    start = datetime.datetime.fromtimestamp(log[0].start_from.secs)
                    interval = (end.minute + 1) - start.minute
                    # skip entries logged with a different binning interval
                    if interval != minute_interval:
                        continue
                    for j in range(24):
                        group_mins = {k*interval: 0 for k in range(1, (60/interval)+1)}
                        temp.append(group_mins)
                    # registering once after the loop is enough; 'temp' is
                    # the same list object throughout
                    roi_reg_hourly.update({log[0].region_id: temp})
                roi_reg_hourly[log[0].region_id][end.hour][end.minute+1] = log[0].duration.secs
                roi_reg_hourly[log[0].region_id][end.hour][end.minute+1] += 0.000000001 * log[0].duration.nsecs
                total_duration += log[0].duration.secs
            roi_region_daily.update({i.day: roi_reg_hourly})
        self.region_observation_duration = roi_region_daily
        return roi_region_daily, total_duration

    def store_to_mongo(self):
        """
            Store region observation time from self.region_observation_duration to mongodb.
            It will store soma map, soma configuration, region_id, the starting and end time where
            robot sees a region in some interval, and the duration of how long the robot during
            the interval.
        """
        rospy.loginfo("Storing region observation time to region_observation_time collection...")
        data = self.region_observation_duration
        try:
            minute_interval = self.minute_interval
        except AttributeError:
            # self.minute_interval is only set by load_from_mongo or
            # calculate_region_observation_duration.
            rospy.logwarn("Minute interval is not defined, please call either load_from_mongo or calculate_region_observation_duration first")
            return

        for day, daily_data in data.iteritems():
            for reg, reg_data in daily_data.iteritems():
                for hour, hourly_data in enumerate(reg_data):
                    for minute, duration in hourly_data.iteritems():
                        # minute bins are keyed by their end minute, so the
                        # interval ends at (minute - 1):59
                        date_until = datetime.datetime(
                            self._month_year_observation_taken[day][1],
                            self._month_year_observation_taken[day][0],
                            day, hour, minute-1,
                            59
                        )
                        until = time.mktime(date_until.timetuple())
                        start_from = until - (60 * minute_interval) + 1
                        msg = RegionObservationTime(
                            self.soma_map, self.soma_config, reg,
                            rospy.Time(start_from), rospy.Time(until),
                            rospy.Duration(duration)
                        )
                        self._store_to_mongo(msg)

    def _store_to_mongo(self, msg):
        # Upsert keyed on (soma, config, region, start, end).
        # NOTE(review): only durations with nsecs > 0 are stored -- entries
        # with whole-second durations (nsecs == 0) are silently dropped;
        # confirm this is intended rather than 'secs > 0 or nsecs > 0'.
        query = {
            "soma": msg.soma, "soma_config": msg.soma_config,
            "region_id": msg.region_id, "start_from.secs": msg.start_from.secs,
            "until.secs": msg.until.secs
        }
        if msg.duration.nsecs > 0:
            if len(self.ms.query(RegionObservationTime._type, message_query=query)) > 0:
                self.ms.update(msg, message_query=query)
            else:
                self.ms.insert(msg)

    def calculate_region_observation_duration(self, days, minute_interval=60):
        """
            Calculating the region observation duration for particular days, splitted by
            minute_interval.
            Returns the ROIs the robot has monitored at each logged robot pose
            for particular days specified up to minutes interval.
        """
        rospy.loginfo('Calculation region observation duration...')
        roi_region_daily = dict()
        self.minute_interval = minute_interval

        for i in days:
            # Prefer data already in the database; fall back to
            # reconstructing it from logged robot poses.
            loaded_roi_reg_day = self.load_from_mongo([i], minute_interval)
            if loaded_roi_reg_day[1] == 0:
                end_date = i + datetime.timedelta(hours=24)
                roi_region_hourly = self._get_robot_region_stay_duration(i, end_date, minute_interval)
                roi_region_daily.update({i.day: roi_region_hourly})
            else:
                roi_region_daily.update({i.day: loaded_roi_reg_day[0][i.day]})
            self._month_year_observation_taken.update({i.day: (i.month, i.year)})
        self.region_observation_duration = roi_region_daily
        return roi_region_daily

    # hidden function for get_robot_region_stay_duration
    def _get_robot_region_stay_duration(self, start_date, end_date, minute_interval=60):
        sampling_rate = 10
        roi_temp_list = dict()
        rospy.loginfo("Querying from " + str(start_date) + " to " + str(end_date))

        query = {
            "_id": {"$exists": "true"},
            "_meta.inserted_at": {"$gte": start_date, "$lt": end_date}
        }

        for i, pose in enumerate(self.roslog.find(query)):
            # sub-sample the pose log to keep the computation tractable
            if i % sampling_rate == 0:
                _, _, yaw = euler_from_quaternion(
                    [0, 0, pose['orientation']['z'], pose['orientation']['w']]
                )
                coords = robot_view_cone(pose['position']['x'], pose['position']['y'], yaw)
                # coords = robot_view_area(pose['position']['x'], pose['position']['y'], yaw)
                langitude_latitude = list()
                for pt in coords:
                    langitude_latitude.append(self.gs.coords_to_lnglat(pt[0], pt[1]))
                # close the polygon by repeating the first vertex
                langitude_latitude.append(self.gs.coords_to_lnglat(coords[0][0], coords[0][1]))

                for j in self.gs.observed_roi(langitude_latitude, self.soma_map, self.soma_config):
                    region = str(j['soma_roi_id'])
                    hour = pose['_meta']['inserted_at'].time().hour
                    minute = pose['_meta']['inserted_at'].time().minute

                    # Region Knowledge per hour. Bin them by hour and minute_interval.
                    if region not in roi_temp_list:
                        temp = list()
                        # 'hr'/'k' avoid shadowing the enumerate counter 'i'
                        # used above for pose sub-sampling.
                        for hr in range(24):
                            group_mins = {
                                k*minute_interval: 0 for k in range(1, (60/minute_interval)+1)
                            }
                            temp.append(group_mins)
                        roi_temp_list[region] = temp
                    index = ((minute / minute_interval) + 1) * minute_interval
                    roi_temp_list[region][hour][index] += 1

        roi_temp_list = self._normalizing(roi_temp_list, sampling_rate)
        rospy.loginfo("Region observation duration for the query is " + str(roi_temp_list))
        return roi_temp_list

    def _normalizing(self, roi_temp_list, sampling_rate):
        """
            Checking if all robot region relation based on its stay duration is capped
            by minute_interval * 60 (total seconds). If it is not, then normalization is applied
        """
        regions = roi_temp_list.keys()
        _hourly_poses = [0] * 24
        for i in range(24):
            for region in regions:
                _hourly_poses[i] += sum(roi_temp_list[region][i].values())

        # normalise only if some hour exceeds an hour's worth of seconds
        normalizing = sum([1 for i in _hourly_poses if i > 3601]) > 0
        max_hourly_poses = max(_hourly_poses)

        for reg, hourly_poses in roi_temp_list.iteritems():
            if normalizing:
                for ind, val in enumerate(hourly_poses):
                    for minute, seconds in val.iteritems():
                        roi_temp_list[reg][ind][minute] = 3600 / float(max_hourly_poses) * seconds
        return roi_temp_list
class DetectionImageAnnotator(object):
    def __init__(self,
                 config,
                 path="/localhome/%s/Pictures" % getpass.getuser()):
        """Set up databases, counters, and publishers for annotating people
        detections (activity, upper-body, change, and leg detection).

        :param config: soma configuration name passed to get_soma_info.
        :param path: directory where the yaml accuracy files live
            (note: the default is evaluated once, at import time).
        """
        self.path = path
        self.regions, self.map = get_soma_info(config)
        self.topo_map = None
        self._get_waypoints()
        # Replace the raw TopologicalMap message with a dict mapping each
        # waypoint name to its influence-zone polygon in map coordinates.
        self.topo_map = {
            wp.name:
            Polygon([[wp.pose.position.x + i.x, wp.pose.position.y + i.y]
                     for i in wp.verts])
            for wp in self.topo_map.nodes
        }
        self._stop = False
        self._img = Image()
        # Ground-truth activity annotation counts per (region, waypoint).
        self.activity = {
            roi: {wp: {
                "Present": 0,
                "Absent": 0
            }
                  for wp in self.topo_map}
            for roi in self.regions.keys()
        }
        # Confusion-matrix counters per (region, waypoint) for the upper
        # body detector (ubd), change detection (cd), and leg detector (leg).
        self.ubd = {
            roi:
            {wp: {
                "TP": 0,
                "FN": 0,
                "FP": 0,
                "TN": 0
            }
             for wp in self.topo_map}
            for roi in self.regions.keys()
        }
        self.cd = {
            roi:
            {wp: {
                "TP": 0,
                "FN": 0,
                "FP": 0,
                "TN": 0
            }
             for wp in self.topo_map}
            for roi in self.regions.keys()
        }
        self.leg = {
            roi:
            {wp: {
                "TP": 0,
                "FN": 0,
                "FP": 0,
                "TN": 0
            }
             for wp in self.topo_map}
            for roi in self.regions.keys()
        }
        self.wp_history = dict()
        # Per-region event logs; load_accuracy restores these from yaml.
        self.roi_activity = {roi: list() for roi in self.regions.keys()}
        self.roi_non_activity = {roi: list() for roi in self.regions.keys()}
        self.roi_cd = {roi: list() for roi in self.regions.keys()}
        # self.roi_non_cd = {roi: list() for roi in self.regions.keys()}
        self.roi_ubd = {roi: list() for roi in self.regions.keys()}
        # self.roi_non_ubd = {roi: list() for roi in self.regions.keys()}
        self.roi_leg = {roi: list() for roi in self.regions.keys()}
        # self.roi_non_leg = {roi: list() for roi in self.regions.keys()}
        self._db = MessageStoreProxy(collection="ubd_scene_log")
        self._db_store = MessageStoreProxy(collection="ubd_scene_accuracy")
        self._db_change = MessageStoreProxy(collection="change_detection")
        # Raw pymongo access to logged upper-body detections.
        self._db_ubd = pymongo.MongoClient(
            rospy.get_param("mongodb_host", "localhost"),
            rospy.get_param("mongodb_port", 62345)).message_store.upper_bodies
        self._trajvis = TrajectoryVisualisation(
            rospy.get_name() + "/leg",
            {"start_time.secs": rospy.Time.now().secs})
        # visualisation stuff
        self._cd = list()
        self._cd_len = 0
        self._ubd = list()
        self._ubd_len = 0
        self._robot_pos = list()
        self._robot_pos_len = 0
        self._pub = rospy.Publisher(rospy.get_name(), Image, queue_size=10)
        self._pub_cd_marker = rospy.Publisher(rospy.get_name() + "/cd_marker",
                                              MarkerArray,
                                              queue_size=10)
        self._pub_ubd_marker = rospy.Publisher(rospy.get_name() +
                                               "/ubd_marker",
                                               MarkerArray,
                                               queue_size=10)
        self._pub_robot_marker = rospy.Publisher(rospy.get_name() +
                                                 "/robot_marker",
                                                 MarkerArray,
                                                 queue_size=10)
        self.load_accuracy()
        # Republish the latest annotated image at 10 Hz.
        rospy.Timer(rospy.Duration(0.1), self.pub_img)

    def _topo_map_cb(self, topo_map):
        """Subscriber callback: cache the received TopologicalMap message."""
        self.topo_map = topo_map

    def _get_waypoints(self):
        """Subscribe to /topological_map and block until a map arrives."""
        sub = rospy.Subscriber("/topological_map", TopologicalMap,
                               self._topo_map_cb, None, 10)
        rospy.loginfo("Getting information from /topological_map...")
        # Spin until the callback has filled in self.topo_map, then drop
        # the subscription -- the map is only needed once.
        while self.topo_map is None:
            rospy.sleep(0.1)
        sub.unregister()

    def load_accuracy(self):
        """Restore detection accuracy counters and per-ROI logs.

        Counters come from the 'ubd_scene_accuracy' collection (newest entry
        per (region, waypoint) pair wins); the per-ROI event lists and the
        waypoint history are read back from yaml files under self.path, and
        reset to empty structures if the files are missing.
        """
        entries = self._db_store.query(UbdSceneAccuracy._type,
                                       message_query={"map": self.map},
                                       sort_query=[("header.stamp.secs", -1)],
                                       limit=len(self.regions.keys()) *
                                       len(self.topo_map))
        seen = list()
        for entry in entries:
            entry = entry[0]
            pair = (entry.region_id, entry.region_config)
            # Entries are sorted newest-first; keep only the first
            # occurrence of each (region, waypoint) pair.
            if pair in seen:
                continue
            self.activity[entry.region_id][
                entry.region_config]["Present"] = entry.activity_present
            self.activity[entry.region_id][
                entry.region_config]["Absent"] = entry.activity_absent
            # ubd/cd/leg counters share the TP/FN/FP/TN layout, so fill
            # them generically from the matching message fields.
            for prefix, counter in (("ubd", self.ubd), ("cd", self.cd),
                                    ("leg", self.leg)):
                cell = counter[entry.region_id][entry.region_config]
                for metric in ("TP", "FN", "FP", "TN"):
                    cell[metric] = getattr(
                        entry, "%s_%s" % (prefix, metric.lower()))
            seen.append(pair)
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input; these files are written locally by
        # save_accuracy, so the call is kept as-is.
        try:
            self.roi_activity = yaml.load(
                open("%s/roi_activity.yaml" % self.path, "r"))
            self.roi_non_activity = yaml.load(
                open("%s/roi_non_activity.yaml" % self.path, "r"))
            self.roi_cd = yaml.load(open("%s/roi_cd.yaml" % self.path, "r"))
            self.roi_ubd = yaml.load(open("%s/roi_ubd.yaml" % self.path, "r"))
            self.roi_leg = yaml.load(open("%s/roi_leg.yaml" % self.path, "r"))
            self.wp_history = yaml.load(
                open("%s/wp_history.yaml" % self.path, "r"))
        except IOError:
            # Missing files: start from empty per-region logs.
            self.roi_activity = {roi: list() for roi in self.regions.keys()}
            self.roi_non_activity = {
                roi: list()
                for roi in self.regions.keys()
            }
            self.roi_cd = {roi: list() for roi in self.regions.keys()}
            self.roi_ubd = {roi: list() for roi in self.regions.keys()}
            self.roi_leg = {roi: list() for roi in self.regions.keys()}
            self.wp_history = dict()

    def save_accuracy(self):
        """Persist all accuracy counters.

        Inserts one UbdSceneAccuracy message per (region, waypoint) pair into
        the message store, then dumps the ROI timeline dictionaries and the
        waypoint history as YAML files under self.path.
        """
        header = Header(1, rospy.Time.now(), "")
        rospy.loginfo("Storing...")
        for roi in self.regions.keys():
            for wp in self.topo_map:
                act = self.activity[roi][wp]
                ubd = self.ubd[roi][wp]
                cd = self.cd[roi][wp]
                leg = self.leg[roi][wp]
                record = UbdSceneAccuracy(
                    header, act["Present"], act["Absent"],
                    ubd["TP"], ubd["FN"], ubd["FP"], ubd["TN"],
                    cd["TP"], cd["FN"], cd["FP"], cd["TN"],
                    leg["TP"], leg["FN"], leg["FP"], leg["TN"],
                    roi, wp, self.map)
                self._db_store.insert(record)
        # each (basename, data) pair becomes <path>/<basename>.yaml
        yaml_dumps = [
            ("roi_activity", self.roi_activity),
            ("roi_non_activity", self.roi_non_activity),
            ("roi_cd", self.roi_cd),
            ("roi_ubd", self.roi_ubd),
            ("roi_leg", self.roi_leg),
            ("wp_history", self.wp_history),
        ]
        for basename, data in yaml_dumps:
            with open("%s/%s.yaml" % (self.path, basename), 'w') as f:
                f.write(yaml.dump(data))

    def pub_img(self, timer_event):
        """Timer callback: republish the current image and refresh the
        change-detection, UBD and robot-position marker arrays, remembering
        how many markers were published so stale ones can be deleted later.
        """
        self._pub.publish(self._img)
        self._pub_cd_marker.publish(
            self._draw_detections(self._cd, "cd", self._cd_len))
        self._cd_len = len(self._cd)
        self._pub_ubd_marker.publish(
            self._draw_detections(self._ubd, "ubd", self._ubd_len))
        self._ubd_len = len(self._ubd)
        self._pub_robot_marker.publish(
            self._draw_detections(self._robot_pos, "robot",
                                  self._robot_pos_len))
        self._robot_pos_len = len(self._robot_pos)

    def _draw_detections(self, centroids, type="cd", length=0):
        """Build a MarkerArray visualising detection centroids.

        "cd" detections are drawn as blue spheres, "ubd" as green cubes and
        anything else (robot positions) as red cylinders. When centroids is
        empty, DELETE markers are emitted for the `length` markers published
        previously so they disappear from the display.
        """
        markers = MarkerArray()
        for idx, centroid in enumerate(centroids):
            m = Marker()
            m.header.frame_id = "/map"
            m.header.stamp = rospy.Time.now()
            m.ns = rospy.get_name() + "_marker"
            m.action = Marker.ADD
            m.pose.position = centroid
            m.pose.orientation.w = 1.0
            m.id = idx
            if type == "cd":
                m.type = Marker.SPHERE
                m.color.b = 1.0
            elif type == "ubd":
                m.type = Marker.CUBE
                m.color.g = 1.0
            else:
                m.type = Marker.CYLINDER
                m.color.r = 1.0
            m.scale.x = m.scale.y = m.scale.z = 0.2
            m.color.a = 1.0
            markers.markers.append(m)
        if not centroids:
            # nothing detected this cycle: clear the previously drawn markers
            for idx in range(length):
                m = Marker()
                m.header.frame_id = "/map"
                m.header.stamp = rospy.Time.now()
                m.ns = rospy.get_name() + "_marker"
                m.action = Marker.DELETE
                m.id = idx
                m.type = Marker.SPHERE if type == "cd" else Marker.CUBE
                m.color.a = 0.0
                markers.markers.append(m)
        return markers

    def _ubd_annotation(self,
                        log,
                        activity_rois=list(),
                        nonactivity_rois=list(),
                        wp=""):
        ubd_rois = list()
        timestamp = log[0].header.stamp
        for centroid in self._ubd:
            point = create_line_string([centroid.x, centroid.y])
            for roi, region in self.regions.iteritems():
                if is_intersected(region, point):
                    ubd_rois.append(roi)
                    self.roi_ubd[roi].append((timestamp.secs / 60) * 60)
        for roi in activity_rois:
            if roi in ubd_rois:
                self.ubd[roi][wp]["TP"] += 1
            else:
                self.ubd[roi][wp]["FN"] += 1
        for roi in nonactivity_rois:
            if roi in ubd_rois:
                self.ubd[roi][wp]["FP"] += 1
            else:
                self.ubd[roi][wp]["TN"] += 1

    def _cd_annotation(self,
                       log,
                       activity_rois=list(),
                       nonactivity_rois=list(),
                       wp=""):
        cd_rois = list()
        timestamp = log[0].header.stamp
        for centroid in self._cd:
            point = create_line_string([centroid.x, centroid.y])
            for roi, region in self.regions.iteritems():
                if is_intersected(region, point):
                    cd_rois.append(roi)
                    self.roi_cd[roi].append((timestamp.secs / 60) * 60)
        for roi in activity_rois:
            if roi in cd_rois:
                self.cd[roi][wp]["TP"] += 1
            else:
                self.cd[roi][wp]["FN"] += 1
        for roi in nonactivity_rois:
            if roi in cd_rois:
                self.cd[roi][wp]["FP"] += 1
            else:
                self.cd[roi][wp]["TN"] += 1

    def _leg_vis(self, log):
        start_time = (log[0].header.stamp.secs / 60) * 60
        end_time = start_time + 60
        query = {
            "$or": [{
                "end_time.secs": {
                    "$gte": start_time,
                    "$lt": end_time
                }
            }, {
                "start_time.secs": {
                    "$gte": start_time,
                    "$lt": end_time
                }
            }, {
                "start_time.secs": {
                    "$lt": start_time
                },
                "end_time.secs": {
                    "$gte": end_time
                }
            }]
        }
        self._trajvis.update_query(query=query)
        for traj in self._trajvis.trajs.traj.itervalues():
            self._trajvis.visualize_trajectory(traj)

    def _leg_annotation(self,
                        log,
                        activity_rois=list(),
                        nonactivity_rois=list(),
                        wp=""):
        leg_rois = list()
        timestamp = log[0].header.stamp
        for traj in self._trajvis.trajs.traj.itervalues():
            trajectory = traj.get_trajectory_message()
            points = [[pose.pose.position.x, pose.pose.position.y]
                      for pose in trajectory.trajectory]
            points = create_line_string(points)
            for roi, region in self.regions.iteritems():
                if is_intersected(region, points):
                    leg_rois.append(roi)
                    self.roi_leg[roi].append((timestamp.secs / 60) * 60)
        for roi in activity_rois:
            if roi in leg_rois:
                self.leg[roi][wp]["TP"] += 1
            else:
                self.leg[roi][wp]["FN"] += 1
        for roi in nonactivity_rois:
            if roi in leg_rois:
                self.leg[roi][wp]["FP"] += 1
            else:
                self.leg[roi][wp]["TN"] += 1

    def _activity_annotation(self, log):
        """Interactively collect ground-truth activity labels for one log.

        First resolves the waypoint the robot observed from (asking the
        operator when no waypoint or several candidate waypoints match the
        recorded robot positions), records it in self.wp_history, then asks
        the operator which regions did and did not contain activity during
        the minute around the log's timestamp. Updates the
        self.activity[roi][wp] Present/Absent counters and the
        self.roi_activity / self.roi_non_activity timelines.

        :param log: query result; log[0] carries the header stamp
        :return: (act_rois, nact_rois, wp) — annotated active region ids,
            annotated inactive region ids, and the resolved waypoint name
        """
        act_rois = list()
        rospy.logwarn(
            "Please wait until the new image appears before answering...")
        rospy.logwarn("All questions are based on the image topic %s" %
                      rospy.get_name())
        timestamp = log[0].header.stamp
        datestamp = datetime.datetime.fromtimestamp(timestamp.secs)
        wp = list()
        # collect every topological-map waypoint region containing one of the
        # robot's recorded positions for this log
        for pose in self._robot_pos:
            point = create_line_string([pose.x, pose.y])
            for wp_name, wp_region in self.topo_map.iteritems():
                if is_intersected(wp_region, point):
                    wp.append(wp_name)
        if wp == list():
            # no waypoint matched: ask the operator to choose one manually
            inpt = ""
            while inpt not in self.topo_map.keys():
                text = "Manual, From which region did the robot observed around %s? \n" % str(
                    datestamp)
                text += "Please select from %s:" % str(self.topo_map.keys())
                inpt = raw_input(text)
            wp = inpt
        elif len(list(set(wp))) > 1:
            # several distinct waypoints matched: let the operator disambiguate
            inpt = ""
            while inpt not in self.topo_map.keys():
                text = "From which region did the robot observed around %s? \n" % str(
                    datestamp)
                text += "Please select from %s:" % str(list(set(wp)))
                inpt = raw_input(text)
            wp = inpt
        else:
            # exactly one waypoint matched
            wp = wp[0]
        # remember the waypoint for this minute (timestamp floored to 60s)
        self.wp_history.update({(timestamp.secs / 60) * 60: wp})
        # listing all regions which activity happened from the image
        text = "Which regions did the activity happen within a minute around %s? \n" % str(
            datestamp)
        text += "Please select from %s, " % str(self.regions.keys())
        text += "and write in the following format '[reg_1,...,reg_n]'\n"
        text += "(Press ENTER if no activity is observed): "
        inpt = raw_input(text)
        # strip whitespace and brackets, then split "[a,b]" style input
        inpt = inpt.replace(" ", "")
        inpt = inpt.replace("[", "")
        inpt = inpt.replace("]", "")
        act_rois = inpt.split(",")
        if '' in act_rois and len(act_rois) == 1:
            # empty answer means no active regions
            act_rois = list()
        for roi in act_rois:
            self.activity[roi][wp]["Present"] += 1
            self.roi_activity[roi].append((timestamp.secs / 60) * 60)
        # listing all regions which no activity happening from the image
        text = "Which regions with no activity within a minute around %s? \n" % str(
            datestamp)
        text += "Please select from %s, " % str(self.regions.keys())
        text += "and write in the following format '[reg_1,...,reg_n]'\n"
        text += "(Press ENTER if all regions had an activity): "
        inpt = raw_input(text)
        inpt = inpt.replace(" ", "")
        inpt = inpt.replace("[", "")
        inpt = inpt.replace("]", "")
        nact_rois = inpt.split(",")
        if '' in nact_rois and len(nact_rois) == 1:
            # empty answer means no inactive regions
            nact_rois = list()
        for roi in nact_rois:
            self.activity[roi][wp]["Absent"] += 1
            self.roi_non_activity[roi].append((timestamp.secs / 60) * 60)
        return act_rois, nact_rois, wp

    def get_cd_pos(self, stamp):
        """Fetch change-detection centroids and the robot positions logged
        within the one-minute window containing stamp.

        :param stamp: a ROS time whose secs field is used for the query
        :return: (detections, robot) — object centroid points and robot
            position points
        """
        window_start = (stamp.secs / 60) * 60
        query = {
            "header.stamp.secs": {
                "$gte": window_start,
                "$lt": window_start + 60
            }
        }
        detections = list()
        robot = list()
        for entry in self._db_change.query(ChangeDetectionMsg._type, query):
            detections.extend(entry[0].object_centroids)
            robot.append(entry[0].robot_pose.position)
        return detections, robot

    def get_ubd_pos(self, stamp):
        """Fetch upper-body-detection positions and the matching robot
        positions logged within the one-minute window containing stamp.

        Queries the raw mongo collection directly so only the needed fields
        are projected, skipping entries with no detections.

        :param stamp: a ROS time whose secs field is used for the query
        :return: (detections, robot) — detection Points and robot Points
        """
        window_start = (stamp.secs / 60) * 60
        query = {
            "header.stamp.secs": {
                "$gte": window_start,
                "$lt": window_start + 60
            },
            # keep only entries that actually contain detections
            "$where": "this.ubd_pos.length > 0"
        }
        projection = {
            "header.stamp.secs": 1,
            "ubd_pos": 1,
            "robot.position.x": 1,
            "robot.position.y": 1,
            "robot.position.z": 1
        }
        detections = list()
        robot = list()
        cursor = self._db_ubd.find(query, projection).sort(
            "header.stamp.secs", pymongo.ASCENDING)
        for entry in cursor:
            detections.extend(
                Point(x=p["x"], y=p["y"], z=p["z"]) for p in entry["ubd_pos"])
            pos = entry["robot"]["position"]
            robot.append(Point(x=pos["x"], y=pos["y"], z=pos["z"]))
        return detections, robot

    def annotate(self):
        """Main interactive annotation loop.

        Pulls unannotated UbdSceneImgLog entries in batches of 30, replays
        the detections for each (image, UBD, change detection, legs), asks
        the operator for ground truth, marks the batch annotated, and repeats
        until the operator asks to stop or ROS shuts down. Accuracy counters
        are saved exactly once when stopping.

        Bug fixed: previously a non-numeric, non-empty answer to the
        "Stop now?" prompt triggered save_accuracy() twice (once in the bare
        except, once again because the unconverted string was truthy).
        """
        while not rospy.is_shutdown() and not self._stop:
            logs = self._db.query(UbdSceneImgLog._type, {"annotated": False},
                                  limit=30,
                                  sort_query=[("header.stamp.secs", 1)])
            rospy.loginfo("Getting %d logs from ubd_scene_log collection" %
                          len(logs))
            for log in logs:
                self._img = log[0].image
                self._ubd, ubd_robot = self.get_ubd_pos(log[0].header.stamp)
                self._cd, cd_robot = self.get_cd_pos(log[0].header.stamp)
                self._robot_pos = ubd_robot + cd_robot
                self._leg_vis(log)
                # annotation part
                act_rois, nact_rois, wp = self._activity_annotation(log)
                self._ubd_annotation(log, act_rois, nact_rois, wp)
                self._cd_annotation(log, act_rois, nact_rois, wp)
                self._leg_annotation(log, act_rois, nact_rois, wp)
                # reset detections before the next log
                self._cd = list()
                self._ubd = list()
                rospy.sleep(1)
            inpt = raw_input("Stop now? [1/0]")
            try:
                # numeric input: 0 continues, anything else stops
                stop_requested = bool(int(inpt))
            except ValueError:
                # non-numeric (or empty) input is treated as a stop request
                stop_requested = True
            if stop_requested:
                self.save_accuracy()
                self._stop = True
            # mark the processed batch so it is not served again
            for log in logs:
                log[0].annotated = True
                self._db.update(log[0],
                                message_query={
                                    "header.stamp.secs":
                                    log[0].header.stamp.secs
                                })
        rospy.loginfo("Thanks!")