Example n. 1
0
 def start_offline_dashboard(sid, filepath):
     """Tell the dashboard the highest frame index available in an offline log.

     Counts the files under ``<filepath>/rgb`` and emits the maximum valid
     frame index over socketio.
     """
     rgb_dir = os.path.join(filepath, "rgb")
     frame_count = len(os.listdir(rgb_dir))
     # Frames are named starting at 00000, so the last index is count - 1.
     sio.emit("handleMaxFrames", frame_count - 1)
Example n. 2
0
 def retrain_detector(sid, settings=None):
     """Retrain the detector with the given settings and emit the result.

     Args:
         sid: socketio session id of the requesting client.
         settings (dict, optional): retraining settings forwarded to
             ``LP.retrain_detector``. Defaults to an empty dict.
     """
     # Fix: the original used a mutable default (settings={}), which is shared
     # across calls — any mutation downstream would leak into later requests.
     if settings is None:
         settings = {}
     inference_json = LP.retrain_detector(settings)
     sio.emit("annotationRetrain", inference_json)
Example n. 3
0
 def update_log_settings(sid, new_values):
     """Update the dashboard image settings and broadcast them back.

     NOTE(review): ``self`` is not a parameter here — it appears to be
     captured from an enclosing scope; confirm against the defining class.
     """
     for key in ("image_resolution", "image_quality"):
         self.log_settings[key] = new_values[key]
     sio.emit("image_settings", self.log_settings)
 def send_chat(self, chat):
     """Forward a chat message from the agent to the player.

     The message is shown on the dashboard, recorded in agent memory,
     and passed through to the underlying agent's own chat channel.
     """
     logging.info("Sending chat: {}".format(chat))
     reply_payload = {"agent_reply": "Agent: {}".format(chat)}
     sio.emit("showAssistantReply", reply_payload)
     self.memory.add_chat(self.memory.self_memid, chat)
     return self.cagent.send_chat(chat)
Example n. 5
0
    def log(self, rgb_depth, detections, humans, old_rgb_depth):
        """Push the current perception state to the dashboard over socketio.

        Emits the image settings, the serialized current frame (rgb + depth),
        and the detection/human payloads so the dashboard can render its
        debugging visualizations.

        Args:
            rgb_depth (RGBDepth): the current RGBDepth frame; may be newer
                than old_rgb_depth, the frame SlowPerception actually ran on.
            detections (list[Detections]): all detections, or None.
            humans (list[Human]): all detected humans, or None.
            old_rgb_depth (RGBDepth): frame the detections/humans correspond
                to (the one SlowPerception ran on), or None.
        """
        # A mocked sio means there is no dashboard to talk to — do nothing.
        if hasattr(sio, "mock"):
            return

        sio.emit("image_settings", self.log_settings)
        res = self.log_settings["image_resolution"]
        qual = self.log_settings["image_quality"]

        frame_struct = rgb_depth.to_struct(res, qual)

        # -1 is the dashboard's sentinel for "no perception frame available".
        if old_rgb_depth is None:
            perceived_struct = -1
        else:
            perceived_struct = old_rgb_depth.to_struct(res, qual)

        orig_height = rgb_depth.rgb.shape[0]
        orig_width = rgb_depth.rgb.shape[1]
        scale = float(res) / orig_height
        # Resize to the configured resolution, preserving aspect ratio.
        out_height = res
        out_width = int(res * float(orig_width) / orig_height)

        objects_payload = []
        if detections is not None:
            objects_payload = [d.to_struct() for d in detections]
        humans_payload = []
        if humans is not None:
            humans_payload = [h.to_struct() for h in humans]

        sio.emit("rgb", frame_struct["rgb"])
        sio.emit(
            "depth",
            {
                "depthImg": frame_struct["depth_img"],
                "depthMax": frame_struct["depth_max"],
                "depthMin": frame_struct["depth_min"],
            },
        )

        # Both overlay channels share the same image geometry.
        geometry = {
            "image": perceived_struct,
            "height": out_height,
            "width": out_width,
            "scale": scale,
        }
        sio.emit("objects", dict(geometry, objects=objects_payload))
        sio.emit("humans", dict(geometry, humans=humans_payload))