def show_params_graph(self):
    """Run every frame through the MOG2 and frame-difference filters, record
    mask metadata via store_params, then display the AMF parameter graphs.

    Blocks forever at the end (sleep loop) so the visdom windows stay open.
    """
    self.viz.close(env="amf_params")
    print("Start running through the video")
    total_frames = len(self.frame_list)
    # Run Through Video
    for position, frame in enumerate(self.frame_list, start=1):
        print(f"{int(position / total_frames * 100)} %", end="\r")
        resized = frame_resize(frame.copy())
        mog2_mask = self.apply_mog2(resized)
        fd_mask = self.apply_fd(resized)
        mog2_meta = self.amf.calculate_mask_metadata(mog2_mask)
        fd_meta = self.amf.calculate_mask_metadata(fd_mask)
        self.store_params(
            mog2_mask,
            fd_mask,
            mog2_meta.total,
            mog2_meta.avg,
            mog2_meta.std,
            len(mog2_meta.contours),
            len(fd_meta.contours),
            self.amf.calculate_variance(mog2_meta.std),
        )
    print("Completed running through the video")
    print("Start Showing AMF Parameters")
    self.init_viz()
    self.update_mask_size_graph()
    # Keep the process alive so the visdom graphs remain visible.
    while True:
        time.sleep(1)
def playback(self):
    """Replay stored frames and masks in visdom, drawing motion boxes on each
    frame, until self.playback_flag is cleared. Wraps back to frame 0 at the end.
    """
    frame_idx = self.playback_start_frame
    while self.playback_flag:
        annotated = frame_resize(self.frame_list[frame_idx].copy())
        current_mask = self.mask_list[frame_idx]
        # Overlay every detected motion bounding box on the frame.
        for bbx in self.amf.detect_motion(current_mask, 200):
            self.amf.draw_detection_box(BoundingBox(*bbx), annotated)
        disp_image = frame_convert(annotated)
        self.playback_win = self.viz.image(
            disp_image,
            win="playback_window",
            opts=dict(width=320, height=250, caption=f"{frame_idx}"),
            env="amf_params",
        )
        self.playback_mask_win = self.viz.image(
            current_mask,
            win="playback_mask_window",
            opts=dict(width=320, height=250, caption=f"{frame_idx}"),
            env="amf_params",
        )
        # Pace playback to the source video's frame rate.
        time.sleep(1.0 / self.frame_fps)
        frame_idx = (frame_idx + 1) % len(self.frame_list)
def stream_video(self):
    """Stream the frame list through motion detection into a visdom window,
    paced at the source frame rate.
    """
    for frame in self.frame_list:
        annotated = self.motion_detection(frame_resize(frame.copy()))
        self.viz.image(
            frame_convert(annotated),
            win="video_window",
            opts=dict(width=320, height=250),
            env="amf_stream",
        )
        time.sleep(1.0 / self.frame_fps)
def show_ssc_graph(self):
    """Predict the scene class for every frame and plot the per-frame series
    as a line graph in the visdom "amf_ssc" environment.
    """
    print("Show SSC Graph")
    ssc_prediction = [
        self.predict_frame_scene(frame_resize(frame.copy()))
        for frame in self.frame_list
    ]
    n = len(self.frame_list)
    self.viz.line(
        X=np.linspace(0, n - 1, num=n),
        Y=np.array(ssc_prediction),
        opts=dict(title="ssc prediction graph", showlegend=True),
        env="amf_ssc",
        win="ssc_prediction_graph",
    )
def trigger(context):
    """visdom event callback (closes over self): on a PropertyUpdate for the
    Get Frame panel, update the frame number or bounding-box threshold, then
    redraw both detectors' boxes on the selected frame and refresh the panel.
    """
    if context["event_type"] != "PropertyUpdate":
        return
    if context["target"] != self.get_frame_panel.panel:
        return
    pane_content = context.get("pane_data").get("content")
    property_name = pane_content[context.get("propertyId")].get("name")
    if property_name == "Frame Number":
        self.frame_number = int(context.get("value"))
    elif property_name == "Bounding Box Threshold":
        self.viz_config["bounding_box_threshold"] = int(context.get("value"))
    # Threshold is read after any update above so a just-changed value applies.
    threshold = self.viz_config.get("bounding_box_threshold", 200)
    annotated = frame_resize(self.frame_list[self.frame_number].copy())
    mog2_disp_mask = self.mask_list[self.frame_number]
    fd_disp_mask = self.fd_mask_list[self.frame_number]
    # MOG2 boxes drawn first, then frame-difference boxes, each in its own color.
    for mask, color in ((mog2_disp_mask, (0, 255, 0)), (fd_disp_mask, (255, 0, 0))):
        for bbx in self.amf.detect_motion(mask, threshold):
            self.amf.draw_detection_box(BoundingBox(*bbx), annotated, color)
    self.show_frame_images(
        frame_convert(annotated), mog2_disp_mask, fd_disp_mask, self.frame_number
    )
    self.get_frame_panel.panel = update("Get Frame Panel")
def split_video_filename(filename: str) -> dict:
    """Split a video filename into its leading jumbo id (the text before the
    first underscore) and the unmodified filename.
    """
    return {"jumbo_id": filename.split("_")[0], "filename": filename}


video_list = list(map(split_video_filename, video_list))
total_video = len(video_list)
for count, video in enumerate(video_list):
    print(f"{video['filename']}: {int((count+1)/total_video*100)} %", end="\r")
    output_dir = f'data/{video["jumbo_id"]}'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, 0o777)
    current_capture, metadata = cap_video(f'{path}{video["filename"]}')
    amf = AdvancedMotionFilter(
        ssc_model="model/scene_knn_model",
        frame_width=metadata["width"],
        frame_height=metadata["height"],
    )
    try:
        for i in range(metadata["count"]):
            ret_bool, frame = current_capture.read()
            # BUG FIX: read() returns (False, None) on a failed/short read; the
            # original only tested ret_bool on the *next* loop iteration, so
            # frame.copy() crashed with AttributeError on None. Bail out here.
            if not ret_bool or frame is None:
                break
            resized_frame = video_utils.frame_resize(frame.copy())
            mask = amf.apply(resized_frame)
            Image.fromarray(mask.copy()).save(f'{output_dir}/{i}.jpg')
    finally:
        # Release the capture even if a frame fails to process mid-video.
        current_capture.release()