def end(self):
    self._writer.end()
    # Write out the video metadata file.
    with open(self.abs_metadata_filename, "w") as f:
        json.dump(self._metadata, f)
    logging.info("Saved metadata file {}".format(self.abs_metadata_filename))
def save_event(self, event):
    logging.info("Saving event: {}".format(event))
    try:
        self._db["events"].insert_one(event)
    except Exception as e:
        logging.error("Failed to save event: {}, error: {}".format(event, e))
async def _timer_job(self):
    try:
        # Wait for the configured interval, fire the callback, then
        # re-schedule this coroutine so the timer keeps repeating.
        await asyncio.sleep(self._interval)
        self._func(*self._args, **self._kwargs)
        self._task = asyncio.ensure_future(self._timer_job())
    except CancelledError:
        logging.info("AsyncTimer job has been cancelled.")
        raise
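# A minimal usage sketch for the timer job above (hypothetical: the
# AsyncTimer constructor signature is an assumption, not taken from this
# module; only _timer_job() itself is shown here):
#
#     timer = AsyncTimer(5.0, do_work)   # fire do_work() every 5 seconds
#     timer._task = asyncio.ensure_future(timer._timer_job())
#     ...
#     timer._task.cancel()   # raises CancelledError inside _timer_job()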
def on_read(self, params):
    logging.info("Getting Analyzer information, params: {}".format(params))
    if isinstance(params, list):
        result = dict()
        for sid in params:
            result[sid] = self._get_analyzer_status(sid)
    else:
        # TODO: check if params is an ObjectID
        result = self._get_analyzer_status(params)
    return result
def init_worker():
    worker = get_worker()
    if hasattr(worker, "name") and worker.name.startswith("IO_WORKER"):
        logging.info("Initializing worker: {}".format(worker.name))
        # Initialize notification service.
        worker.je_notification = Notification(["nats://localhost:4222"])
        worker.je_io_loop = asyncio.get_event_loop()
        # Initialize database service.
        worker.je_database = Database(["mongodb://localhost:27017"],
                                      "jager_test")
    return "OK"
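# init_worker() is meant to run on Dask workers. A minimal sketch of how it
# might be dispatched (the scheduler address is illustrative; Client.run() is
# the standard dask.distributed way to execute a function on every worker):
#
#     from dask.distributed import Client
#
#     client = Client("tcp://localhost:8786")
#     client.run(init_worker)   # returns {worker_address: "OK", ...}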
def on_delete(self, params):
    logging.info("Deleting Analyzer: {}".format(params))
    try:
        # TODO: Need to make sure the resources allocated for analyzer
        #       "sid" have also been deleted completely.
        if isinstance(params, list):
            for sid in params:
                self._delete_analyzer(sid)
        else:
            self._delete_analyzer(params)
    except KeyError:
        raise RuntimeError("Invalid request format")
async def push(self, category, message):
    logging.info("Pushing notification: ({}: {})".format(category, message))
    try:
        await self._nats.publish(CHANNEL_NAME, json.dumps(
            {"category": category, "message": message}).encode())
    except ErrConnectionClosed:
        logging.error("Connection closed prematurely.")
        raise
    except ErrTimeout:
        logging.error("Timeout occurred when publishing"
                      " message: {}".format(message))
        raise
    except Exception as e:
        logging.error("Failed to publish notification: {}".format(e))
        raise
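# Usage sketch for the coroutine above (the constructor call mirrors the one
# used in analyzer_main_func() below; awaiting from a running event loop is
# assumed, and the message content is illustrative):
#
#     notification = Notification(["nats://localhost:4222"])
#     await notification.push("Analyzer", {"type": "intrusion_detection.alert"})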
def end(self, timestamp=None):
    self._writer.end()
    self._metadata[self._event_name]["end"] = float(
        timestamp if timestamp is not None else time.time())
    # TODO: Add error handling for failure of writing to object store.
    # Write out the video file to the object store.
    self._obj_store.save_file_obj(self._video_key, self._tmp_filepath)
    # Remove the temporary video file.
    os.remove(self._tmp_filepath)
    logging.info("Saved video: {}".format(self._video_key))
    # Write out the video metadata to the object store.
    self._obj_store.save_json_obj(self._metadata_key, self._metadata)
    logging.info("Saved video metadata: {}".format(self._metadata_key))
def on_update(self, update):
    logging.info("Updating Analyzer, params: {}".format(update))
    try:
        sid = update["id"]
        params = update["params"]
        analyzer = self._analyzers[sid]
        if "name" in params:
            analyzer.name = params["name"]
        if "source" in params:
            analyzer.source = params["source"]
        if "pipelines" in params:
            analyzer.pipelines = params["pipelines"]
    except KeyError as e:
        raise RuntimeError("Invalid request format: missing"
                           " field '{}'.".format(e.args[0]))
    except HotReconfigurationError as e:
        raise RuntimeError(str(e))
def analyzer_main_func(signal, cluster, anal_id, name, source, pipelines):
    logging.info("Start running Analyzer: {}".format(name))
    config = get_config()["apps"]["base"]
    src_reader = VideoStreamReader()
    try:
        src_reader.open(source["url"])
    except ConnectionError:
        signal.send("source_down")
        raise
    else:
        video_info = src_reader.get_video_info()
    dask = None
    try:
        # TODO: Get the address of scheduler from the configuration file.
        dask = Client(cluster.scheduler_address)
        pipelines = create_pipeline(anal_id, pipelines,
                                    video_info["frame_size"])
        signal.send("ready")
        while True:
            frames = src_reader.read(batch_size=config["read_batch_size"])
            motions = vp.detect_motion(frames, config["motion_threshold"])
            for p in pipelines:
                p.run(frames, motions)
            # Stop when the parent process sends a "stop" signal.
            if signal.poll() and signal.recv() == "stop":
                break
    except ConnectionError:
        logging.error("Error occurred when trying to connect to"
                      " source {}".format(source["url"]))
        # TODO: Should push a notification of this error.
        signal.send("source_down")
    finally:
        src_reader.release()
        for p in pipelines:
            if hasattr(p, "release"):
                p.release()
        # Guard against the case where the Dask client was never created.
        if dask is not None:
            dask.close()
        logging.info("Analyzer terminated: {}".format(name))
def on_create(self, params):
    logging.info("Creating Analyzer, params: {}".format(params))
    try:
        sid = params["id"]
        name = params["name"]
        source = params["source"]
        pipelines = params["pipelines"]
        # Create the analyzer object.
        self._analyzers[sid] = Analyzer(self._cluster, sid, name,
                                        source, pipelines)
    except KeyError as e:
        raise RuntimeError("Invalid request format: {}".format(e.args[0]))
    except ConnectionError:
        raise RuntimeError("Failed to establish connection to {}".format(
            source["url"]))
    except Exception as e:
        logging.error(e)
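# Example request payload for on_create() (shape inferred from the key
# accesses above; the field values are illustrative only):
#
#     {
#         "id": "5a0e4f7b9d1c2e3f4a5b6c7d",
#         "name": "gate-camera",
#         "source": {"url": "rtsp://192.168.1.10/stream1"},
#         "pipelines": [{"type": "IntrusionDetection", "params": {...}}]
#     }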
def push(self, category, content, timestamp=None):
    logging.info("Pushing notification: {}: {}".format(category, content))
    if timestamp is None:
        timestamp = time.time()
    msg = {
        "timestamp": timestamp,
        "category": category,
        "content": content
    }
    try:
        # NATS payloads must be bytes, so serialize the message first
        # (matching the async push() variant above).
        self._nats.publish(CHANNEL_NAME, json.dumps(msg).encode())
    except ErrConnectionClosed:
        logging.error("Connection closed prematurely.")
        raise
    except ErrTimeout:
        logging.error("Timeout occurred when publishing"
                      " message: {}".format(msg))
        raise
def __init__(self, roi, triggers, frame_size, detect_threshold=0.25):
    try:
        # Get the Dask client.
        self._client = get_client()
    except ValueError:
        raise RuntimeError("Should connect to Dask scheduler before"
                           " initializing this object.")
    self._roi = roi
    self._roi_polygon = geometry.Polygon(self._roi)
    self.frame_size = frame_size
    self.triggers = triggers
    self.detect_threshold = detect_threshold
    self._category_index = load_category_index("./coco.labels")
    self._max_margin = 3 * 15
    self._state = IntrusionDetector.STATE_NORMAL
    logging.info("Created an IntrusionDetector (roi: {}, triggers: {},"
                 " detect_threshold: {})".format(self._roi, self.triggers,
                                                 self.detect_threshold))
async def _initialize_nats(self, nats_hosts):
    options = {
        "servers": nats_hosts,
        "io_loop": self._io_loop,
        "max_reconnect_attempts": 60,
        "reconnect_time_wait": 2,
        "disconnected_cb": self._nats_disconnected_cb,
        "reconnected_cb": self._nats_reconnected_cb,
        "error_cb": self._nats_error_cb,
        "closed_cb": self._nats_closed_cb
    }
    try:
        await self._nats.connect(**options)
        logging.info("NATS connection for Notification is established.")
    except ErrNoServers as e:
        logging.error(str(e))
        raise
    except Exception as e:
        logging.error(str(e))
        raise
def __init__(self, anal_id, roi, triggers, frame_size,
             detect_threshold=0.25):
    try:
        # Get the Dask client.
        self._client = get_client()
    except ValueError:
        raise RuntimeError("Should connect to Dask scheduler before"
                           " initializing this object")
    # TODO: check_intrusion() should be modified to be able to process
    #       this roi format.
    self._roi = (roi[0]["x"], roi[0]["y"], roi[1]["x"], roi[1]["y"])
    self._triggers = triggers
    self._frame_size = frame_size
    self._detect_threshold = detect_threshold
    self._category_index = load_category_index("./coco.labels")
    self._state = IntrusionDetector.STATE_NORMAL
    # TODO: Should construct the options from the configuration file.
    rel_out_dir = os.path.join("intrusion_detection", anal_id)
    abs_out_dir = os.path.expanduser(
        os.path.join("~/jagereye_shared", rel_out_dir))
    ev_options = {
        "rel_out_dir": rel_out_dir,
        "abs_out_dir": abs_out_dir,
        "frame_size": frame_size,
        "roi": self._roi,
        "video_format": "mp4",
        "fps": 15,
        "margin": 3
    }
    self._event_video_agent = EventVideoAgent(**ev_options)
    self._current_writer = None
    logging.info("Created an IntrusionDetector (roi: {}, triggers: {},"
                 " detect_threshold: {})".format(self._roi, self._triggers,
                                                 self._detect_threshold))
def run(self, frames, motions):
    """Run Intrusion Detection pipeline.

    Args:
        frames: A list of raw video frames to be detected.
        motions: The motion of the input frames. It should be the
            output of video_proc.detect_motion().
    """
    detected = self._detector.run(frames, motions)
    for frame in detected:
        event = self._output_agent.process(frame)
        if event is not None:
            if event.action == EventVideoPolicy.START_RECORDING:
                timestamp = event.content["timestamp"]
                thumbnail_key = self._take_snapshot(timestamp, frame)
                self._output_event(event, thumbnail_key,
                                   frame.metadata["labels"])
            elif event.action == EventVideoPolicy.STOP_RECORDING:
                logging.info("End of event video")
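# State machine implemented by _output() below (summarized from the code;
# the STATE_* names come from IntrusionDetector):
#
#     STATE_NORMAL      --(intrusion caught)--------------> STATE_ALERT_START
#     STATE_ALERT_START --(first frames written, event emitted)--> STATE_ALERTING
#     STATE_ALERTING    --(front margin exhausted, EndOfMarginError)--> STATE_NORMAL
#
# While in STATE_NORMAL, frames are buffered in a back-margin queue so the
# event video can include footage from just before the intrusion started.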
def _output(self, catched, motion, frames):
    event = None
    ev_frames = EventVideoFrames(
        frames, gen_metadata(frames, motion, catched, self._state))
    if self._state == IntrusionDetector.STATE_NORMAL:
        self._event_video_agent.append_back_margin_queue(ev_frames)
        if any(catched):
            try:
                timestamp = frames[0].timestamp
                self._current_writer = self._event_video_agent.create(
                    timestamp)
                logging.info("Creating event video: {}".format(timestamp))
            except RuntimeError as e:
                logging.error(e)
                raise
            self._state = IntrusionDetector.STATE_ALERT_START
    elif self._state == IntrusionDetector.STATE_ALERT_START:
        self._current_writer.write(ev_frames, thumbnail=True)
        event = IntrusionDetectionEvent(
            timestamp=ev_frames.raw[0].timestamp,
            content={
                "video": self._current_writer.rel_video_filename,
                "thumbnail": self._current_writer.rel_thumbnail_filename,
                "metadata": self._current_writer.rel_metadata_filename,
                "triggered": ev_frames.get_triggered()
            })
        self._state = IntrusionDetector.STATE_ALERTING
    elif self._state == IntrusionDetector.STATE_ALERTING:
        # Any further detection extends the recording by resetting the
        # front margin.
        if any(catched):
            self._current_writer.reset_front_margin()
        try:
            self._current_writer.write(ev_frames)
        except EndOfMarginError:
            logging.info("End of event video")
            self._event_video_agent.clear_back_margin_queue()
            self._current_writer = None
            self._state = IntrusionDetector.STATE_NORMAL
    else:
        raise RuntimeError("Unknown state: {}".format(self._state))
    return event
def cleanup(self): logging.info("Destroying Notification service") self._nats.close()
async def _nats_closed_cb(self):
    logging.info("[NATS] connection is closed")
async def _nats_reconnected_cb(self):
    logging.info("[NATS] reconnected")
async def _nats_disconnected_cb(self):
    logging.info("[NATS] disconnected")
def analyzer_main_func(signal, cluster, anal_id, name, source, pipelines):
    logging.info("Start running Analyzer: {}".format(name))
    executor = ThreadPoolExecutor(max_workers=1)
    notification = Notification(["nats://localhost:4222"])
    try:
        db_client = setup_db_client()
    except ConnectionFailure:
        return

    def save_event(event):
        logging.info("Saving event: {}".format(event))
        try:
            db_client["jager_test"]["events"].insert_one({
                "analyzerId": anal_id,
                "type": event.name,
                "timestamp": event.timestamp,
                "date": datetime.datetime.fromtimestamp(event.timestamp),
                "content": event.content
            })
        except Exception as e:
            logging.error("Failed to save event: {}".format(e))

    src_reader = VideoStreamReader()
    dask = None
    try:
        # TODO: Get the address of scheduler from the configuration file.
        dask = Client(cluster.scheduler_address)
        src_reader.open(source["url"])
        video_info = src_reader.get_video_info()
        pipelines = create_pipeline(anal_id, pipelines,
                                    video_info["frame_size"])
        while True:
            frames = src_reader.read(batch_size=5)
            motion = vp.detect_motion(frames)
            results = [p.run(frames, motion) for p in pipelines]
            for event in results:
                if event is not None:
                    notification_msg = {
                        "type": event.name,
                        "analyzerId": anal_id,
                        "name": name
                    }
                    notification_msg.update(event.content)
                    notification.push("Analyzer", notification_msg)
                    # Persist the event on a worker thread so the main
                    # loop is not blocked by database I/O.
                    executor.submit(save_event, event)
            if signal.poll() and signal.recv() == "stop":
                break
    except ConnectionBrokenError:
        logging.error("Error occurred when trying to connect to"
                      " source {}".format(source["url"]))
        # TODO: Should push a notification of this error.
        signal.send("source_down")
    finally:
        src_reader.release()
        for p in pipelines:
            p.release()
        # Guard against the case where the Dask client was never created.
        if dask is not None:
            dask.close()
        executor.shutdown()
        logging.info("Analyzer terminated: {}".format(name))
def on_stop(self, sid):
    logging.info("Stopping Analyzer: {}".format(sid))
    if sid not in self._analyzers:
        raise RuntimeError("Analyzer not found")
    self._analyzers[sid].stop()
def cleanup(self): logging.info("Destroying Database service") self._client.close()