def main():
    # connect to mqtt and setup last will
    def on_connect(client, userdata, flags, rc):
        print("On connect called")
        if rc != 0:
            if rc == 3:
                print("MQTT Server unavailable")
            elif rc == 4:
                print("MQTT Bad username or password")
            elif rc == 5:
                print("MQTT Not authorized")
            else:
                print("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
        # publish a message to signal that the service is running
        client.publish(MQTT_TOPIC_PREFIX + '/available', 'online', retain=True)

    client = mqtt.Client(client_id=MQTT_CLIENT_ID)
    client.on_connect = on_connect
    client.will_set(MQTT_TOPIC_PREFIX + '/available', payload='offline', qos=1, retain=True)
    if MQTT_USER is not None:
        client.username_pw_set(MQTT_USER, password=MQTT_PASS)
    client.connect(MQTT_HOST, MQTT_PORT, 60)
    client.loop_start()

    plasma_process = start_plasma_store()

    ##
    # Setup config defaults for cameras
    ##
    for name, config in CONFIG['cameras'].items():
        config['snapshots'] = {
            'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True)
        }

    # Queue for cameras to push tracked objects to
    tracked_objects_queue = mp.SimpleQueue()

    # Queue for clip processing
    event_queue = mp.Queue()

    # Start the shared tflite process
    tflite_process = EdgeTPUProcess()

    # start the camera processes
    camera_processes = {}
    for name, config in CONFIG['cameras'].items():
        # Merge the ffmpeg config with the global config
        ffmpeg = config.get('ffmpeg', {})
        ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
        ffmpeg_global_args = ffmpeg.get('global_args', FFMPEG_DEFAULT_CONFIG['global_args'])
        ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
        ffmpeg_input_args = ffmpeg.get('input_args', FFMPEG_DEFAULT_CONFIG['input_args'])
        ffmpeg_output_args = ffmpeg.get('output_args', FFMPEG_DEFAULT_CONFIG['output_args'])
        if config.get('save_clips', {}).get('enabled', False):
            ffmpeg_output_args = [
                "-f", "segment",
                "-segment_time", "10",
                "-segment_format", "mp4",
                "-reset_timestamps", "1",
                "-strftime", "1",
                "-c", "copy",
                "-an",
                "-map", "0",
                f"/cache/{name}-%Y%m%d%H%M%S.mp4"
            ] + ffmpeg_output_args
        ffmpeg_cmd = (['ffmpeg'] +
                      ffmpeg_global_args +
                      ffmpeg_hwaccel_args +
                      ffmpeg_input_args +
                      ['-i', ffmpeg_input] +
                      ffmpeg_output_args +
                      ['pipe:'])

        if 'width' in config and 'height' in config:
            frame_shape = (config['height'], config['width'], 3)
        else:
            frame_shape = get_frame_shape(ffmpeg_input)

        frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        take_frame = config.get('take_frame', 1)

        detection_frame = mp.Value('d', 0.0)

        ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
        frame_queue = mp.SimpleQueue()
        camera_fps = EventsPerSecond()
        camera_fps.start()
        camera_capture = CameraCapture(name, ffmpeg_process, frame_shape, frame_queue,
                                       take_frame, camera_fps, detection_frame)
        camera_capture.start()

        camera_processes[name] = {
            'camera_fps': camera_fps,
            'take_frame': take_frame,
            'process_fps': mp.Value('d', 0.0),
            'detection_fps': mp.Value('d', 0.0),
            'detection_frame': detection_frame,
            'read_start': mp.Value('d', 0.0),
            'ffmpeg_process': ffmpeg_process,
            'ffmpeg_cmd': ffmpeg_cmd,
            'frame_queue': frame_queue,
            'frame_shape': frame_shape,
            'capture_thread': camera_capture
        }

        camera_process = mp.Process(target=track_camera, args=(name, config,
            GLOBAL_OBJECT_CONFIG, frame_queue, frame_shape,
            tflite_process.detection_queue, tracked_objects_queue,
            camera_processes[name]['process_fps'],
            camera_processes[name]['detection_fps'],
            camera_processes[name]['read_start'],
            camera_processes[name]['detection_frame']))
        camera_process.daemon = True
        camera_processes[name]['process'] = camera_process

    for name, camera_process in camera_processes.items():
        camera_process['process'].start()
        print(f"Camera process started for {name}: {camera_process['process'].pid}")

    event_processor = EventProcessor(CONFIG['cameras'], camera_processes, '/cache', '/clips', event_queue)
    event_processor.start()

    object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX,
                                              tracked_objects_queue, event_queue)
    object_processor.start()

    camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'], tflite_process,
                                     tracked_objects_queue, plasma_process)
    camera_watchdog.start()

    # create a flask app that encodes frames as mjpeg on demand
    app = Flask(__name__)
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    @app.route('/')
    def ishealthy():
        # return a health check response
        return "Frigate is running. Alive and healthy!"

    @app.route('/debug/stack')
    def processor_stack():
        frame = sys._current_frames().get(object_processor.ident, None)
        if frame:
            return "<br>".join(traceback.format_stack(frame)), 200
        else:
            return "no frame found", 200

    @app.route('/debug/print_stack')
    def print_stack():
        pid = int(request.args.get('pid', 0))
        if pid == 0:
            return "missing pid", 200
        else:
            os.kill(pid, signal.SIGUSR1)
            return "check logs", 200

    @app.route('/debug/stats')
    def stats():
        stats = {}
        total_detection_fps = 0

        for name, camera_stats in camera_processes.items():
            total_detection_fps += camera_stats['detection_fps'].value
            capture_thread = camera_stats['capture_thread']
            stats[name] = {
                'camera_fps': round(capture_thread.fps.eps(), 2),
                'process_fps': round(camera_stats['process_fps'].value, 2),
                'skipped_fps': round(capture_thread.skipped_fps.eps(), 2),
                'detection_fps': round(camera_stats['detection_fps'].value, 2),
                'read_start': camera_stats['read_start'].value,
                'pid': camera_stats['process'].pid,
                'ffmpeg_pid': camera_stats['ffmpeg_process'].pid,
                'frame_info': {
                    'read': capture_thread.current_frame,
                    'detect': camera_stats['detection_frame'].value,
                    'process': object_processor.camera_data[name]['current_frame_time']
                }
            }

        stats['coral'] = {
            'fps': round(total_detection_fps, 2),
            'inference_speed': round(tflite_process.avg_inference_speed.value * 1000, 2),
            'detection_start': tflite_process.detection_start.value,
            'pid': tflite_process.detect_process.pid
        }

        rc = camera_watchdog.plasma_process.poll()
        stats['plasma_store_rc'] = rc

        return jsonify(stats)

    @app.route('/<camera_name>/<label>/best.jpg')
    def best(camera_name, label):
        if camera_name in CONFIG['cameras']:
            best_frame = object_processor.get_best(camera_name, label)
            if best_frame is None:
                best_frame = np.zeros((720, 1280, 3), np.uint8)
            best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
            ret, jpg = cv2.imencode('.jpg', best_frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404

    @app.route('/<camera_name>')
    def mjpeg_feed(camera_name):
        fps = int(request.args.get('fps', '3'))
        height = int(request.args.get('h', '360'))
        if camera_name in CONFIG['cameras']:
            # return a multipart response
            return Response(imagestream(camera_name, fps, height),
                            mimetype='multipart/x-mixed-replace; boundary=frame')
        else:
            return "Camera named {} not found".format(camera_name), 404

    def imagestream(camera_name, fps, height):
        while True:
            # max out at specified FPS
            time.sleep(1 / fps)
            frame = object_processor.get_current_frame(camera_name)
            if frame is None:
                frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)

            width = int(height * frame.shape[1] / frame.shape[0])

            frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

            ret, jpg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')

    app.run(host='0.0.0.0', port=WEB_PORT, debug=False)

    object_processor.join()

    plasma_process.terminate()
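
# Illustrative sketch only (not part of the original source): how a capture
# loop could consume the raw frames that the ffmpeg command above writes to
# 'pipe:'. It assumes the default output args emit raw video on stdout and
# that start_or_restart_ffmpeg returns a subprocess with stdout piped; the
# helper name read_raw_frames is hypothetical.
import numpy as np

def read_raw_frames(ffmpeg_process, frame_shape):
    # frame_size follows the same height x width x 3 byte layout used in main()
    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
    while True:
        buf = ffmpeg_process.stdout.read(frame_size)
        if len(buf) < frame_size:
            # ffmpeg exited or the pipe closed; a robust reader would keep
            # reading until frame_size bytes have accumulated
            break
        yield np.frombuffer(buf, dtype=np.uint8).reshape(frame_shape)
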
def main():
    stop_event = threading.Event()

    # connect to mqtt and setup last will
    def on_connect(client, userdata, flags, rc):
        print("On connect called")
        if rc != 0:
            if rc == 3:
                print("MQTT Server unavailable")
            elif rc == 4:
                print("MQTT Bad username or password")
            elif rc == 5:
                print("MQTT Not authorized")
            else:
                print("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
        # publish a message to signal that the service is running
        client.publish(MQTT_TOPIC_PREFIX + '/available', 'online', retain=True)

    if MQTT_ENABLED:
        client = mqtt.Client(client_id=MQTT_CLIENT_ID)
        client.on_connect = on_connect
        client.will_set(MQTT_TOPIC_PREFIX + '/available', payload='offline', qos=1, retain=True)
        if MQTT_USER is not None:
            client.username_pw_set(MQTT_USER, password=MQTT_PASS)
        client.connect(MQTT_HOST, MQTT_PORT, 60)
        client.loop_start()
    else:
        client = None

    ##
    # Setup config defaults for cameras
    ##
    for name, config in CONFIG['cameras'].items():
        config['snapshots'] = {
            'show_timestamp': config.get('snapshots', {}).get('show_timestamp', True),
            'draw_zones': config.get('snapshots', {}).get('draw_zones', False),
            'draw_bounding_boxes': config.get('snapshots', {}).get('draw_bounding_boxes', True)
        }
        config['zones'] = config.get('zones', {})

    # Queue for cameras to push tracked objects to
    tracked_objects_queue = mp.Queue(maxsize=len(CONFIG['cameras'].keys()) * 2)

    # Queue for clip processing
    event_queue = mp.Queue()

    # create the detection pipes and shms
    out_events = {}
    camera_shms = []
    for name in CONFIG['cameras'].keys():
        out_events[name] = mp.Event()
        shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=300 * 300 * 3)
        shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20 * 6 * 4)
        camera_shms.append(shm_in)
        camera_shms.append(shm_out)

    detection_queue = mp.Queue()

    detectors = {}
    for name, detector in DETECTORS.items():
        if detector['type'] == 'cpu':
            detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device='cpu')
        if detector['type'] == 'edgetpu':
            detectors[name] = EdgeTPUProcess(detection_queue, out_events=out_events, tf_device=detector['device'])

    # create the camera processes
    camera_process_info = {}
    for name, config in CONFIG['cameras'].items():
        # Merge the ffmpeg config with the global config
        ffmpeg = config.get('ffmpeg', {})
        ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
        ffmpeg_global_args = ffmpeg.get('global_args', FFMPEG_DEFAULT_CONFIG['global_args'])
        ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
        ffmpeg_input_args = ffmpeg.get('input_args', FFMPEG_DEFAULT_CONFIG['input_args'])
        ffmpeg_output_args = ffmpeg.get('output_args', FFMPEG_DEFAULT_CONFIG['output_args'])
        if config.get('fps') is not None:
            ffmpeg_output_args = ["-r", str(config.get('fps'))] + ffmpeg_output_args
        if config.get('save_clips', {}).get('enabled', False):
            ffmpeg_output_args = [
                "-f", "segment",
                "-segment_time", "10",
                "-segment_format", "mp4",
                "-reset_timestamps", "1",
                "-strftime", "1",
                "-c", "copy",
                "-an",
                "-map", "0",
                f"{os.path.join(CACHE_DIR, name)}-%Y%m%d%H%M%S.mp4"
            ] + ffmpeg_output_args
        ffmpeg_cmd = (['ffmpeg'] +
                      ffmpeg_global_args +
                      ffmpeg_hwaccel_args +
                      ffmpeg_input_args +
                      ['-i', ffmpeg_input] +
                      ffmpeg_output_args +
                      ['pipe:'])

        config['ffmpeg_cmd'] = ffmpeg_cmd

        if 'width' in config and 'height' in config:
            frame_shape = (config['height'], config['width'], 3)
        else:
            frame_shape = get_frame_shape(ffmpeg_input)

        config['frame_shape'] = frame_shape
        config['take_frame'] = config.get('take_frame', 1)

        camera_process_info[name] = {
            'camera_fps': mp.Value('d', 0.0),
            'skipped_fps': mp.Value('d', 0.0),
            'process_fps': mp.Value('d', 0.0),
            'detection_fps': mp.Value('d', 0.0),
            'detection_frame': mp.Value('d', 0.0),
            'read_start': mp.Value('d', 0.0),
            'ffmpeg_pid': mp.Value('i', 0),
            'frame_queue': mp.Queue(maxsize=2)
        }

        # merge global object config into camera object config
        camera_objects_config = config.get('objects', {})
        # get objects to track for camera
        objects_to_track = camera_objects_config.get('track', GLOBAL_OBJECT_CONFIG.get('track', ['person']))
        # get object filters
        object_filters = camera_objects_config.get('filters', GLOBAL_OBJECT_CONFIG.get('filters', {}))
        config['objects'] = {
            'track': objects_to_track,
            'filters': object_filters
        }

        capture_process = mp.Process(target=capture_camera,
                                     args=(name, config, camera_process_info[name], stop_event))
        capture_process.daemon = True
        camera_process_info[name]['capture_process'] = capture_process

        camera_process = mp.Process(target=track_camera,
                                    args=(name, config, detection_queue, out_events[name],
                                          tracked_objects_queue, camera_process_info[name], stop_event))
        camera_process.daemon = True
        camera_process_info[name]['process'] = camera_process

    # start the camera_processes
    for name, camera_process in camera_process_info.items():
        camera_process['capture_process'].start()
        print(f"Camera capture process started for {name}: {camera_process['capture_process'].pid}")
        camera_process['process'].start()
        print(f"Camera process started for {name}: {camera_process['process'].pid}")

    event_processor = EventProcessor(CONFIG, camera_process_info, CACHE_DIR, CLIPS_DIR, event_queue, stop_event)
    event_processor.start()

    object_processor = TrackedObjectProcessor(CONFIG['cameras'], client, MQTT_TOPIC_PREFIX,
                                              WEBHOOK_URL, tracked_objects_queue, event_queue, stop_event)
    object_processor.start()

    frigate_watchdog = FrigateWatchdog(camera_process_info, CONFIG['cameras'], detectors,
                                       detection_queue, out_events, tracked_objects_queue, stop_event)
    frigate_watchdog.start()

    def receiveSignal(signalNumber, frame):
        print('Received:', signalNumber)
        stop_event.set()
        event_processor.join()
        object_processor.join()
        frigate_watchdog.join()

        for detector in detectors.values():
            detector.stop()
        for shm in camera_shms:
            shm.close()
            shm.unlink()

        sys.exit()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    # create a flask app that encodes frames as mjpeg on demand
    app = Flask(__name__)
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    @app.route('/')
    def ishealthy():
        # return a health check response
        return "Frigate is running. Alive and healthy!"
    @app.route('/debug/stack')
    def processor_stack():
        frame = sys._current_frames().get(object_processor.ident, None)
        if frame:
            return "<br>".join(traceback.format_stack(frame)), 200
        else:
            return "no frame found", 200

    @app.route('/debug/print_stack')
    def print_stack():
        pid = int(request.args.get('pid', 0))
        if pid == 0:
            return "missing pid", 200
        else:
            os.kill(pid, signal.SIGUSR1)
            return "check logs", 200

    @app.route('/debug/stats')
    def stats():
        stats = {}
        total_detection_fps = 0

        for name, camera_stats in camera_process_info.items():
            total_detection_fps += camera_stats['detection_fps'].value
            stats[name] = {
                'camera_fps': round(camera_stats['camera_fps'].value, 2),
                'process_fps': round(camera_stats['process_fps'].value, 2),
                'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
                'detection_fps': round(camera_stats['detection_fps'].value, 2),
                'pid': camera_stats['process'].pid,
                'capture_pid': camera_stats['capture_process'].pid,
                'frame_info': {
                    'detect': camera_stats['detection_frame'].value,
                    'process': object_processor.camera_data[name]['current_frame_time']
                }
            }

        stats['detectors'] = {}
        for name, detector in detectors.items():
            stats['detectors'][name] = {
                'inference_speed': round(detector.avg_inference_speed.value * 1000, 2),
                'detection_start': detector.detection_start.value,
                'pid': detector.detect_process.pid
            }
        stats['detection_fps'] = round(total_detection_fps, 2)

        return jsonify(stats)

    @app.route('/<camera_name>/<label>/best.jpg')
    def best(camera_name, label):
        if camera_name in CONFIG['cameras']:
            best_object = object_processor.get_best(camera_name, label)
            best_frame = best_object.get('frame')
            if best_frame is None:
                best_frame = np.zeros((720, 1280, 3), np.uint8)
            else:
                best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)

            crop = bool(request.args.get('crop', 0, type=int))
            if crop:
                region = best_object.get('region', [0, 0, 300, 300])
                best_frame = best_frame[region[1]:region[3], region[0]:region[2]]

            height = int(request.args.get('h', str(best_frame.shape[0])))
            width = int(height * best_frame.shape[1] / best_frame.shape[0])

            best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
            ret, jpg = cv2.imencode('.jpg', best_frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404

    @app.route('/<camera_name>')
    def mjpeg_feed(camera_name):
        fps = int(request.args.get('fps', '3'))
        height = int(request.args.get('h', '360'))
        if camera_name in CONFIG['cameras']:
            # return a multipart response
            return Response(imagestream(camera_name, fps, height),
                            mimetype='multipart/x-mixed-replace; boundary=frame')
        else:
            return "Camera named {} not found".format(camera_name), 404

    @app.route('/<camera_name>/latest.jpg')
    def latest_frame(camera_name):
        if camera_name in CONFIG['cameras']:
            frame = object_processor.get_current_frame(camera_name)
            if frame is None:
                frame = np.zeros((720, 1280, 3), np.uint8)

            height = int(request.args.get('h', str(frame.shape[0])))
            width = int(height * frame.shape[1] / frame.shape[0])

            frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

            ret, jpg = cv2.imencode('.jpg', frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404

    def imagestream(camera_name, fps, height):
        while True:
            # max out at specified FPS
            time.sleep(1 / fps)
            frame = object_processor.get_current_frame(camera_name, draw=True)
            if frame is None:
                frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)

            width = int(height * frame.shape[1] / frame.shape[0])
            frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)

            ret, jpg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')

    app.run(host='0.0.0.0', port=WEB_PORT, debug=False)

    object_processor.join()
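
# Illustrative sketch only (assumption, not shown in this file): the
# /debug/print_stack route above sends SIGUSR1 to a worker pid and replies
# "check logs", which presumes each worker registered a handler that dumps its
# current stack. A minimal handler of that kind could look like this; the
# helper name install_stack_dump_handler is hypothetical.
import signal
import traceback

def install_stack_dump_handler():
    def dump_stack(signum, frame):
        # print the interrupted frame's stack so it shows up in the logs
        traceback.print_stack(frame)

    signal.signal(signal.SIGUSR1, dump_stack)
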
class FrigateApp():
    def __init__(self):
        self.stop_event = mp.Event()
        self.config: FrigateConfig = None
        self.detection_queue = mp.Queue()
        self.detectors: Dict[str, EdgeTPUProcess] = {}
        self.detection_out_events: Dict[str, mp.Event] = {}
        self.detection_shms: List[mp.shared_memory.SharedMemory] = []
        self.log_queue = mp.Queue()
        self.camera_metrics = {}

    def set_environment_vars(self):
        for key, value in self.config.environment_vars.items():
            os.environ[key] = value

    def ensure_dirs(self):
        for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
            if not os.path.exists(d) and not os.path.islink(d):
                logger.info(f"Creating directory: {d}")
                os.makedirs(d)
            else:
                logger.debug(f"Skipping directory: {d}")

        tmpfs_size = self.config.clips.tmpfs_cache_size
        if tmpfs_size:
            logger.info(f"Creating tmpfs of size {tmpfs_size}")
            rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
            if rc != 0:
                logger.error(f"Failed to create tmpfs, error code: {rc}")

    def init_logger(self):
        self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process')
        self.log_process.daemon = True
        self.log_process.start()
        root_configurer(self.log_queue)

    def init_config(self):
        config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
        self.config = FrigateConfig(config_file=config_file)

        for camera_name in self.config.cameras.keys():
            # create camera_metrics
            self.camera_metrics[camera_name] = {
                'camera_fps': mp.Value('d', 0.0),
                'skipped_fps': mp.Value('d', 0.0),
                'process_fps': mp.Value('d', 0.0),
                'detection_enabled': mp.Value('i', self.config.cameras[camera_name].detect.enabled),
                'detection_fps': mp.Value('d', 0.0),
                'detection_frame': mp.Value('d', 0.0),
                'read_start': mp.Value('d', 0.0),
                'ffmpeg_pid': mp.Value('i', 0),
                'frame_queue': mp.Queue(maxsize=2),
            }

    def check_config(self):
        for name, camera in self.config.cameras.items():
            assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles]))

            if not camera.clips.enabled and 'clips' in assigned_roles:
                logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.")
            elif camera.clips.enabled and not 'clips' in assigned_roles:
                logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.")

            if not camera.record.enabled and 'record' in assigned_roles:
                logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.")
            elif camera.record.enabled and not 'record' in assigned_roles:
                logger.warning(f"Camera {name} has record enabled, but record is not assigned to an input.")

            if not camera.rtmp.enabled and 'rtmp' in assigned_roles:
                logger.warning(f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled.")
            elif camera.rtmp.enabled and not 'rtmp' in assigned_roles:
                logger.warning(f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input.")

    def set_log_levels(self):
        logging.getLogger().setLevel(self.config.logger.default)
        for log, level in self.config.logger.logs.items():
            logging.getLogger(log).setLevel(level)

        if not 'geventwebsocket.handler' in self.config.logger.logs:
            logging.getLogger('geventwebsocket.handler').setLevel('ERROR')

    def init_queues(self):
        # Queues for clip processing
        self.event_queue = mp.Queue()
        self.event_processed_queue = mp.Queue()

        # Queue for cameras to push tracked objects to
        self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys()) * 2)

    def init_database(self):
        migrate_db = SqliteExtDatabase(self.config.database.path)

        # Run migrations
        del logging.getLogger('peewee_migrate').handlers[:]
        router = Router(migrate_db)
        router.run()

        migrate_db.close()

        self.db = SqliteQueueDatabase(self.config.database.path)
        models = [Event]
        self.db.bind(models)

    def init_stats(self):
        self.stats_tracking = stats_init(self.camera_metrics, self.detectors)

    def init_web_server(self):
        self.flask_app = create_app(self.config, self.db, self.stats_tracking,
                                    self.detected_frames_processor, self.mqtt_client)

    def init_mqtt(self):
        self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)

    def start_detectors(self):
        model_shape = (self.config.model.height, self.config.model.width)
        for name in self.config.cameras.keys():
            self.detection_out_events[name] = mp.Event()
            try:
                self.detection_shms.append(
                    mp.shared_memory.SharedMemory(
                        name=name, create=True,
                        size=self.config.model.height * self.config.model.width * 3))
            except FileExistsError:
                self.detection_shms.append(mp.shared_memory.SharedMemory(name=name))

            try:
                self.detection_shms.append(
                    mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20 * 6 * 4))
            except FileExistsError:
                self.detection_shms.append(mp.shared_memory.SharedMemory(name=f"out-{name}"))

        for name, detector in self.config.detectors.items():
            if detector.type == 'cpu':
                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events,
                                                      model_shape, 'cpu', detector.num_threads)
            if detector.type == 'edgetpu':
                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events,
                                                      model_shape, detector.device, detector.num_threads)

    def start_detected_frames_processor(self):
        self.detected_frames_processor = TrackedObjectProcessor(
            self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
            self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event)
        self.detected_frames_processor.start()

    def start_camera_processors(self):
        model_shape = (self.config.model.height, self.config.model.width)
        for name, config in self.config.cameras.items():
            camera_process = mp.Process(
                target=track_camera,
                name=f"camera_processor:{name}",
                args=(name, config, model_shape, self.detection_queue,
                      self.detection_out_events[name], self.detected_frames_queue,
                      self.camera_metrics[name]))
            camera_process.daemon = True
            self.camera_metrics[name]['process'] = camera_process
            camera_process.start()
            logger.info(f"Camera processor started for {name}: {camera_process.pid}")

    def start_camera_capture_processes(self):
        for name, config in self.config.cameras.items():
            capture_process = mp.Process(
                target=capture_camera,
                name=f"camera_capture:{name}",
                args=(name, config, self.camera_metrics[name]))
            capture_process.daemon = True
            self.camera_metrics[name]['capture_process'] = capture_process
            capture_process.start()
            logger.info(f"Capture process started for {name}: {capture_process.pid}")

    def start_event_processor(self):
        self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue,
                                              self.event_processed_queue, self.stop_event)
        self.event_processor.start()

    def start_event_cleanup(self):
        self.event_cleanup = EventCleanup(self.config, self.stop_event)
        self.event_cleanup.start()

    def start_recording_maintainer(self):
        self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
        self.recording_maintainer.start()

    def start_stats_emitter(self):
        self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client,
                                          self.config.mqtt.topic_prefix, self.stop_event)
        self.stats_emitter.start()

    def start_watchdog(self):
        self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
        self.frigate_watchdog.start()

    def start(self):
        self.init_logger()
        try:
            try:
                self.init_config()
            except Exception as e:
                print(f"Error parsing config: {e}")
                self.log_process.terminate()
                sys.exit(1)
            self.set_environment_vars()
            self.ensure_dirs()
            self.check_config()
            self.set_log_levels()
            self.init_queues()
            self.init_database()
            self.init_mqtt()
        except Exception as e:
            print(e)
            self.log_process.terminate()
            sys.exit(1)
        self.start_detectors()
        self.start_detected_frames_processor()
        self.start_camera_processors()
        self.start_camera_capture_processes()
        self.init_stats()
        self.init_web_server()
        self.start_event_processor()
        self.start_event_cleanup()
        self.start_recording_maintainer()
        self.start_stats_emitter()
        self.start_watchdog()
        # self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)

        def receiveSignal(signalNumber, frame):
            self.stop()
            sys.exit()

        signal.signal(signal.SIGTERM, receiveSignal)

        server = pywsgi.WSGIServer(('127.0.0.1', 5001), self.flask_app, handler_class=WebSocketHandler)
        server.serve_forever()

        self.stop()

    def stop(self):
        logger.info(f"Stopping...")
        self.stop_event.set()

        self.detected_frames_processor.join()
        self.event_processor.join()
        self.event_cleanup.join()
        self.recording_maintainer.join()
        self.stats_emitter.join()
        self.frigate_watchdog.join()

        self.db.stop()

        for detector in self.detectors.values():
            detector.stop()

        while len(self.detection_shms) > 0:
            shm = self.detection_shms.pop()
            shm.close()
            shm.unlink()
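
# Illustrative sketch only (not the project's actual detector code): how a
# worker could attach to the shared-memory segments created in
# start_detectors() above and view them as numpy arrays. The sizes mirror the
# ones used there: height*width*3 bytes of uint8 image data in, and 20*6*4
# bytes out, which fits 20 detections of 6 float32 values. The helper name and
# the interpretation of the 6 values are assumptions.
import numpy as np
from multiprocessing import shared_memory

def attach_detection_buffers(camera_name, model_height, model_width):
    shm_in = shared_memory.SharedMemory(name=camera_name)
    frame = np.ndarray((model_height, model_width, 3), dtype=np.uint8, buffer=shm_in.buf)

    shm_out = shared_memory.SharedMemory(name=f"out-{camera_name}")
    detections = np.ndarray((20, 6), dtype=np.float32, buffer=shm_out.buf)

    # keep references to the SharedMemory objects alive alongside the views
    return shm_in, shm_out, frame, detections
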
class FrigateApp:
    def __init__(self):
        self.stop_event = mp.Event()
        self.base_config: FrigateConfig = None
        self.config: FrigateConfig = None
        self.detection_queue = mp.Queue()
        self.detectors: Dict[str, EdgeTPUProcess] = {}
        self.detection_out_events: Dict[str, mp.Event] = {}
        self.detection_shms: List[mp.shared_memory.SharedMemory] = []
        self.log_queue = mp.Queue()
        self.camera_metrics = {}

    def set_environment_vars(self):
        for key, value in self.config.environment_vars.items():
            os.environ[key] = value

    def ensure_dirs(self):
        for d in [RECORD_DIR, CLIPS_DIR, CACHE_DIR]:
            if not os.path.exists(d) and not os.path.islink(d):
                logger.info(f"Creating directory: {d}")
                os.makedirs(d)
            else:
                logger.debug(f"Skipping directory: {d}")

    def init_logger(self):
        self.log_process = mp.Process(
            target=log_process, args=(self.log_queue,), name="log_process"
        )
        self.log_process.daemon = True
        self.log_process.start()
        root_configurer(self.log_queue)

    def init_config(self):
        config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
        user_config = FrigateConfig.parse_file(config_file)
        self.config = user_config.runtime_config

        for camera_name in self.config.cameras.keys():
            # create camera_metrics
            self.camera_metrics[camera_name] = {
                "camera_fps": mp.Value("d", 0.0),
                "skipped_fps": mp.Value("d", 0.0),
                "process_fps": mp.Value("d", 0.0),
                "detection_enabled": mp.Value(
                    "i", self.config.cameras[camera_name].detect.enabled
                ),
                "detection_fps": mp.Value("d", 0.0),
                "detection_frame": mp.Value("d", 0.0),
                "read_start": mp.Value("d", 0.0),
                "ffmpeg_pid": mp.Value("i", 0),
                "frame_queue": mp.Queue(maxsize=2),
            }

    def set_log_levels(self):
        logging.getLogger().setLevel(self.config.logger.default.value.upper())
        for log, level in self.config.logger.logs.items():
            logging.getLogger(log).setLevel(level.value.upper())

        if not "werkzeug" in self.config.logger.logs:
            logging.getLogger("werkzeug").setLevel("ERROR")

    def init_queues(self):
        # Queues for clip processing
        self.event_queue = mp.Queue()
        self.event_processed_queue = mp.Queue()
        self.video_output_queue = mp.Queue(maxsize=len(self.config.cameras.keys()) * 2)

        # Queue for cameras to push tracked objects to
        self.detected_frames_queue = mp.Queue(
            maxsize=len(self.config.cameras.keys()) * 2
        )

    def init_database(self):
        # Migrate DB location
        old_db_path = os.path.join(CLIPS_DIR, "frigate.db")
        if not os.path.isfile(self.config.database.path) and os.path.isfile(old_db_path):
            os.rename(old_db_path, self.config.database.path)

        # Migrate DB schema
        migrate_db = SqliteExtDatabase(self.config.database.path)

        # Run migrations
        del logging.getLogger("peewee_migrate").handlers[:]
        router = Router(migrate_db)
        router.run()

        migrate_db.close()

        self.db = SqliteQueueDatabase(self.config.database.path)
        models = [Event, Recordings]
        self.db.bind(models)

    def init_stats(self):
        self.stats_tracking = stats_init(self.camera_metrics, self.detectors)

    def init_web_server(self):
        self.flask_app = create_app(
            self.config,
            self.db,
            self.stats_tracking,
            self.detected_frames_processor,
        )

    def init_mqtt(self):
        self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)

    def start_mqtt_relay(self):
        self.mqtt_relay = MqttSocketRelay(self.mqtt_client, self.config.mqtt.topic_prefix)
        self.mqtt_relay.start()

    def start_detectors(self):
        model_path = self.config.model.path
        model_shape = (self.config.model.height, self.config.model.width)
        for name in self.config.cameras.keys():
            self.detection_out_events[name] = mp.Event()

            try:
                shm_in = mp.shared_memory.SharedMemory(
                    name=name,
                    create=True,
                    size=self.config.model.height * self.config.model.width * 3,
                )
            except FileExistsError:
                shm_in = mp.shared_memory.SharedMemory(name=name)

            try:
                shm_out = mp.shared_memory.SharedMemory(
                    name=f"out-{name}", create=True, size=20 * 6 * 4
                )
            except FileExistsError:
                shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")

            self.detection_shms.append(shm_in)
            self.detection_shms.append(shm_out)

        for name, detector in self.config.detectors.items():
            if detector.type == DetectorTypeEnum.cpu:
                self.detectors[name] = EdgeTPUProcess(
                    name,
                    self.detection_queue,
                    self.detection_out_events,
                    model_path,
                    model_shape,
                    "cpu",
                    detector.num_threads,
                )
            if detector.type == DetectorTypeEnum.edgetpu:
                self.detectors[name] = EdgeTPUProcess(
                    name,
                    self.detection_queue,
                    self.detection_out_events,
                    model_path,
                    model_shape,
                    detector.device,
                    detector.num_threads,
                )

    def start_detected_frames_processor(self):
        self.detected_frames_processor = TrackedObjectProcessor(
            self.config,
            self.mqtt_client,
            self.config.mqtt.topic_prefix,
            self.detected_frames_queue,
            self.event_queue,
            self.event_processed_queue,
            self.video_output_queue,
            self.stop_event,
        )
        self.detected_frames_processor.start()

    def start_video_output_processor(self):
        output_processor = mp.Process(
            target=output_frames,
            name=f"output_processor",
            args=(
                self.config,
                self.video_output_queue,
            ),
        )
        output_processor.daemon = True
        self.output_processor = output_processor
        output_processor.start()
        logger.info(f"Output process started: {output_processor.pid}")

    def start_camera_processors(self):
        model_shape = (self.config.model.height, self.config.model.width)
        for name, config in self.config.cameras.items():
            camera_process = mp.Process(
                target=track_camera,
                name=f"camera_processor:{name}",
                args=(
                    name,
                    config,
                    model_shape,
                    self.config.model.merged_labelmap,
                    self.detection_queue,
                    self.detection_out_events[name],
                    self.detected_frames_queue,
                    self.camera_metrics[name],
                ),
            )
            camera_process.daemon = True
            self.camera_metrics[name]["process"] = camera_process
            camera_process.start()
            logger.info(f"Camera processor started for {name}: {camera_process.pid}")

    def start_camera_capture_processes(self):
        for name, config in self.config.cameras.items():
            capture_process = mp.Process(
                target=capture_camera,
                name=f"camera_capture:{name}",
                args=(name, config, self.camera_metrics[name]),
            )
            capture_process.daemon = True
            self.camera_metrics[name]["capture_process"] = capture_process
            capture_process.start()
            logger.info(f"Capture process started for {name}: {capture_process.pid}")

    def start_event_processor(self):
        self.event_processor = EventProcessor(
            self.config,
            self.camera_metrics,
            self.event_queue,
            self.event_processed_queue,
            self.stop_event,
        )
        self.event_processor.start()

    def start_event_cleanup(self):
        self.event_cleanup = EventCleanup(self.config, self.stop_event)
        self.event_cleanup.start()

    def start_recording_maintainer(self):
        self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
        self.recording_maintainer.start()

    def start_recording_cleanup(self):
        self.recording_cleanup = RecordingCleanup(self.config, self.stop_event)
        self.recording_cleanup.start()

    def start_stats_emitter(self):
        self.stats_emitter = StatsEmitter(
            self.config,
            self.stats_tracking,
            self.mqtt_client,
            self.config.mqtt.topic_prefix,
            self.stop_event,
        )
        self.stats_emitter.start()

    def start_watchdog(self):
        self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
        self.frigate_watchdog.start()

    def start(self):
        self.init_logger()
        logger.info(f"Starting Frigate ({VERSION})")
        try:
            try:
                self.init_config()
            except Exception as e:
                print("*************************************************************")
                print("*************************************************************")
                print("*** Your config file is not valid! ***")
                print("*** Please check the docs at ***")
                print("*** https://docs.frigate.video/configuration/index ***")
                print("*************************************************************")
                print("*************************************************************")
                print("*** Config Validation Errors ***")
                print("*************************************************************")
                print(e)
                print("*************************************************************")
                print("*** End Config Validation Errors ***")
                print("*************************************************************")
                self.log_process.terminate()
                sys.exit(1)
            self.set_environment_vars()
            self.ensure_dirs()
            self.set_log_levels()
            self.init_queues()
            self.init_database()
            self.init_mqtt()
        except Exception as e:
            print(e)
            self.log_process.terminate()
            sys.exit(1)
        self.start_detectors()
        self.start_video_output_processor()
        self.start_detected_frames_processor()
        self.start_camera_processors()
        self.start_camera_capture_processes()
        self.init_stats()
        self.init_web_server()
        self.start_mqtt_relay()
        self.start_event_processor()
        self.start_event_cleanup()
        self.start_recording_maintainer()
        self.start_recording_cleanup()
        self.start_stats_emitter()
        self.start_watchdog()
        # self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)

        def receiveSignal(signalNumber, frame):
            self.stop()
            sys.exit()

        signal.signal(signal.SIGTERM, receiveSignal)

        try:
            self.flask_app.run(host="127.0.0.1", port=5001, debug=False)
        except KeyboardInterrupt:
            pass

        self.stop()

    def stop(self):
        logger.info(f"Stopping...")
        self.stop_event.set()

        self.mqtt_relay.stop()
        self.detected_frames_processor.join()
        self.event_processor.join()
        self.event_cleanup.join()
        self.recording_maintainer.join()
        self.recording_cleanup.join()
        self.stats_emitter.join()
        self.frigate_watchdog.join()
        self.db.stop()

        for detector in self.detectors.values():
            detector.stop()

        while len(self.detection_shms) > 0:
            shm = self.detection_shms.pop()
            shm.close()
            shm.unlink()