def purge_duplicates(self):
    """Remove duplicate events, keeping only the longest copy.

    Events with the same label and camera whose start times fall in the
    same 5-second bucket are considered duplicates. The copy with the
    longest duration is kept; the others have their snapshot/clip media
    deleted from disk and their rows removed from the database.
    """
    # row_number() ranks copies within each (label, camera, 5s-bucket) group by
    # descending duration, so copy_number > 1 selects everything but the keeper.
    duplicate_query = """with grouped_events as ( select id, label, camera, has_snapshot, has_clip, row_number() over ( partition by label, camera, round(start_time/5,0)*5 order by end_time-start_time desc ) as copy_number from event ) select distinct id, camera, has_snapshot, has_clip from grouped_events where copy_number > 1;"""
    duplicate_events = Event.raw(duplicate_query)
    for event in duplicate_events:
        logger.debug(f"Removing duplicate: {event.id}")
        media_name = f"{event.camera}-{event.id}"
        if event.has_snapshot:
            media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
            media_path.unlink(missing_ok=True)
        if event.has_clip:
            media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
            media_path.unlink(missing_ok=True)
    # NOTE(review): iterating duplicate_events a second time re-executes the raw
    # query; presumably results are stable between the two passes — confirm.
    (Event.delete().where(
        Event.id << [event.id for event in duplicate_events]).execute())
def run(self):
    """Consume tracked-object lifecycle events from the queue and persist
    finished events (with optional clip creation) until stopped.

    On "start" the event is kept in-process; on "end" a clip may be
    created per the camera's clips config, and a DB row is written only
    when a clip or snapshot exists for the event.
    """
    while not self.stop_event.is_set():
        try:
            event_type, camera, event_data = self.event_queue.get(
                timeout=10)
        except queue.Empty:
            # idle: keep the cache fresh while waiting for events
            if not self.stop_event.is_set():
                self.refresh_cache()
            continue
        logger.debug(
            f"Event received: {event_type} {camera} {event_data['id']}")
        self.refresh_cache()
        if event_type == "start":
            self.events_in_process[event_data["id"]] = event_data
        if event_type == "end":
            clips_config = self.config.cameras[camera].clips
            clip_created = False
            if self.should_create_clip(camera, event_data):
                # only create a clip when clips are enabled and the label
                # is in the configured object filter (None means "all")
                if clips_config.enabled and (clips_config.objects is None or event_data["label"] in clips_config.objects):
                    clip_created = self.create_clip(
                        camera,
                        event_data,
                        clips_config.pre_capture,
                        clips_config.post_capture,
                    )
            # events with no media are not persisted
            if clip_created or event_data["has_snapshot"]:
                Event.create(
                    id=event_data["id"],
                    label=event_data["label"],
                    camera=camera,
                    start_time=event_data["start_time"],
                    end_time=event_data["end_time"],
                    top_score=event_data["top_score"],
                    false_positive=event_data["false_positive"],
                    zones=list(event_data["entered_zones"]),
                    thumbnail=event_data["thumbnail"],
                    has_clip=clip_created,
                    has_snapshot=event_data["has_snapshot"],
                )
            del self.events_in_process[event_data["id"]]
            self.event_processed_queue.put((event_data["id"], camera))
    logger.info(f"Exiting event processor...")
def run(self):
    """Consume tracked-object lifecycle events and persist them.

    Older variant of the event processor loop: only non-false-positive
    events are persisted, and a clip is created per the camera's clips
    config before the DB row is written.
    """
    while True:
        if self.stop_event.is_set():
            logger.info(f"Exiting event processor...")
            break
        try:
            event_type, camera, event_data = self.event_queue.get(
                timeout=10)
        except queue.Empty:
            # idle: keep the cache fresh while waiting for events
            if not self.stop_event.is_set():
                self.refresh_cache()
            continue
        logger.debug(
            f"Event received: {event_type} {camera} {event_data['id']}")
        self.refresh_cache()
        if event_type == 'start':
            self.events_in_process[event_data['id']] = event_data
        if event_type == 'end':
            clips_config = self.config.cameras[camera].clips
            if not event_data['false_positive']:
                clip_created = False
                # clip only when enabled and label passes the object filter
                if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects):
                    clip_created = self.create_clip(
                        camera, event_data, clips_config.pre_capture,
                        clips_config.post_capture)
                Event.create(
                    id=event_data['id'],
                    label=event_data['label'],
                    camera=camera,
                    start_time=event_data['start_time'],
                    end_time=event_data['end_time'],
                    top_score=event_data['top_score'],
                    false_positive=event_data['false_positive'],
                    zones=list(event_data['entered_zones']),
                    thumbnail=event_data['thumbnail'],
                    has_clip=clip_created,
                    has_snapshot=event_data['has_snapshot'],
                )
            del self.events_in_process[event_data['id']]
            self.event_processed_queue.put((event_data['id'], camera))
def event_clip(id):
    """Serve the saved mp4 clip for an event.

    Falls back to building a clip from recordings when no saved file
    exists on disk. Streaming of the saved file is delegated to nginx
    via X-Accel-Redirect.
    """
    download = request.args.get("download", type=bool)
    try:
        event: Event = Event.get(Event.id == id)
    except DoesNotExist:
        return "Event not found.", 404
    if not event.has_clip:
        return "Clip not available", 404
    file_name = f"{event.camera}-{id}.mp4"
    clip_path = os.path.join(CLIPS_DIR, file_name)
    if not os.path.isfile(clip_path):
        # no saved clip; synthesize one from recordings. An in-progress
        # event (end_time is None) is clipped up to "now".
        end_ts = (datetime.now().timestamp()
                  if event.end_time is None else event.end_time)
        return recording_clip(event.camera, event.start_time, end_ts)
    response = make_response()
    response.headers["Content-Description"] = "File Transfer"
    response.headers["Cache-Control"] = "no-cache"
    response.headers["Content-Type"] = "video/mp4"
    if download:
        response.headers[
            "Content-Disposition"] = "attachment; filename=%s" % file_name
    response.headers["Content-Length"] = os.path.getsize(clip_path)
    # hand the actual file transfer off to nginx
    response.headers[
        "X-Accel-Redirect"] = f"/clips/{file_name}"  # nginx: http://wiki.nginx.org/NginxXSendfile
    return response
def delete_event(id):
    """Delete an event row and any media files it owns.

    Removes the snapshot jpg (and its "-clean.png" variant) and the clip
    mp4 from disk before deleting the database row.

    Returns:
        JSON {"success", "message"} with 200 on success, 404 when the
        event id does not exist.
    """
    try:
        event = Event.get(Event.id == id)
    except DoesNotExist:
        # fixed: message previously rendered as "Event<id> not found"
        # (no space) because of "Event" + id concatenation
        return make_response(
            jsonify({
                "success": False,
                "message": f"Event {id} not found"
            }), 404)
    media_name = f"{event.camera}-{event.id}"
    if event.has_snapshot:
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
        media.unlink(missing_ok=True)
        # the unannotated ("clean") snapshot variant, if present
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
        media.unlink(missing_ok=True)
    if event.has_clip:
        media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
        media.unlink(missing_ok=True)
    event.delete_instance()
    return make_response(
        jsonify({
            "success": True,
            "message": f"Event {id} deleted"
        }), 200)
def events_summary():
    """Return event counts grouped by camera, label, day, and zones.

    Optional query params `has_clip` / `has_snapshot` (0/1) filter the
    events before grouping.
    """
    has_clip = request.args.get("has_clip", type=int)
    has_snapshot = request.args.get("has_snapshot", type=int)
    clauses = []
    if not has_clip is None:
        clauses.append((Event.has_clip == has_clip))
    if not has_snapshot is None:
        clauses.append((Event.has_snapshot == has_snapshot))
    if len(clauses) == 0:
        # no filters: use an always-true clause so reduce() has input
        clauses.append((1 == 1))
    groups = (Event.select(
        Event.camera,
        Event.label,
        # day is computed in local time from the unix start_time
        fn.strftime("%Y-%m-%d",
                    fn.datetime(Event.start_time, "unixepoch",
                                "localtime")).alias("day"),
        Event.zones,
        fn.COUNT(Event.id).alias("count"),
    ).where(reduce(operator.and_, clauses)).group_by(
        Event.camera,
        Event.label,
        fn.strftime("%Y-%m-%d",
                    fn.datetime(Event.start_time, "unixepoch", "localtime")),
        Event.zones,
    ))
    return jsonify([e for e in groups.dicts()])
def event_snapshot(id):
    """Return the snapshot JPEG for an event.

    Finished events are read from disk; events still in progress are
    rendered on demand from the tracked object (honoring the timestamp/
    bbox/crop/h query params). Returns 404 when no image is available.
    """
    jpg_bytes = None
    try:
        event = Event.get(Event.id == id)
        if not event.has_snapshot:
            return "Snapshot not available", 404
        # read snapshot from disk
        with open(os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"),
                  "rb") as image_file:
            jpg_bytes = image_file.read()
    except DoesNotExist:
        # see if the object is currently being tracked
        try:
            camera_states = current_app.detected_frames_processor.camera_states.values(
            )
            for camera_state in camera_states:
                if id in camera_state.tracked_objects:
                    tracked_obj = camera_state.tracked_objects.get(id)
                    if tracked_obj is not None:
                        jpg_bytes = tracked_obj.get_jpg_bytes(
                            timestamp=request.args.get("timestamp", type=int),
                            bounding_box=request.args.get("bbox", type=int),
                            crop=request.args.get("crop", type=int),
                            height=request.args.get("h", type=int),
                        )
        # narrowed from a bare `except:` which would also trap
        # SystemExit/KeyboardInterrupt
        except Exception:
            return "Event not found", 404
    except Exception:
        return "Event not found", 404
    # the object may not be tracked anymore; without this check a None
    # body would be passed to make_response
    if jpg_bytes is None:
        return "Event not found", 404
    response = make_response(jpg_bytes)
    response.headers["Content-Type"] = "image/jpg"
    return response
def event_thumbnail(id):
    """Return the thumbnail JPEG for an event.

    Finished events use the base64 thumbnail stored in the database;
    in-progress events are rendered from the tracked object. With
    ?format=android the image is padded to a 2:1 aspect ratio.
    """
    format = request.args.get('format', 'ios')
    thumbnail_bytes = None
    try:
        event = Event.get(Event.id == id)
        thumbnail_bytes = base64.b64decode(event.thumbnail)
    except DoesNotExist:
        # see if the object is currently being tracked
        try:
            for camera_state in current_app.detected_frames_processor.camera_states.values(
            ):
                if id in camera_state.tracked_objects:
                    tracked_obj = camera_state.tracked_objects.get(id)
                    if tracked_obj is not None:
                        thumbnail_bytes = tracked_obj.get_thumbnail()
        # narrowed from a bare `except:` which would also trap
        # SystemExit/KeyboardInterrupt
        except Exception:
            return "Event not found", 404
    if thumbnail_bytes is None:
        return "Event not found", 404
    # android notifications prefer a 2:1 ratio
    if format == 'android':
        jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
        img = cv2.imdecode(jpg_as_np, flags=1)
        # pad half the width on each side with black borders
        thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1] * 0.5),
                                       int(img.shape[1] * 0.5),
                                       cv2.BORDER_CONSTANT, (0, 0, 0))
        ret, jpg = cv2.imencode('.jpg', thumbnail,
                                [int(cv2.IMWRITE_JPEG_QUALITY), 70])
        thumbnail_bytes = jpg.tobytes()
    response = make_response(thumbnail_bytes)
    response.headers['Content-Type'] = 'image/jpg'
    return response
def run(self):
    """Consume tracked-object lifecycle events and persist finished ones.

    Record-based variant: the stored start/end times are widened by the
    camera's pre/post capture settings, and a row is only written when
    the event has a clip or snapshot.
    """
    while not self.stop_event.is_set():
        try:
            event_type, camera, event_data = self.event_queue.get(
                timeout=10)
        except queue.Empty:
            continue
        logger.debug(
            f"Event received: {event_type} {camera} {event_data['id']}")
        if event_type == "start":
            self.events_in_process[event_data["id"]] = event_data
        if event_type == "end":
            event_config: EventsConfig = self.config.cameras[
                camera].record.events
            # events with no media are not persisted
            if event_data["has_clip"] or event_data["has_snapshot"]:
                Event.create(
                    id=event_data["id"],
                    label=event_data["label"],
                    camera=camera,
                    # widen the window by the configured capture padding
                    start_time=event_data["start_time"] -
                    event_config.pre_capture,
                    end_time=event_data["end_time"] +
                    event_config.post_capture,
                    top_score=event_data["top_score"],
                    false_positive=event_data["false_positive"],
                    zones=list(event_data["entered_zones"]),
                    thumbnail=event_data["thumbnail"],
                    region=event_data["region"],
                    box=event_data["box"],
                    area=event_data["area"],
                    has_clip=event_data["has_clip"],
                    has_snapshot=event_data["has_snapshot"],
                )
            del self.events_in_process[event_data["id"]]
            self.event_processed_queue.put((event_data["id"], camera))
    logger.info(f"Exiting event processor...")
def run(self):
    """Cleanup loop: every 5 minutes expire media, purge duplicates, and
    drop event rows that no longer reference any media."""
    # wait() doubles as the 5-minute pacing and the stop signal: it
    # returns True (ending the loop) as soon as stop_event is set
    while not self.stop_event.wait(300):
        self.expire("clips")
        self.expire("snapshots")
        self.purge_duplicates()
        # drop events from db where has_clip and has_snapshot are false
        (Event.delete()
         .where(Event.has_clip == False, Event.has_snapshot == False)
         .execute())
    logger.info(f"Exiting event cleanup...")
def vod_event(id):
    """Return a VOD playlist manifest for an event's clip.

    Serves the saved clip when it exists on disk; otherwise delegates to
    the recordings-based VOD endpoint, clearing has_clip when the
    recordings turn out to be missing.
    """
    try:
        event: Event = Event.get(Event.id == id)
    except DoesNotExist:
        logger.error(f"Event not found: {id}")
        return "Event not found.", 404
    if not event.has_clip:
        logger.error(f"Event does not have recordings: {id}")
        return "Recordings not available", 404
    clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")
    if not os.path.isfile(clip_path):
        # in-progress events (end_time None) are served up to "now"
        end_ts = (datetime.now().timestamp()
                  if event.end_time is None else event.end_time)
        vod_response = vod_ts(event.camera, event.start_time, end_ts)
        # If the recordings are not found, set has_clip to false
        if (type(vod_response) == tuple and len(vod_response) == 2
                and vod_response[1] == 404):
            Event.update(has_clip=False).where(Event.id == id).execute()
        return vod_response
    duration = int((event.end_time - event.start_time) * 1000)
    return jsonify({
        "cache": True,
        "discontinuity": False,
        "durations": [duration],
        "sequences": [{
            "clips": [{
                "type": "source",
                "path": clip_path
            }]
        }],
    })
def events():
    """List events, newest first, filtered by the query parameters.

    Supported params: limit (default 100), camera, label, zone, after/
    before (unix timestamps on start_time), has_clip/has_snapshot (0/1),
    include_thumbnails (default 1; 0 omits the thumbnail field).
    """
    limit = request.args.get("limit", 100)
    camera = request.args.get("camera")
    label = request.args.get("label")
    zone = request.args.get("zone")
    after = request.args.get("after", type=float)
    before = request.args.get("before", type=float)
    has_clip = request.args.get("has_clip", type=int)
    has_snapshot = request.args.get("has_snapshot", type=int)
    include_thumbnails = request.args.get("include_thumbnails",
                                          default=1,
                                          type=int)
    clauses = []
    excluded_fields = []
    if camera:
        clauses.append((Event.camera == camera))
    if label:
        clauses.append((Event.label == label))
    if zone:
        # zones is stored as JSON; match the quoted zone name as a substring
        clauses.append((Event.zones.cast("text") % f'*"{zone}"*'))
    if after:
        clauses.append((Event.start_time >= after))
    if before:
        clauses.append((Event.start_time <= before))
    if not has_clip is None:
        clauses.append((Event.has_clip == has_clip))
    if not has_snapshot is None:
        clauses.append((Event.has_snapshot == has_snapshot))
    if not include_thumbnails:
        excluded_fields.append(Event.thumbnail)
    if len(clauses) == 0:
        # no filters: use an always-true clause so reduce() has input
        clauses.append((True))
    events = (
        Event.select()
        .where(reduce(operator.and_, clauses))
        .order_by(Event.start_time.desc())
        .limit(limit)
    )
    return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
def events():
    """List events, newest first, filtered by the query parameters.

    Supported params: limit (default 100), camera, label, zone, after/
    before (unix timestamps on start_time), has_clip/has_snapshot (0/1),
    include_thumbnails (default 1; 0 omits the thumbnail field).
    """
    limit = request.args.get('limit', 100)
    camera = request.args.get('camera')
    label = request.args.get('label')
    zone = request.args.get('zone')
    after = request.args.get('after', type=float)
    before = request.args.get('before', type=float)
    has_clip = request.args.get('has_clip', type=int)
    has_snapshot = request.args.get('has_snapshot', type=int)
    include_thumbnails = request.args.get('include_thumbnails',
                                          default=1,
                                          type=int)
    clauses = []
    excluded_fields = []
    if camera:
        clauses.append((Event.camera == camera))
    if label:
        clauses.append((Event.label == label))
    if zone:
        # zones is stored as JSON; match the quoted zone name as a substring
        clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
    if after:
        clauses.append((Event.start_time >= after))
    if before:
        clauses.append((Event.start_time <= before))
    if not has_clip is None:
        clauses.append((Event.has_clip == has_clip))
    if not has_snapshot is None:
        clauses.append((Event.has_snapshot == has_snapshot))
    if not include_thumbnails:
        excluded_fields.append(Event.thumbnail)
    if len(clauses) == 0:
        # no filters: use an always-true clause so reduce() has input
        clauses.append((1 == 1))
    events = (Event.select().where(reduce(operator.and_, clauses)).order_by(
        Event.start_time.desc()).limit(limit))
    return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
def run(self):
    """Cleanup loop: expire media roughly every 10 minutes while polling
    the stop signal every 10 seconds."""
    ticks = 0
    while True:
        if self.stop_event.is_set():
            logger.info(f"Exiting event cleanup...")
            break
        # only expire events every 10 minutes, but check for stop events every 10 seconds
        time.sleep(10)
        ticks += 1
        if ticks < 60:
            continue
        ticks = 0
        self.expire('clips')
        self.expire('snapshots')
        # drop events from db where has_clip and has_snapshot are false
        (Event.delete()
         .where(Event.has_clip == False, Event.has_snapshot == False)
         .execute())
def vod_event(id):
    """Return a VOD playlist manifest for an event's clip.

    Serves the saved clip when it exists on disk; otherwise delegates to
    the timestamp-range VOD endpoint.
    """
    try:
        event: Event = Event.get(Event.id == id)
    except DoesNotExist:
        return "Event not found.", 404
    if not event.has_clip:
        return "Clip not available", 404
    clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4")
    if not os.path.isfile(clip_path):
        # no saved file: build the response from recordings instead
        return vod_ts(event.camera, event.start_time, event.end_time)
    # duration in milliseconds, as the VOD module expects
    duration = int((event.end_time - event.start_time) * 1000)
    return jsonify(
        {
            "cache": True,
            "discontinuity": False,
            "durations": [duration],
            "sequences": [{"clips": [{"type": "source", "path": clip_path}]}],
        }
    )
def move_files(self):
    """Move finished ffmpeg cache segments into recording storage, or
    delete them, according to each camera's retention configuration.

    Segments still open by ffmpeg are skipped; only the 5 newest cached
    segments per camera are processed (older ones are dropped to keep
    the cache bounded).
    """
    cache_files = sorted([
        d for d in os.listdir(CACHE_DIR)
        if os.path.isfile(os.path.join(CACHE_DIR, d)) and d.endswith(".mp4")
        and not d.startswith("clip_")
    ])
    files_in_use = []
    # collect cache files ffmpeg currently holds open so we never move a
    # segment that is still being written
    for process in psutil.process_iter():
        try:
            if process.name() != "ffmpeg":
                continue
            flist = process.open_files()
            if flist:
                for nt in flist:
                    if nt.path.startswith(CACHE_DIR):
                        files_in_use.append(nt.path.split("/")[-1])
        except:
            # process may have exited mid-scan; best-effort enumeration
            continue
    # group recordings by camera
    grouped_recordings = defaultdict(list)
    for f in cache_files:
        # Skip files currently in use
        if f in files_in_use:
            continue
        cache_path = os.path.join(CACHE_DIR, f)
        basename = os.path.splitext(f)[0]
        # segment names look like "<camera>-<YYYYmmddHHMMSS>.mp4"
        camera, date = basename.rsplit("-", maxsplit=1)
        start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
        grouped_recordings[camera].append({
            "cache_path": cache_path,
            "start_time": start_time,
        })
    # delete all cached files past the most recent 5
    keep_count = 5
    for camera in grouped_recordings.keys():
        if len(grouped_recordings[camera]) > keep_count:
            to_remove = grouped_recordings[camera][:-keep_count]
            for f in to_remove:
                Path(f["cache_path"]).unlink(missing_ok=True)
                self.end_time_cache.pop(f["cache_path"], None)
            grouped_recordings[camera] = grouped_recordings[camera][
                -keep_count:]
    for camera, recordings in grouped_recordings.items():
        # clear out all the recording info for old frames
        while (len(self.recordings_info[camera]) > 0
               and self.recordings_info[camera][0][0] <
               recordings[0]["start_time"].timestamp()):
            self.recordings_info[camera].pop(0)
        # get all events with the end time after the start of the oldest cache file
        # or with end_time None
        events: Event = (Event.select().where(
            Event.camera == camera,
            (Event.end_time == None)
            | (Event.end_time >= recordings[0]["start_time"].timestamp()),
            Event.has_clip,
        ).order_by(Event.start_time))
        for r in recordings:
            cache_path = r["cache_path"]
            start_time = r["start_time"]
            # Just delete files if recordings are turned off
            if (not camera in self.config.cameras
                    or not self.config.cameras[camera].record.enabled):
                Path(cache_path).unlink(missing_ok=True)
                self.end_time_cache.pop(cache_path, None)
                continue
            # probe the segment duration once per file and cache it
            if cache_path in self.end_time_cache:
                end_time, duration = self.end_time_cache[cache_path]
            else:
                ffprobe_cmd = [
                    "ffprobe",
                    "-v",
                    "error",
                    "-show_entries",
                    "format=duration",
                    "-of",
                    "default=noprint_wrappers=1:nokey=1",
                    f"{cache_path}",
                ]
                p = sp.run(ffprobe_cmd, capture_output=True)
                if p.returncode == 0:
                    duration = float(p.stdout.decode().strip())
                    end_time = start_time + datetime.timedelta(
                        seconds=duration)
                    self.end_time_cache[cache_path] = (end_time, duration)
                else:
                    # NOTE(review): {f} here is the leftover loop variable from
                    # the grouping pass above, not this segment — presumably
                    # this should log {cache_path}; confirm before relying on
                    # this message.
                    logger.warning(
                        f"Discarding a corrupt recording segment: {f}")
                    Path(cache_path).unlink(missing_ok=True)
                    continue
            # if cached file's start_time is earlier than the retain days for the camera
            if start_time <= (
                (datetime.datetime.now() - datetime.timedelta(
                    days=self.config.cameras[camera].record.retain.days))):
                # if the cached segment overlaps with the events:
                overlaps = False
                for event in events:
                    # if the event starts in the future, stop checking events
                    # and remove this segment
                    if event.start_time > end_time.timestamp():
                        overlaps = False
                        Path(cache_path).unlink(missing_ok=True)
                        self.end_time_cache.pop(cache_path, None)
                        break
                    # if the event is in progress or ends after the recording starts, keep it
                    # and stop looking at events
                    if (event.end_time is None
                            or event.end_time >= start_time.timestamp()):
                        overlaps = True
                        break
                if overlaps:
                    record_mode = self.config.cameras[
                        camera].record.events.retain.mode
                    # move from cache to recordings immediately
                    self.store_segment(
                        camera,
                        start_time,
                        end_time,
                        duration,
                        cache_path,
                        record_mode,
                    )
            # else retain days includes this segment
            else:
                record_mode = self.config.cameras[camera].record.retain.mode
                self.store_segment(camera, start_time, end_time, duration,
                                   cache_path, record_mode)
def expire_recordings(self):
    """Expire recording segments that have passed their retention window.

    First removes recordings for cameras no longer in the config, then
    per camera deletes segments older than the retain window unless they
    overlap a clip-bearing event (subject to the events retention mode).
    """
    logger.debug("Start expire recordings (new).")
    logger.debug("Start deleted cameras.")
    # Handle deleted cameras
    expire_days = self.config.record.retain.days
    expire_before = (datetime.datetime.now() -
                     datetime.timedelta(days=expire_days)).timestamp()
    no_camera_recordings: Recordings = Recordings.select().where(
        Recordings.camera.not_in(list(self.config.cameras.keys())),
        Recordings.end_time < expire_before,
    )
    deleted_recordings = set()
    for recording in no_camera_recordings:
        Path(recording.path).unlink(missing_ok=True)
        deleted_recordings.add(recording.id)
    logger.debug(f"Expiring {len(deleted_recordings)} recordings")
    Recordings.delete().where(
        Recordings.id << deleted_recordings).execute()
    logger.debug("End deleted cameras.")
    logger.debug("Start all cameras.")
    for camera, config in self.config.cameras.items():
        logger.debug(f"Start camera: {camera}.")
        # When deleting recordings without events, we have to keep at LEAST the configured max clip duration
        min_end = (datetime.datetime.now() - datetime.timedelta(
            seconds=config.record.events.max_seconds)).timestamp()
        expire_days = config.record.retain.days
        expire_before = (datetime.datetime.now() -
                         datetime.timedelta(days=expire_days)).timestamp()
        expire_date = min(min_end, expire_before)
        # Get recordings to check for expiration
        recordings: Recordings = (Recordings.select().where(
            Recordings.camera == camera,
            Recordings.end_time < expire_date,
        ).order_by(Recordings.start_time))
        # Get all the events to check against
        events: Event = (
            Event.select().where(
                Event.camera == camera,
                # need to ensure segments for all events starting
                # before the expire date are included
                Event.start_time < expire_date,
                Event.has_clip,
            ).order_by(Event.start_time).objects())
        # loop over recordings and see if they overlap with any non-expired events
        # TODO: expire segments based on segment stats according to config
        # event_start advances monotonically: both lists are sorted, so
        # events ending before the previous segment can be skipped forever
        event_start = 0
        deleted_recordings = set()
        for recording in recordings.objects().iterator():
            keep = False
            # Now look for a reason to keep this recording segment
            for idx in range(event_start, len(events)):
                event = events[idx]
                # if the event starts in the future, stop checking events
                # and let this recording segment expire
                if event.start_time > recording.end_time:
                    keep = False
                    break
                # if the event is in progress or ends after the recording starts, keep it
                # and stop looking at events
                if event.end_time is None or event.end_time >= recording.start_time:
                    keep = True
                    break
                # if the event ends before this recording segment starts, skip
                # this event and check the next event for an overlap.
                # since the events and recordings are sorted, we can skip events
                # that end before the previous recording segment started on future segments
                if event.end_time < recording.start_time:
                    event_start = idx
            # Delete recordings outside of the retention window or based on the retention mode
            if (not keep or
                    (config.record.events.retain.mode == RetainModeEnum.motion
                     and recording.motion == 0)
                    or (config.record.events.retain.mode ==
                        RetainModeEnum.active_objects
                        and recording.objects == 0)):
                Path(recording.path).unlink(missing_ok=True)
                deleted_recordings.add(recording.id)
        logger.debug(f"Expiring {len(deleted_recordings)} recordings")
        Recordings.delete().where(
            Recordings.id << deleted_recordings).execute()
        logger.debug(f"End camera: {camera}.")
    logger.debug("End all cameras.")
    logger.debug("End expire recordings (new).")
def recordings(camera_name):
    """Summarize on-disk recordings for a camera by day and hour.

    Walks the RECORD_DIR folder layout to find hours with footage and
    the delay into each hour, then attaches events (overlapping events
    with the same label are packed into single rows via raw SQL).
    """
    files = glob.glob(f"{RECORD_DIR}/*/*/*/{camera_name}")
    if len(files) == 0:
        return jsonify([])
    files.sort()
    dates = OrderedDict()
    for path in files:
        first = glob.glob(f"{path}/00.*.mp4")
        delay = 0
        if len(first) > 0:
            # seconds into the hour at which the first segment starts
            delay = int(first[0].strip(path).split(".")[1])
        # extract YYYY-MM / DD / HH from the directory layout
        search = re.search(r".+/(\d{4}[-]\d{2})/(\d{2})/(\d{2}).+", path)
        if not search:
            continue
        date = f"{search.group(1)}-{search.group(2)}"
        if date not in dates:
            dates[date] = OrderedDict()
        dates[date][search.group(3)] = {"delay": delay, "events": []}
    # Packing intervals to return all events with same label and overlapping times as one row.
    # See: https://blogs.solidq.com/en/sqlserver/packing-intervals/
    events = Event.raw(
        """WITH C1 AS ( SELECT id, label, camera, top_score, start_time AS ts, +1 AS type, 1 AS sub FROM event WHERE camera = ? UNION ALL SELECT id, label, camera, top_score, end_time + 15 AS ts, -1 AS type, 0 AS sub FROM event WHERE camera = ? ), C2 AS ( SELECT C1.*, SUM(type) OVER(PARTITION BY label ORDER BY ts, type DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) - sub AS cnt FROM C1 ), C3 AS ( SELECT id, label, camera, top_score, ts, (ROW_NUMBER() OVER(PARTITION BY label ORDER BY ts) - 1) / 2 + 1 AS grpnum FROM C2 WHERE cnt = 0 ) SELECT MIN(id) as id, label, camera, MAX(top_score) as top_score, MIN(ts) AS start_time, max(ts) AS end_time FROM C3 GROUP BY label, grpnum ORDER BY start_time;""",
        camera_name,
        camera_name,
    )
    e: Event
    for e in events:
        date = datetime.fromtimestamp(e.start_time)
        key = date.strftime("%Y-%m-%d")
        hour = date.strftime("%H")
        # only attach events to hours that actually have footage
        if key in dates and hour in dates[key]:
            dates[key][hour]["events"].append(
                model_to_dict(
                    e,
                    exclude=[
                        Event.false_positive,
                        Event.zones,
                        Event.thumbnail,
                        Event.has_clip,
                        Event.has_snapshot,
                    ],
                ))
    return jsonify([{
        "date": date,
        "events": sum([len(value["events"]) for value in hours.values()]),
        "recordings": [{
            "hour": hour,
            "delay": value["delay"],
            "events": value["events"]
        } for hour, value in hours.items()],
    } for date, hours in dates.items()])
def event(id):
    """Fetch a single event by id and return it as a dict, or 404."""
    try:
        record = Event.get(Event.id == id)
        return model_to_dict(record)
    except DoesNotExist:
        return "Event not found", 404
def run(self):
    """Main loop: persist tracked-object events to the database as they
    start, update, and end.

    Orphaned rows (end_time None) are closed both at startup and on
    shutdown so no event stays open across restarts.
    """
    # set an end_time on events without an end_time on startup
    Event.update(end_time=Event.start_time +
                 30).where(Event.end_time == None).execute()
    while not self.stop_event.is_set():
        try:
            event_type, camera, event_data = self.event_queue.get(
                timeout=10)
        except queue.Empty:
            continue
        logger.debug(
            f"Event received: {event_type} {camera} {event_data['id']}")
        event_config: EventsConfig = self.config.cameras[
            camera].record.events
        if event_type == "start":
            self.events_in_process[event_data["id"]] = event_data
        elif event_type == "update" and should_update_db(
                self.events_in_process[event_data["id"]], event_data):
            self.events_in_process[event_data["id"]] = event_data
            # TODO: this will generate a lot of db activity possibly
            if event_data["has_clip"] or event_data["has_snapshot"]:
                # upsert the in-progress event; end_time stays None until
                # the "end" message arrives
                Event.replace(
                    id=event_data["id"],
                    label=event_data["label"],
                    camera=camera,
                    start_time=event_data["start_time"] -
                    event_config.pre_capture,
                    end_time=None,
                    top_score=event_data["top_score"],
                    false_positive=event_data["false_positive"],
                    zones=list(event_data["entered_zones"]),
                    thumbnail=event_data["thumbnail"],
                    region=event_data["region"],
                    box=event_data["box"],
                    area=event_data["area"],
                    has_clip=event_data["has_clip"],
                    has_snapshot=event_data["has_snapshot"],
                ).execute()
        elif event_type == "end":
            if event_data["has_clip"] or event_data["has_snapshot"]:
                Event.replace(
                    id=event_data["id"],
                    label=event_data["label"],
                    camera=camera,
                    # widen the stored window by the configured padding
                    start_time=event_data["start_time"] -
                    event_config.pre_capture,
                    end_time=event_data["end_time"] +
                    event_config.post_capture,
                    top_score=event_data["top_score"],
                    false_positive=event_data["false_positive"],
                    zones=list(event_data["entered_zones"]),
                    thumbnail=event_data["thumbnail"],
                    region=event_data["region"],
                    box=event_data["box"],
                    area=event_data["area"],
                    has_clip=event_data["has_clip"],
                    has_snapshot=event_data["has_snapshot"],
                ).execute()
            del self.events_in_process[event_data["id"]]
            self.event_processed_queue.put((event_data["id"], camera))
    # set an end_time on events without an end_time before exiting
    Event.update(end_time=datetime.datetime.now().timestamp()).where(
        Event.end_time == None).execute()
    logger.info(f"Exiting event processor...")
def recordings(camera_name):
    """Summarize database-backed recordings for a camera by day/hour.

    Builds per-hour delay maps from the Recordings rows, then attaches
    events (overlapping same-label events packed into single rows via
    raw SQL; in-progress events with NULL end_time are included as-is).
    """
    dates = OrderedDict()
    # Retrieve all recordings for this camera
    recordings = (Recordings.select().where(
        Recordings.camera == camera_name).order_by(
            Recordings.start_time.asc()))
    last_end = 0
    recording: Recordings
    for recording in recordings:
        date = datetime.fromtimestamp(recording.start_time)
        key = date.strftime("%Y-%m-%d")
        hour = date.strftime("%H")
        # Create Day Record
        if key not in dates:
            dates[key] = OrderedDict()
        # Create Hour Record
        if hour not in dates[key]:
            dates[key][hour] = {"delay": {}, "events": []}
        # Check for delay
        the_hour = datetime.strptime(f"{key} {hour}",
                                     "%Y-%m-%d %H").timestamp()
        # diff current recording start time and the greater of the previous end time or top of the hour
        diff = recording.start_time - max(last_end, the_hour)
        # Determine seconds into recording
        seconds = 0
        if datetime.fromtimestamp(last_end).strftime("%H") == hour:
            seconds = int(last_end - the_hour)
        # Determine the delay
        delay = min(int(diff), 3600 - seconds)
        if delay > 1:
            # Add an offset for any delay greater than a second
            dates[key][hour]["delay"][seconds] = delay
        last_end = recording.end_time
    # Packing intervals to return all events with same label and overlapping times as one row.
    # See: https://blogs.solidq.com/en/sqlserver/packing-intervals/
    events = Event.raw(
        """WITH C1 AS ( SELECT id, label, camera, top_score, start_time AS ts, +1 AS type, 1 AS sub FROM event WHERE camera = ? UNION ALL SELECT id, label, camera, top_score, end_time + 15 AS ts, -1 AS type, 0 AS sub FROM event WHERE camera = ? ), C2 AS ( SELECT C1.*, SUM(type) OVER(PARTITION BY label ORDER BY ts, type DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) - sub AS cnt FROM C1 ), C3 AS ( SELECT id, label, camera, top_score, ts, (ROW_NUMBER() OVER(PARTITION BY label ORDER BY ts) - 1) / 2 + 1 AS grpnum FROM C2 WHERE cnt = 0 ) SELECT id, label, camera, top_score, start_time, end_time FROM event WHERE camera = ? AND end_time IS NULL UNION ALL SELECT MIN(id) as id, label, camera, MAX(top_score) as top_score, MIN(ts) AS start_time, max(ts) AS end_time FROM C3 GROUP BY label, grpnum ORDER BY start_time;""",
        camera_name,
        camera_name,
        camera_name,
    )
    event: Event
    for event in events:
        date = datetime.fromtimestamp(event.start_time)
        key = date.strftime("%Y-%m-%d")
        hour = date.strftime("%H")
        # only attach events to hours that actually have recordings
        if key in dates and hour in dates[key]:
            dates[key][hour]["events"].append(
                model_to_dict(
                    event,
                    exclude=[
                        Event.false_positive,
                        Event.zones,
                        Event.thumbnail,
                        Event.has_clip,
                        Event.has_snapshot,
                    ],
                ))
    return jsonify([{
        "date": date,
        "events": sum([len(value["events"]) for value in hours.values()]),
        "recordings": [{
            "hour": hour,
            "delay": value["delay"],
            "events": value["events"]
        } for hour, value in hours.items()],
    } for date, hours in dates.items()])
def expire(self, media_type):
    """Expire clip or snapshot media past its per-label retention window.

    Args:
        media_type: 'clips' or any other value for snapshots; selects
            which retain config, file extension, and event flag to use.

    Handles events from cameras no longer in the config (global retain
    config) first, then each configured camera with its own config. The
    media file is removed from disk and the corresponding has_clip /
    has_snapshot flag is cleared on the event rows.
    """
    ## Expire events from unlisted cameras based on the global config
    if media_type == 'clips':
        retain_config = self.config.clips.retain
        file_extension = "mp4"
        update_params = {"has_clip": False}
    else:
        retain_config = self.config.snapshots.retain
        file_extension = "jpg"
        update_params = {"has_snapshot": False}
    distinct_labels = (Event.select(Event.label).where(
        Event.camera.not_in(self.camera_keys)).distinct())
    # loop over object types in db
    for l in distinct_labels:
        # get expiration time for this label
        expire_days = retain_config.objects.get(l.label,
                                                retain_config.default)
        expire_after = (datetime.datetime.now() -
                        datetime.timedelta(days=expire_days)).timestamp()
        # grab all events after specific time
        expired_events = Event.select().where(
            Event.camera.not_in(self.camera_keys),
            Event.start_time < expire_after,
            Event.label == l.label,
        )
        # delete the media from disk
        for event in expired_events:
            media_name = f"{event.camera}-{event.id}"
            media_path = Path(
                f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
            media_path.unlink(missing_ok=True)
        # update the clips attribute for the db entry
        update_query = Event.update(update_params).where(
            Event.camera.not_in(self.camera_keys),
            Event.start_time < expire_after,
            Event.label == l.label,
        )
        update_query.execute()
    ## Expire events from cameras based on the camera config
    for name, camera in self.config.cameras.items():
        if media_type == 'clips':
            retain_config = camera.clips.retain
        else:
            retain_config = camera.snapshots.retain
        # get distinct objects in database for this camera
        distinct_labels = (Event.select(
            Event.label).where(Event.camera == name).distinct())
        # loop over object types in db
        for l in distinct_labels:
            # get expiration time for this label
            expire_days = retain_config.objects.get(
                l.label, retain_config.default)
            expire_after = (
                datetime.datetime.now() -
                datetime.timedelta(days=expire_days)).timestamp()
            # grab all events after specific time
            expired_events = Event.select().where(
                Event.camera == name,
                Event.start_time < expire_after,
                Event.label == l.label,
            )
            # delete the grabbed clips from disk
            for event in expired_events:
                media_name = f"{event.camera}-{event.id}"
                media_path = Path(
                    f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
                )
                media_path.unlink(missing_ok=True)
            # update the clips attribute for the db entry
            update_query = Event.update(update_params).where(
                Event.camera == name,
                Event.start_time < expire_after,
                Event.label == l.label,
            )
            update_query.execute()