class Streamer(object):
    def __init__(self):
        super(Streamer, self).__init__()
        self._sensors = {}
        Thread(target=self._watcher_thread).start()
        self._dbs = DBQuery(index="sensors", office=office, host=dbhost)

    def get(self, sensor):
        if sensor not in self._sensors:
            return (None, None)
        return (self._sensors[sensor]["rtspuri"],
                self._sensors[sensor]["rtmpuri"])

    def set(self, sensor, rtspuri, rtmpuri, simulation):
        if sensor in self._sensors and \
                self._sensors[sensor]["status"] == "streaming":
            return self._sensors[sensor]["status"]

        p = self._spawn(rtspuri, rtmpuri, simulation)
        if p.poll() is None:
            self._sensors[sensor] = {
                "rtspuri": rtspuri,
                "rtmpuri": rtmpuri,
                "status": "streaming",
                "process": p,
            }
            return self._sensors[sensor]["status"]
        return "disconnected"

    def _update(self, sensor, status="streaming"):
        sinfo = {"status": status}
        self._dbs.update(sensor, sinfo)

    def _spawn(self, rtspuri, rtmpuri, simulation=False):
        # copy the video stream as-is; transcode only in simulation mode
        cmd = ["ffmpeg", "-i", rtspuri, "-vcodec", "copy", "-an", "-f",
               "flv", rtmpuri]
        if simulation:
            cmd = ["ffmpeg", "-i", rtspuri, "-vcodec", "libx264",
                   "-preset:v", "ultrafast", "-tune:v", "zerolatency",
                   "-an", "-f", "flv", rtmpuri]
        print(cmd, flush=True)
        p = subprocess.Popen(cmd)
        return p

    def _watcher_thread(self):
        while True:
            # iterate over a snapshot since set() may add sensors concurrently
            for sensor1 in list(self._sensors):
                if self._sensors[sensor1]["process"].poll() is not None:
                    self._sensors[sensor1]["status"] = "disconnected"
                    self._update(sensor1, status="disconnected")
            time.sleep(30)
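# A quick, self-contained check of the relay command _spawn builds: stream
# copy by default (cheap, no re-encode), libx264 with ultrafast/zerolatency
# only in simulation mode. The URIs below are hypothetical placeholders.
def relay_cmd(rtspuri, rtmpuri, simulation=False):
    if simulation:
        return ["ffmpeg", "-i", rtspuri, "-vcodec", "libx264",
                "-preset:v", "ultrafast", "-tune:v", "zerolatency",
                "-an", "-f", "flv", rtmpuri]
    return ["ffmpeg", "-i", rtspuri, "-vcodec", "copy", "-an", "-f", "flv",
            rtmpuri]

cmd = relay_cmd("rtsp://camera:554/stream", "rtmp://media/live/cam-001")
assert cmd[cmd.index("-vcodec") + 1] == "copy"  # no transcode by default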
"status": "processing", "skip": every_nth_frame, }) break except Exception as e: print("Exception: "+str(e), flush=True) time.sleep(10) # compete for a sensor connection while not stop: try: print("Searching...", flush=True) for sensor in dbs.search("sensor:'camera' and status:'idle' and algorithm='crowd-counting' and office:["+str(office[0])+","+str(office[1])+"]"): try: # compete (with other va instances) for a sensor r=dbs.update(sensor["_id"],{"status":"streaming"},version=sensor["_version"]) # stream from the sensor print("Connected to "+sensor["_id"]+"...",flush=True) connect(sensor,algorithm,sensor["_source"]["url"]) # if exit, there is somehting wrong r=dbs.update(sensor["_id"],{"status":"disconnected"}) if stop: break except Exception as e: print("Exception: "+str(e), flush=True) except Exception as e: print("Exception: "+str(e), flush=True)
    def _rec2db(self, office, sensor, timestamp, path):
        dt = datetime.datetime.fromtimestamp(timestamp / 1000)
        officestr = (str(office[0]) + "c" + str(office[1])).replace(
            "-", "n").replace(".", "d")
        mp4path = self._storage + "/" + officestr + "/" + sensor + "/" + \
            str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
        os.makedirs(mp4path, exist_ok=True)

        mp4file = mp4path + "/" + str(timestamp) + ".mp4"
        list(run([
            "/usr/bin/ffmpeg", "-f", "mp4", "-i", path, "-c", "copy", mp4file
        ]))
        list(run([
            "/usr/bin/ffmpeg", "-i", mp4file, "-vf", "scale=640:360",
            "-frames:v", "1", mp4file + ".png"
        ]))

        sinfo = probe(mp4file)
        sinfo.update({
            "sensor": sensor,
            "office": {
                "lat": office[0],
                "lon": office[1],
            },
            "time": timestamp,
            "path": mp4file[len(self._storage) + 1:],
        })

        if local_office:
            # calculate total bandwidth
            bandwidth = 0
            for stream1 in sinfo["streams"]:
                if "bit_rate" in stream1:
                    bandwidth = bandwidth + stream1["bit_rate"]
            if bandwidth:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": bandwidth})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage)[3]
            if disk_usage > 75 and sensor_index:
                level = "fatal" if disk_usage > 85 else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                db_alt.ingest({
                    "time": int(
                        time.mktime(datetime.datetime.now().timetuple()) *
                        1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": "Disk usage: " + str(disk_usage) + "%",
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

            # ingest recording local
            db_rec = DBIngest(host=dbhost, index="recordings", office=office)
            db_rec.ingest(sinfo)
        else:
            # ingest recording cloud
            db_rec = DBIngest(host=dbhost, index="recordings_c", office="")
            db_rec.ingest(sinfo)
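# The run() and probe() helpers used throughout these services are defined
# elsewhere in the repo; a hedged sketch of what they plausibly look like,
# assuming run() streams a command's output (so callers drain it with
# list(run(...))) and probe() wraps ffprobe's standard JSON output. The
# real probe() also derives fields such as "resolution" and "bandwidth".
import json
import subprocess

def run(cmd):
    # yield output lines so callers can drain the process with list(run(...))
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    for line in p.stdout:
        yield line
    p.wait()

def probe(filename):
    # standard ffprobe invocation for machine-readable stream/format info
    out = subprocess.check_output([
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_format", "-show_streams", filename,
    ])
    info = json.loads(out)
    return {"streams": info.get("streams", []),
            "duration": float(info.get("format", {}).get("duration", 0))}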
            'status': 'idle',
        })

        if not dbhost:
            continue

        try:
            if not r:  # new camera
                print("Searching for template", flush=True)
                template = list(dbp.search(" or ".join(
                    ['{}="{}"'.format(id1[0], id1[1]) for id1 in camids]),
                                           size=1))
                if not template:
                    template = list(dbp.search(
                        "ip={} and port={}".format(ip, port), size=1))
                if template:
                    print("Ingesting", flush=True)
                    record = template[0]["_source"]
                    record.update(sinfo)
                    dbi.ingest(record, refresh="wait_for")
                else:
                    print("Template not found", flush=True)
            else:  # camera re-connect
                dbs.update(r[0]["_id"], sinfo)
        except Exception as e:
            print(traceback.format_exc(), flush=True)

    if not dbhost:
        break
    time.sleep(service_interval)
    def _rec2db(self, office, sensor, timestamp, path):
        disk_usage = psutil.disk_usage(self._storage)[3]
        if disk_usage < halt_rec_th:
            dt = datetime.datetime.fromtimestamp(timestamp / 1000)
            officestr = (str(office[0]) + "c" + str(office[1])).replace(
                "-", "n").replace(".", "d")
            mp4path = self._storage + "/" + officestr + "/" + sensor + "/" + \
                str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
            os.makedirs(mp4path, exist_ok=True)

            mp4file = mp4path + "/" + str(timestamp) + ".mp4"
            # perform a straight copy to fix negative timestamp for chrome
            list(run([
                "/usr/local/bin/ffmpeg", "-f", "mp4", "-i", path, "-c",
                "copy", mp4file
            ]))

            sinfo = probe(mp4file)
            sinfo.update({
                "sensor": sensor,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "time": timestamp,
                "path": mp4file[len(self._storage) + 1:],
            })
        else:
            print("Disk full: recording halted", flush=True)
            sinfo = None

        if local_office:
            # sinfo is None when recording was halted, so guard the lookup
            if sinfo and sinfo["bandwidth"]:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": sinfo["bandwidth"]})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage).percent
            if disk_usage >= warn_disk_th:
                level = "fatal" if disk_usage >= fatal_disk_th else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                message = text["halt recording"].format(disk_usage) \
                    if disk_usage >= halt_rec_th \
                    else text["disk usage"].format(disk_usage)
                db_alt.ingest({
                    "time": int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": message,
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

        # ingest recording
        if sinfo:
            print("Ingest recording: {}".format(sinfo), flush=True)
            office1 = office if local_office else ""

            # denormalize sensor address to recordings
            dbs = DBQuery(host=dbhost, index="sensors", office=office1)
            r = list(dbs.search("_id='" + sinfo["sensor"] + "'", size=1))
            if r:
                sinfo["address"] = r[0]["_source"]["address"]

            db_rec = DBIngest(host=dbhost, index="recordings", office=office1)
            db_rec.ingest(sinfo)
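# Both spellings above read the same value: psutil.disk_usage() returns a
# named tuple (total, used, free, percent), so index [3] and .percent are
# interchangeable. A quick self-contained illustration:
import psutil

usage = psutil.disk_usage("/")
assert usage[3] == usage.percent  # percent used, as a float in 0..100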
            break
        except Exception as e:
            print("Waiting for DB...", flush=True)
            time.sleep(10)

    # compete for a sensor connection
    while not stop:
        try:
            print("Searching...", flush=True)
            for sensor in dbs.search(
                    "sensor:'camera' and status:'idle' and "
                    "algorithm='crowd-counting' and office:[" +
                    str(office[0]) + "," + str(office[1]) + "]"):
                try:
                    # compete (with other va instances) for a sensor
                    r = dbs.update(sensor["_id"], {"status": "streaming"},
                                   seq_no=sensor["_seq_no"],
                                   primary_term=sensor["_primary_term"])

                    # stream from the sensor
                    print("Connected to " + sensor["_id"] + "...", flush=True)
                    connect(sensor["_id"], sensor["_source"]["location"],
                            sensor["_source"]["url"], algorithm,
                            sensor["_source"]["algorithm"],
                            sensor["_source"]["resolution"],
                            sensor["_source"]["zonemap"])

                    # if we exit, something went wrong
                    r = dbs.update(sensor["_id"], {"status": "disconnected"})
                    if stop:
                        break
                except Exception as e:
class FSHandler(FileSystemEventHandler):
    def __init__(self, sensor, office, dbhost, rec_volume):
        self.sensor = sensor
        self.office = office
        self.db_rec = DBIngest(host=dbhost, index="recordings", office=office)
        self.db_sensors = DBQuery(host=dbhost, index="sensors", office=office)
        self.recording_volume = rec_volume
        self.last_file = None
        self.finalize_timer = None
        self.record_cache = []
        # 80 seconds; this should scale with the recording chunk length
        self.timeout = 80

    def on_created(self, event):
        if event.is_directory:
            return
        if event.src_path.endswith(".png"):
            return
        if self.last_file and (self.last_file == event.src_path):
            return

        if self.finalize_timer:
            self.finalize_timer.cancel()
        if self.last_file:
            try:
                self.ingest()
            except Exception as error:
                logger.error("Failed to ingest: %s %s\n" %
                             (self.last_file, error))

        logger.debug("Started recording new file! " + event.src_path)
        self.last_file = event.src_path
        self.finalize_timer = Timer(self.timeout, self.ingest)
        self.finalize_timer.start()
        logger.debug("Started file watch timer for " + str(self.timeout) +
                     " seconds")

    def ffmpeg_convert(self, filename):
        with tempfile.TemporaryDirectory() as tmpdirname:
            filename = os.path.abspath(filename)
            tmpfilename = os.path.abspath(
                os.path.join(tmpdirname, os.path.basename(filename)))
            try:
                list(run([
                    "/usr/bin/ffmpeg", "-i", filename, "-c", "copy",
                    tmpfilename
                ]))
                shutil.move(tmpfilename, filename)
                list(run([
                    "/usr/bin/ffmpeg", "-i", filename, "-vf",
                    "thumbnail,scale=640:360", "-frames:v", "1",
                    filename + ".png"
                ]))
                return filename, probe(filename)
            except Exception as error:
                # not every exception carries .output, so read it defensively
                logger.error("Error converting mp4 with ffmpeg: %s %s" %
                             (error, getattr(error, "output", "")))
                raise

    def get_timestamp(self, filename):
        parsed = os.path.basename(filename).split('_')
        return int(int(parsed[-2]) / 1000000)

    def ingest(self):
        logger.debug("Finished recording file " + self.last_file)
        converted_file, sinfo = self.ffmpeg_convert(self.last_file)
        sinfo.update({
            "sensor": self.sensor,
            "office": {
                "lat": self.office[0],
                "lon": self.office[1],
            },
            "time": self.get_timestamp(converted_file),
            "path": os.path.abspath(converted_file).split(
                os.path.abspath(self.recording_volume) + "/")[1],
        })

        # calculate total bandwidth
        bandwidth = 0
        for stream1 in sinfo["streams"]:
            if "bit_rate" in stream1:
                bandwidth = bandwidth + stream1["bit_rate"]
        self.db_sensors.update(self.sensor, {"bandwidth": bandwidth})

        self.db_rec.ingest(sinfo)
        self.record_cache.append(sinfo)
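# A minimal sketch of wiring FSHandler to a watchdog Observer, which is how
# the Feeder below uses it. The watchdog calls are the library's real API;
# the constructor arguments and paths are hypothetical placeholders.
from watchdog.observers import Observer

observer = Observer()
handler = FSHandler(sensor="cam-001",              # hypothetical sensor id
                    office=[45.5, -122.6],         # hypothetical office
                    dbhost="http://db:9200",       # hypothetical DB host
                    rec_volume="/var/recordings")  # hypothetical volume
watch = observer.schedule(handler, "/var/recordings/cam-001", recursive=True)
observer.start()
# later: observer.unschedule(watch); observer.stop(); observer.join()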
class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        # Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        # Clients
        self.db_alg = DBIngest(host=self.dbhost, index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost, index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost, index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False

    def start(self):
        logger.info(" ### Starting Feeder ### ")

        logger.debug("Waiting for VA startup")
        r = requests.Response()
        r.status_code = 400
        while r.status_code != 200 and r.status_code != 201:
            try:
                r = requests.get(self.vahost)
            except Exception as e:
                r = requests.Response()
                r.status_code = 400
            time.sleep(10)

        # Register Algorithm
        logger.debug("Registering as algorithm in the DB")
        while True:
            try:
                self.alg_id = self.db_alg.ingest({
                    "name": "object_detection",
                    "office": {
                        "lat": self.office[0],
                        "lon": self.office[1]
                    },
                    "status": "idle",
                    "skip": self.every_nth_frame,
                })["_id"]
                break
            except Exception as e:
                logger.debug("Register algo exception: " + str(e))
                time.sleep(10)

        self.mqtttopic = "smtc_va_inferences_" + self.alg_id
        camera_monitor_thread = Thread(target=self.monitor_cameras,
                                       daemon=True)

        logger.debug("Starting working threads")
        self._threadflag = True
        self.startmqtt()
        self.observer.start()
        camera_monitor_thread.start()

        logger.debug("Waiting for interrupt...")
        camera_monitor_thread.join()
        self.observer.join()

    def stop(self):
        logger.info(" ### Stopping Feeder ### ")
        self._threadflag = False

        logger.debug("Unregistering algorithm from DB")
        self.db_alg.delete(self.alg_id)

        self.mqttclient.loop_stop()
        self.observer.stop()

    def startmqtt(self):
        self.mqttclient = mqtt.Client("feeder_" + self.alg_id)
        self.mqttclient.connect(self.mqtthost)
        self.mqttclient.on_message = self.mqtt_handler
        self.mqttclient.loop_start()
        self.mqttclient.subscribe(self.mqtttopic)

    def mqtt_handler(self, client, userdata, message):
        m_in = json.loads(str(message.payload.decode("utf-8", "ignore")))

        for tag in m_in["tags"]:
            m_in[tag] = m_in["tags"][tag]
        del m_in["tags"]

        m_in["time"] = m_in["real_base"] + m_in["timestamp"]
        # convert from nanoseconds to milliseconds
        m_in["time"] = int(m_in["time"] / 1000000)

        self.inference_cache.append(m_in)
        if len(self.inference_cache) >= self.batchsize:
            try:
                self.db_inf.ingest_bulk(self.inference_cache[:self.batchsize])
                self.inference_cache = self.inference_cache[self.batchsize:]
            except Exception as e:
                logger.debug("Ingest Error: " + str(e))

    def monitor_cameras(self):
        logger.debug("Starting Sensor Monitor Thread")
        while self._threadflag:
            logger.debug("Searching for sensors...")
            try:
                for sensor in self.db_sensors.search(
                        "sensor:'camera' and status:'idle' and office:[" +
                        str(self.office[0]) + "," + str(self.office[1]) +
                        "]"):
                    logger.debug(sensor)
                    try:
                        fswatch = None
                        logger.debug("Sensor found! " + sensor["_id"])
                        logger.debug("Setting sensor " + sensor["_id"] +
                                     " to streaming")
                        r = self.db_sensors.update(
                            sensor["_id"], {"status": "streaming"},
                            version=sensor["_version"])

                        logger.debug(
                            "Setting algorithm to streaming from sensor " +
                            sensor["_id"])
                        r = self.db_alg.update(self.alg_id, {
                            "source": sensor["_id"],
                            "status": "processing"
                        })

                        # Attempt to POST to VA service
                        jsonData = {
                            "source": {
                                "uri": sensor["_source"]["url"],
                                "type": "uri"
                            },
                            "tags": {
                                "algorithm": self.alg_id,
                                "sensor": sensor["_id"],
                                "office": {
                                    "lat": self.office[0],
                                    "lon": self.office[1],
                                },
                            },
                            "parameters": {
                                "every-nth-frame": self.every_nth_frame,
                                "recording_prefix":
                                    "recordings/" + sensor["_id"],
                                "method": "mqtt",
                                "address": self.mqtthost,
                                "clientid": self.alg_id,
                                "topic": self.mqtttopic
                            },
                        }

                        folderpath = os.path.join(
                            os.path.realpath(self.recording_volume),
                            sensor["_id"])
                        if not os.path.exists(folderpath):
                            os.makedirs(folderpath)

                        logger.debug("Adding folder watch for " + folderpath)
                        filehandler = FSHandler(
                            sensor=sensor["_id"],
                            office=self.office,
                            dbhost=self.dbhost,
                            rec_volume=self.recording_volume)
                        fswatch = self.observer.schedule(filehandler,
                                                         folderpath,
                                                         recursive=True)

                        try:
                            logger.info("Posting Request to VA Service")
                            r = requests.post(
                                self.vahost + "/object_detection/2",
                                json=jsonData, timeout=10)
                            r.raise_for_status()
                            pipeline_id = None

                            if r.status_code == 200:
                                logger.debug("Started pipeline " + r.text)
                                pipeline_id = int(r.text)

                            while r.status_code == 200:
                                logger.debug("Querying status of pipeline")
                                r = requests.get(
                                    self.vahost + "/object_detection/2/" +
                                    str(pipeline_id) + "/status", timeout=10)
                                r.raise_for_status()
                                jsonValue = r.json()
                                if "avg_pipeline_latency" not in jsonValue:
                                    jsonValue["avg_pipeline_latency"] = 0
                                state = jsonValue["state"]
                                try:
                                    logger.debug("fps: ")
                                    logger.debug(str(jsonValue))
                                except Exception:
                                    logger.debug("error")
                                logger.debug("Pipeline state is " +
                                             str(state))
                                if state == "COMPLETED" or \
                                        state == "ABORTED" or \
                                        state == "ERROR":
                                    logger.debug("Pipeline ended")
                                    break

                                self.db_alg.update(self.alg_id, {
                                    "performance": jsonValue["avg_fps"],
                                    "latency":
                                        jsonValue["avg_pipeline_latency"] *
                                        1000
                                })
                                time.sleep(10)

                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to disconnected")
                            r = self.db_sensors.update(
                                sensor["_id"], {"status": "disconnected"})
                        except requests.exceptions.RequestException as e:
                            logger.error(
                                "Feeder: Request to VA Service Failed: " +
                                str(e))
                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to idle")
                            r = self.db_sensors.update(sensor["_id"],
                                                       {"status": "idle"})
                    except Exception as e:
                        logger.error("Feeder Exception: " + str(e))

                    if fswatch:
                        self.observer.unschedule(fswatch)
                        del filehandler

                    logger.debug("Setting algorithm to idle")
                    r = self.db_alg.update(self.alg_id, {"status": "idle"})
                    break
            except Exception as e:
                print(e, flush=True)
            time.sleep(5)
        logger.debug("Sensor monitor thread done")
        try:
            if not r:  # new camera
                print("Searching for template", flush=True)
                template = list(dbp.search(" or ".join(
                    ['{}="{}"'.format(id1[0], id1[1]) for id1 in camids]),
                                           size=1))
                if not template:
                    template = list(dbp.search(
                        "ip={} and port={}".format(ip, port), size=1))
                if template:
                    print("Ingesting", flush=True)
                    record = template[0]["_source"]
                    record.update(sinfo)
                    record.pop('passcode', None)
                    sid = str(record["sensorid"]) \
                        if "sensorid" in record else None
                    dbi.ingest(record, id1=sid, refresh="wait_for")
                else:
                    print("Template not found", flush=True)
            else:  # camera re-connect
                dbs.update(r[0]["_id"], sinfo)

            # query the sensor id with rtspuri
            if rtmp_host:
                r = list(dbs.search("rtspuri='{}'".format(rtspuri), size=1))
                if r:
                    sensor = r[0]["_id"]
                    rtmpuri = rtmp_host + "/" + str(sensor)

                    # rtsp -> rtmp
                    if streamer.set(sensor, rtspuri, rtmpuri,
                                    simulation) == "streaming":
                        # update the url
                        sinfo.update({
                            "url": rtmpuri,
                            "rtmpid": sensor,
                            "rtmpuri": rtmpuri,
                            "status": "streaming",
                        })

                        # update record to office sensor db
                        record = r[0]["_source"]
                        record.update(sinfo)
                        if update_sensors_db(sensor, record) == False:
    def _cleanup_thread(self):
        owt = OWTAPI()
        dbs = DBQuery(index="sensors", office=office, host=dbhost)
        while not self._stop.is_set():
            todelete = []
            tostartstreamout = []
            tostopstreamout = []
            for name in self._rooms:
                try:
                    participants = owt.list_participants(
                        self._rooms[name]["room"])
                except Exception:
                    participants = 0

                now = int(time.time())
                print("Watcher: room {} participant {} inactive {} "
                      "stream-out status {}".format(
                          name, participants,
                          now - self._rooms[name]["time"],
                          self._rooms[name]["stream_out"]["status"]),
                      flush=True)
                print(self._rooms[name], flush=True)

                if participants > 0:
                    self._rooms[name]["time"] = now
                elif now - self._rooms[name]["time"] > self._inactive:
                    todelete.append(name)

                if self._rooms[name]["stream_out"]["status"] == "start":
                    tostartstreamout.append(name)
                elif self._rooms[name]["stream_out"]["status"] == "stop":
                    tostopstreamout.append(name)

            for name in tostartstreamout:
                if self._rooms[name]["sensor"]["subtype"] != "mobile_camera":
                    continue

                sensor = self._rooms[name]["sensor"]
                stream1 = self._rooms[name]["stream_in"]
                room1 = self._rooms[name]["room"]
                rtmpurl = self._rooms[name]["stream_out"]["rtmpurl"]

                for _item in dbs.search("_id='{}'".format(sensor["id"]),
                                        size=1):
                    print(_item, flush=True)

                try:
                    stream1 = stream1 if stream1 else \
                        owt.list_streams(room1)[0]
                except Exception:
                    continue
                self._rooms[name]["stream_in"] = stream1

                if stream1 and rtmpurl:
                    try:
                        self._rooms[name]["stream_out"]["stream"] = \
                            owt.start_streaming_outs(
                                room=room1, url=rtmpurl,
                                video_from=stream1)["id"]
                    except Exception:
                        continue
                    self._rooms[name]["stream_out"]["status"] = "streaming"

                    try:
                        dbs.update(sensor["id"], {
                            "status": "disconnected",
                            "url": rtmpurl
                        })
                    except Exception:
                        continue

            for name in tostopstreamout:
                if self._rooms[name]["sensor"]["subtype"] != "mobile_camera":
                    continue

                stream1 = self._rooms[name]["stream_out"]["stream"]
                room1 = self._rooms[name]["room"]
                # stopping needs only the room and stream, not the rtmp url
                if stream1:
                    try:
                        owt.stop_streaming_outs(room1, stream1)
                    except Exception:
                        continue
                    self._rooms[name]["stream_out"]["status"] = "idle"

            for name in todelete:
                stream1 = self._rooms[name]["stream_in"]
                room1 = self._rooms[name]["room"]
                try:
                    streams = [stream1] if stream1 else \
                        owt.list_streams(room1)
                except Exception:
                    streams = []

                # for stream1 in streams:
                #     print("Remove stream {}".format(stream1), flush=True)
                #     try:
                #         owt.delete_stream(room1, stream1)
                #     except Exception:
                #         pass

                print("Remove room {}:{}".format(name, room1), flush=True)
                try:
                    owt.delete_room(room1)
                except Exception:
                    pass
                self._rooms.pop(name, None)

            self._stop.wait(self._inactive / 3.0)
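# The loop above is gated by a threading.Event: _stop.wait() both sleeps
# between rounds and doubles as the shutdown check, so stop() interrupts
# the sleep immediately. A minimal, self-contained sketch of the same
# pattern; the class and names here are illustrative, not from the repo.
import threading

class PeriodicWorker:
    def __init__(self, interval):
        self._stop = threading.Event()
        self._interval = interval
        self._thread = threading.Thread(target=self._loop, daemon=True)

    def _loop(self):
        while not self._stop.is_set():
            # ... do one round of housekeeping ...
            self._stop.wait(self._interval)  # wakes early when stop() is called

    def start(self):
        self._thread.start()

    def stop(self):
        self._stop.set()
        self._thread.join()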
    def _rec2db(self, office, sensor, timestamp, path):
        disk_usage = psutil.disk_usage(self._storage)[3]
        if disk_usage < halt_rec_th:
            dt = datetime.datetime.fromtimestamp(timestamp / 1000)
            officestr = (str(office[0]) + "c" + str(office[1])).replace(
                "-", "n").replace(".", "d")
            mp4path = self._storage + "/" + officestr + "/" + sensor + "/" + \
                str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
            os.makedirs(mp4path, exist_ok=True)

            mp4file = mp4path + "/" + str(timestamp) + ".mp4"
            # perform a straight copy to fix negative timestamp for chrome
            list(run(["/usr/local/bin/ffmpeg", "-f", "mp4", "-i", path,
                      "-c", "copy", mp4file]))

            sinfo = probe(mp4file)
            sinfo.update({
                "sensor": sensor,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "time": timestamp,
                "path": mp4file[len(self._storage) + 1:],
            })
        else:
            print("Disk full: recording halted", flush=True)
            sinfo = None

        if local_office:
            # sinfo is None when recording was halted, so guard the lookup
            if sinfo and sinfo["bandwidth"]:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": sinfo["bandwidth"]})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage).percent
            if disk_usage >= warn_disk_th:
                level = "fatal" if disk_usage >= fatal_disk_th else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                message = text["halt recording"].format(disk_usage) \
                    if disk_usage >= halt_rec_th \
                    else text["disk usage"].format(disk_usage)
                db_alt.ingest({
                    "time": int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": message,
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

            # ingest recording local
            if sinfo:
                db_rec = DBIngest(host=dbhost, index="recordings",
                                  office=office)
                db_rec.ingest(sinfo)
        else:
            # ingest recording cloud
            if sinfo:
                db_s = DBQuery(host=dbhost, index="sensors",
                               office=sinfo["office"])
                sensor = list(db_s.search(
                    "_id='" + sinfo["sensor"] + "'", size=1))
                if sensor:
                    # remove status
                    sensor[0]["_source"].pop("status", None)

                    # denormalize address
                    sinfo["address"] = sensor[0]["_source"]["address"]

                    # calculate hash code for the sensor
                    m = hashlib.md5()
                    m.update(json.dumps(sensor[0]["_source"],
                                        ensure_ascii=False).encode('utf-8'))
                    md5 = m.hexdigest()

                    # locate the sensor record in cloud
                    db_sc = DBQuery(host=dbhost, index="sensors", office="")
                    sensor_c = list(db_sc.search("md5='" + md5 + "'", size=1))
                    if not sensor_c:
                        # if not available, ingest a sensor record in cloud
                        sensor_c = [{"_source": sensor[0]["_source"].copy()}]
                        sensor_c[0]["_source"]["md5"] = md5

                        db_sc = DBIngest(host=dbhost, index="sensors",
                                         office="")
                        print("Ingest sensor: {}".format(
                            sensor_c[0]["_source"]), flush=True)
                        sensor_c[0] = db_sc.ingest(sensor_c[0]["_source"])

                    # replace cloud sensor id and ingest recording
                    sinfo["sensor"] = sensor_c[0]["_id"]
                    print("Ingest recording: {}".format(sinfo), flush=True)
                    db_rec = DBIngest(host=dbhost, index="recordings",
                                      office="")
                    db_rec.ingest(sinfo)

                    # copy local analytics to cloud
                    db_a = DBQuery(host=dbhost, index="analytics",
                                   office=sinfo["office"])
                    data = []
                    for r in db_a.search(
                            'sensor="' + sensor[0]["_id"] +
                            '" and office:[' + str(office[0]) + ',' +
                            str(office[1]) + '] and time>=' +
                            str(sinfo["time"]) + ' and time<=' +
                            str(sinfo["time"] + sinfo["duration"] * 1000),
                            size=10000):
                        r["_source"]["sensor"] = sinfo["sensor"]
                        data.append(r["_source"])

                    db_ac = DBIngest(host=dbhost, index="analytics",
                                     office="")
                    print("Ingest analytics: {}".format(len(data)),
                          flush=True)
                    db_ac.ingest_bulk(data)
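# The cloud branch above dedupes sensors by hashing the JSON serialization
# of the record: identical records map to the same md5, so the cloud index
# keeps one document per distinct sensor. A self-contained illustration
# (note: the records must serialize identically, including key order, for
# the hashes to match):
import hashlib
import json

def sensor_md5(record):
    # same serialization as above: json.dumps of the record, utf-8 encoded
    return hashlib.md5(json.dumps(
        record, ensure_ascii=False).encode("utf-8")).hexdigest()

a = {"address": "1 Main St", "type": "camera"}
b = {"address": "1 Main St", "type": "camera"}
assert sensor_md5(a) == sensor_md5(b)  # equal records, equal hash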
# compete for a sensor connection
while True:
    try:
        for sensor in dbs.search(
                "type:'camera' and status:'disconnected' and office:[" +
                str(office[0]) + "," + str(office[1]) + "]"):
            try:
                if sensor["_source"]["url"].split(":")[0] != "rtmp":
                    continue
                if "start_time" in sensor["_source"] and \
                        sensor["_source"]["start_time"] < 0:
                    continue

                rtmpuri = sensor["_source"]["url"]
                sinfo = probe(rtmpuri)
                if sinfo["resolution"]["width"] != 0 and \
                        sinfo["resolution"]["height"] != 0:
                    print("RTMP status disconnected->idle:", sensor["_id"],
                          sensor["_source"]["subtype"], flush=True)

                    # ready for connecting
                    sinfo.update({"status": "idle"})
                    r = dbs.update(sensor["_id"], sinfo,
                                   seq_no=sensor["_seq_no"],
                                   primary_term=sensor["_primary_term"])
            except Exception as e:
                print("Exception: " + str(e), flush=True)
    except Exception as e:
        print("Exception: " + str(e), flush=True)
    time.sleep(service_interval)
class Handler(FileSystemEventHandler):
    def __init__(self, sensor):
        super(Handler, self).__init__()
        self._sensor = sensor
        self._db_rec = DBIngest(host=dbhost, index="recordings",
                                office=office)
        self._db_cam = DBQuery(host=dbhost, index="sensors", office=office)
        self._last_file = None

    def on_created(self, event):
        print("on_created: " + event.src_path, flush=True)
        if event.is_directory:
            return
        if event.src_path.endswith(".png"):
            return
        if self._last_file:
            if self._last_file == event.src_path:
                return
            try:
                if psutil.cpu_percent() >= 80:
                    time.sleep(2)  # yield on busy
                self._process_file(self._last_file)
            except Exception as e:
                print("Exception: " + str(e), flush=True)
        self._last_file = event.src_path

    def _ffmpeg_convert(self, filename):
        print("post-processing " + filename, flush=True)
        with tempfile.TemporaryDirectory() as tmpdirname:
            filename = os.path.abspath(filename)
            tmpfilename = os.path.abspath(
                os.path.join(tmpdirname, os.path.basename(filename)))
            try:
                list(run([
                    "/usr/bin/ffmpeg", "-i", filename, "-c", "copy",
                    tmpfilename
                ]))
                shutil.move(tmpfilename, filename)
                list(run([
                    "/usr/bin/ffmpeg", "-i", filename, "-vf",
                    "thumbnail,scale=640:360", "-frames:v", "1",
                    filename + ".png"
                ]))
                return filename, probe(filename)
            except Exception as e:
                print("Exception: " + str(e), flush=True)
                return None, None

    def _get_timestamp(self, filename):
        parsed = os.path.basename(filename).split('_')
        return int(int(parsed[-2]) / 1000000)

    def _process_file(self, filename):
        converted_file, sinfo = self._ffmpeg_convert(filename)
        if not converted_file:
            return
        sinfo.update({
            "sensor": self._sensor,
            "office": {
                "lat": office[0],
                "lon": office[1],
            },
            "time": self._get_timestamp(converted_file),
            "path": os.path.abspath(converted_file).split(
                os.path.abspath(storage) + "/")[1],
        })

        # calculate total bandwidth
        bandwidth = 0
        for stream1 in sinfo["streams"]:
            if "bit_rate" in stream1:
                bandwidth = bandwidth + stream1["bit_rate"]
        if bandwidth:
            self._db_cam.update(self._sensor, {"bandwidth": bandwidth})

        self._db_rec.ingest(sinfo)
dbq = DBQuery(index="recordings", office=office, host=dbhost) dbs = DBQuery(index="sensors", office=office, host=dbhost) dba = DBQuery(index="analytics", office=office, host=dbhost) dbsq_c = DBQuery(index="sensors", office="", host=dbchost) dbsi_c = DBIngest(index="sensors", office="", host=dbchost) dba_c = DBIngest(index="analytics", office="", host=dbchost) while not stop.is_set(): print("Searching...", flush=True) try: for q in dbq.search("not evaluated=true", size=25): if stop.is_set(): break # mark it as evaluated dbq.update(q["_id"], {"evaluated": True}) # make the upload decision based on analytics queries r = list( dba.search("( " + query + " ) and ( sensor='" + q["_source"]["sensor"] + "' and time>" + str(q["_source"]["time"]) + " and time<" + str(q["_source"]["time"] + q["_source"]["duration"] * 1000) + " ) ", size=1)) if not r: stop.wait(2) continue # get the sensor record sensor = list(