class SensorsHandler(web.RequestHandler):
    def __init__(self, app, request, **kwargs):
        super(SensorsHandler, self).__init__(app, request, **kwargs)
        self.executor = ThreadPoolExecutor(4)
        self._db = DBQuery(index="sensors", office=office, host=dbhost)
        self._owt = OWTAPI()

    def check_origin(self, origin):
        return True

    def _room_details(self, sensor, name, room, stream):
        details = {
            "sensor": sensor,
            "name": name,
            "room": room,
            "stream": stream,
        }
        if webrtchost:
            details["url"] = "{}?roomid={}&streamid={}&office={}".format(
                webrtchost, room, stream,
                quote(",".join(list(map(str, office)))))
        return details

    @run_on_executor
    def _create_room(self, sensor):
        r = list(
            self._db.search("_id='{}' and status='streaming'".format(sensor),
                            size=1))
        if not r:
            return (404, "Sensor Not Found")

        location = r[0]["_source"]["location"]
        protocol = "udp" if 'simsn' in r[0]['_source'].keys() else "tcp"
        name = "{},{}:{}:{}:{}".format(location["lat"], location["lon"],
                                       r[0]["_source"]["type"],
                                       r[0]["_source"]["subtype"],
                                       r[0]["_id"])

        room, stream = watcher.get(name)
        if room and stream:
            return self._room_details(sensor, name, room, stream)

        room = self._owt.create_room(name=name, p_limit=streaming_limit)
        rtsp_url = r[0]["_source"]["url"]
        stream = self._owt.start_streaming_ins(
            room=room, rtsp_url=rtsp_url,
            protocol=protocol) if room else None
        if not stream:
            return (503, "Exception when post")

        watcher.set(name, room, stream)
        return self._room_details(sensor, name, room, stream)

    @gen.coroutine
    def post(self):
        sensor = unquote(self.get_argument("sensor"))
        r = yield self._create_room(sensor)
        if isinstance(r, dict):
            self.write(r)
        else:
            self.set_status(r[0], r[1])
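# A minimal sketch of how a handler like this could be wired into a
# tornado application. The route path and port below are assumptions,
# and the module-level names the handler relies on (office, dbhost,
# webrtchost, watcher, streaming_limit) are presumed to be defined
# elsewhere in the service; this is not the project's actual launcher.
from tornado import ioloop, web

def make_app():
    return web.Application([
        (r"/api/sensors", SensorsHandler),  # assumed route
    ])

if __name__ == "__main__":
    app = make_app()
    app.listen(8088)  # assumed port
    ioloop.IOLoop.current().start()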
class NGINXRedirect(NGINX):
    def __init__(self, upstreams=[], stop=Event()):
        super(NGINXRedirect, self).__init__(upstreams, stop)
        self._db = DBQuery(index="offices", office="", host=dbhost)
        self._saved = self._upstreams

    def _update_upstreams(self):
        changed = super(NGINXRedirect, self)._update_upstreams()
        updates = {s: self._upstreams[s] for s in self._saved}
        try:
            for office1 in self._db.search("location:*", size=100):
                location = office1["_source"]["location"]
                name = ("office" + str(location["lat"]) + "c" +
                        str(location["lon"])).replace("-",
                                                      "n").replace(".", "d")
                protocol, q, host = office1["_source"]["uri"].partition("://")
                host, c, port = host.partition(":")
                if name in self._upstreams:
                    ip = self._upstreams[name][2]
                else:
                    changed = True
                    try:
                        ip = gethostbyname(host)
                    except Exception:
                        ip = "127.0.0.1"
                updates[name] = [host, c + port, ip]
        except Exception:
            self._stop.wait(10)

        if not changed:
            for s in self._upstreams:
                if s not in updates:
                    changed = True
                    break

        self._upstreams = updates
        return changed
"lon": office[1], }, "status": "processing", "skip": every_nth_frame, })["_id"] break except Exception as e: print("Exception: " + str(e), flush=True) time.sleep(10) # compete for a sensor connection while not stop: try: print("Searching...", flush=True) for sensor in dbs.search( "sensor:'camera' and status:'idle' and office:[" + str(office[0]) + "," + str(office[1]) + "]"): try: # compete (with other va instances) for a sensor r = dbs.update(sensor["_id"], {"status": "streaming"}, version=sensor["_version"]) # stream from the sensor print("Connected to " + sensor["_id"] + "...", flush=True) connect(sensor["_id"], algorithm, sensor["_source"]["url"]) # if exit, there is somehting wrong r = dbs.update(sensor["_id"], {"status": "disconnected"}) if stop: break except Exception as e:
    options.extend(["-p " + str(k) for k in r["port"] if r["port"][k]])
    for ip, port in scanner.scan(" ".join(options)):
        # new or disconnected camera
        print("Probing " + ip + ":" + str(port), flush=True)
        try:
            rtspuri, camids = probe_camera_info(ip, port)
            if rtspuri is None:
                continue
        except Exception:
            print(traceback.format_exc(), flush=True)
            continue

        # check database to see if this camera is already registered
        r = None
        if dbhost:
            r = list(dbs.search("url='{}'".format(rtspuri), size=1))
        if r:
            if r[0]["_source"]["status"] != "disconnected":
                print("Skipping {}:{}:{}".format(
                    ip, port, r[0]["_source"]["status"]), flush=True)
                continue

        sinfo = probe(rtspuri)
        if sinfo["resolution"]["width"] == 0 or \
                sinfo["resolution"]["height"] == 0:
            print("Unknown width & height, skipping", flush=True)
            continue

        sinfo.update({
            'sensor': 'camera',
class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        # Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        # Clients
        self.db_alg = DBIngest(host=self.dbhost, index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost, index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost, index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False

    def start(self):
        logger.info(" ### Starting Feeder ### ")

        logger.debug("Waiting for VA startup")
        r = requests.Response()
        r.status_code = 400
        while r.status_code != 200 and r.status_code != 201:
            try:
                r = requests.get(self.vahost)
            except Exception as e:
                r = requests.Response()
                r.status_code = 400
            time.sleep(10)

        # Register Algorithm
        logger.debug("Registering as algorithm in the DB")
        while True:
            try:
                self.alg_id = self.db_alg.ingest({
                    "name": "object_detection",
                    "office": {
                        "lat": self.office[0],
                        "lon": self.office[1]
                    },
                    "status": "idle",
                    "skip": self.every_nth_frame,
                })["_id"]
                break
            except Exception as e:
                logger.debug("Register algo exception: " + str(e))
                time.sleep(10)

        self.mqtttopic = "smtc_va_inferences_" + self.alg_id

        camera_monitor_thread = Thread(target=self.monitor_cameras,
                                       daemon=True)

        logger.debug("Starting working threads")
        self._threadflag = True
        self.startmqtt()
        self.observer.start()
        camera_monitor_thread.start()

        logger.debug("Waiting for interrupt...")
        camera_monitor_thread.join()
        self.observer.join()

    def stop(self):
        logger.info(" ### Stopping Feeder ### ")
        self._threadflag = False

        logger.debug("Unregistering algorithm from DB")
        self.db_alg.delete(self.alg_id)

        self.mqttclient.loop_stop()
        self.observer.stop()

    def startmqtt(self):
        self.mqttclient = mqtt.Client("feeder_" + self.alg_id)
        self.mqttclient.connect(self.mqtthost)
        self.mqttclient.on_message = self.mqtt_handler
        self.mqttclient.loop_start()
        self.mqttclient.subscribe(self.mqtttopic)

    def mqtt_handler(self, client, userdata, message):
        m_in = json.loads(str(message.payload.decode("utf-8", "ignore")))
        for tag in m_in["tags"]:
            m_in[tag] = m_in["tags"][tag]
        del m_in["tags"]
        m_in["time"] = m_in["real_base"] + m_in["timestamp"]
        # convert to milliseconds
        m_in["time"] = int(m_in["time"] / 1000000)
        self.inference_cache.append(m_in)

        if len(self.inference_cache) >= self.batchsize:
            try:
                self.db_inf.ingest_bulk(self.inference_cache[:self.batchsize])
                self.inference_cache = self.inference_cache[self.batchsize:]
            except Exception as e:
                logger.debug("Ingest Error: " + str(e))

    def monitor_cameras(self):
        logger.debug("Starting Sensor Monitor Thread")
        while self._threadflag:
            logger.debug("Searching for sensors...")

            try:
                for sensor in self.db_sensors.search(
                        "sensor:'camera' and status:'idle' and office:[" +
                        str(self.office[0]) + "," + str(self.office[1]) +
                        "]"):
                    logger.debug(sensor)
                    try:
                        fswatch = None
                        logger.debug("Sensor found! " + sensor["_id"])
                        logger.debug("Setting sensor " + sensor["_id"] +
                                     " to streaming")
                        r = self.db_sensors.update(sensor["_id"],
                                                   {"status": "streaming"},
                                                   version=sensor["_version"])

                        logger.debug(
                            "Setting algorithm to streaming from sensor " +
                            sensor["_id"])
                        r = self.db_alg.update(self.alg_id, {
                            "source": sensor["_id"],
                            "status": "processing"
                        })

                        # Attempt to POST to VA service
                        jsonData = {
                            "source": {
                                "uri": sensor["_source"]["url"],
                                "type": "uri"
                            },
                            "tags": {
                                "algorithm": self.alg_id,
                                "sensor": sensor["_id"],
                                "office": {
                                    "lat": self.office[0],
                                    "lon": self.office[1],
                                },
                            },
                            "parameters": {
                                "every-nth-frame": self.every_nth_frame,
                                "recording_prefix":
                                    "recordings/" + sensor["_id"],
                                "method": "mqtt",
                                "address": self.mqtthost,
                                "clientid": self.alg_id,
                                "topic": self.mqtttopic
                            },
                        }

                        folderpath = os.path.join(
                            os.path.realpath(self.recording_volume),
                            sensor["_id"])
                        if not os.path.exists(folderpath):
                            os.makedirs(folderpath)

                        logger.debug("Adding folder watch for " + folderpath)
                        filehandler = FSHandler(
                            sensor=sensor["_id"],
                            office=self.office,
                            dbhost=self.dbhost,
                            rec_volume=self.recording_volume)
                        fswatch = self.observer.schedule(filehandler,
                                                         folderpath,
                                                         recursive=True)

                        try:
                            logger.info("Posting Request to VA Service")
                            r = requests.post(
                                self.vahost + "/object_detection/2",
                                json=jsonData, timeout=10)
                            r.raise_for_status()
                            pipeline_id = None

                            if r.status_code == 200:
                                logger.debug("Started pipeline " + r.text)
                                pipeline_id = int(r.text)

                            while r.status_code == 200:
                                logger.debug("Querying status of pipeline")
                                r = requests.get(
                                    self.vahost + "/object_detection/2/" +
                                    str(pipeline_id) + "/status", timeout=10)
                                r.raise_for_status()
                                jsonValue = r.json()
                                if "avg_pipeline_latency" not in jsonValue:
                                    jsonValue["avg_pipeline_latency"] = 0
                                state = jsonValue["state"]
                                try:
                                    logger.debug("fps: ")
                                    logger.debug(str(jsonValue))
                                except Exception:
                                    logger.debug("error")
                                logger.debug("Pipeline state is " +
                                             str(state))

                                if state == "COMPLETED" or \
                                        state == "ABORTED" or \
                                        state == "ERROR":
                                    logger.debug("Pipeline ended")
                                    break

                                self.db_alg.update(
                                    self.alg_id, {
                                        "performance": jsonValue["avg_fps"],
                                        "latency":
                                            jsonValue["avg_pipeline_latency"]
                                            * 1000
                                    })
                                time.sleep(10)

                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to disconnected")
                            r = self.db_sensors.update(
                                sensor["_id"], {"status": "disconnected"})
                        except requests.exceptions.RequestException as e:
                            logger.error(
                                "Feeder: Request to VA Service Failed: " +
                                str(e))
                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to idle")
                            r = self.db_sensors.update(sensor["_id"],
                                                       {"status": "idle"})

                    except Exception as e:
                        logger.error("Feeder Exception: " + str(e))

                    if fswatch:
                        self.observer.unschedule(fswatch)
                        del filehandler

                    logger.debug("Setting algorithm to idle")
                    r = self.db_alg.update(self.alg_id, {"status": "idle"})
                    break
            except Exception as e:
                print(e, flush=True)

            time.sleep(5)

        logger.debug("Sensor monitor thread done")
dba.wait(stop)
algorithm = dba.ingest({
    "name": text["entrance-counting"],
    "office": {
        "lat": office[0],
        "lon": office[1],
    },
    "status": "processing",
    "skip": every_nth_frame,
})["_id"]

# compete for a sensor connection
while not stop.is_set():
    try:
        print("Searching...", flush=True)
        for sensor in dbs.search(
                "type:'camera' and status:'idle' and "
                "algorithm='entrance-counting' and office:[" +
                str(office[0]) + "," + str(office[1]) + "]"):
            try:
                # compete (with other va instances) for a sensor
                r = dbs.update(sensor["_id"], {"status": "streaming"},
                               seq_no=sensor["_seq_no"],
                               primary_term=sensor["_primary_term"])
                # stream from the sensor
                print("Connected to " + sensor["_id"] + "...", flush=True)
                connect(sensor["_id"], sensor["_source"]["location"],
                        sensor["_source"]["url"], algorithm,
                        sensor["_source"]["algorithm"])
                # if we exit, something went wrong
                r = dbs.update(sensor["_id"], {"status": "disconnected"})
                if stop.is_set():
                    break
            except Exception as e:
                print("Exception in count-entrance search sensor: " + str(e),
                      flush=True)
"lon": office[1], }, "status": "processing", "skip": every_nth_frame, })["_id"] break except Exception as e: print("Waiting for DB...", flush=True) time.sleep(10) # compete for a sensor connection while not stop: try: print("Searching...", flush=True) for sensor in dbs.search( "sensor:'camera' and status:'idle' and algorithm='" + myAlgorithm + "' and office:[" + str(office[0]) + "," + str(office[1]) + "]"): try: # compete (with other va instances) for a sensor r = dbs.update(sensor["_id"], {"status": "streaming"}, seq_no=sensor["_seq_no"], primary_term=sensor["_primary_term"]) # stream from the sensor print("Connected to " + sensor["_id"] + "...", flush=True) connect(sensor["_id"], sensor["_source"]["location"], sensor["_source"]["url"], algorithm, sensor["_source"]["algorithm"]) # if exit, there is somehting wrong r = dbs.update(sensor["_id"], {"status": "disconnected"})
    def _cleanup_thread(self):
        owt = OWTAPI()
        dbs = DBQuery(index="sensors", office=office, host=dbhost)
        while not self._stop.is_set():
            todelete = []
            tostartstreamout = []
            tostopstreamout = []
            for name in self._rooms:
                try:
                    participants = owt.list_participants(
                        self._rooms[name]["room"])
                except Exception:
                    participants = 0

                now = int(time.time())
                print(
                    "Watcher: room {} participant {} inactive {} "
                    "stream-out status {}".format(
                        name, participants,
                        now - self._rooms[name]["time"],
                        self._rooms[name]["stream_out"]["status"]),
                    flush=True)
                print(self._rooms[name], flush=True)

                if participants > 0:
                    self._rooms[name]["time"] = now
                elif now - self._rooms[name]["time"] > self._inactive:
                    todelete.append(name)

                if self._rooms[name]["stream_out"]["status"] == "start":
                    tostartstreamout.append(name)
                elif self._rooms[name]["stream_out"]["status"] == "stop":
                    tostopstreamout.append(name)

            for name in tostartstreamout:
                if self._rooms[name]["sensor"]["subtype"] != "mobile_camera":
                    continue
                sensor = self._rooms[name]["sensor"]
                stream1 = self._rooms[name]["stream_in"]
                room1 = self._rooms[name]["room"]
                rtmpurl = self._rooms[name]["stream_out"]["rtmpurl"]

                for _item in dbs.search("_id='{}'".format(sensor["id"]),
                                        size=1):
                    print(_item, flush=True)

                try:
                    stream1 = stream1 if stream1 else \
                        owt.list_streams(room1)[0]
                except Exception:
                    continue
                self._rooms[name]["stream_in"] = stream1

                if stream1 and rtmpurl:
                    try:
                        self._rooms[name]["stream_out"]["stream"] = \
                            owt.start_streaming_outs(
                                room=room1, url=rtmpurl,
                                video_from=stream1)["id"]
                    except Exception:
                        continue
                    self._rooms[name]["stream_out"]["status"] = "streaming"
                    try:
                        dbs.update(sensor["id"], {
                            "status": "disconnected",
                            "url": rtmpurl
                        })
                    except Exception:
                        continue

            for name in tostopstreamout:
                if self._rooms[name]["sensor"]["subtype"] != "mobile_camera":
                    continue
                stream1 = self._rooms[name]["stream_out"]["stream"]
                room1 = self._rooms[name]["room"]
                if stream1:
                    try:
                        owt.stop_streaming_outs(room1, stream1)
                    except Exception:
                        continue
                self._rooms[name]["stream_out"]["status"] = "idle"

            for name in todelete:
                stream1 = self._rooms[name]["stream_in"]
                room1 = self._rooms[name]["room"]
                try:
                    streams = [stream1] if stream1 else \
                        owt.list_streams(room1)
                except Exception:
                    streams = []
                # for stream1 in streams:
                #     print("Remove stream {}".format(stream1), flush=True)
                #     try:
                #         owt.delete_stream(room1, stream1)
                #     except:
                #         pass
                print("Remove room {}:{}".format(name, room1), flush=True)
                try:
                    owt.delete_room(room1)
                except Exception:
                    pass
                self._rooms.pop(name, None)

            self._stop.wait(self._inactive / 3.0)
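# For reference, the cleanup loop above implies each self._rooms entry
# carries roughly the following shape. This literal is an illustration
# inferred from the field accesses in _cleanup_thread, not a definition
# taken from the source; field values are placeholders.
example_room_entry = {
    "room": "owt-room-id",
    "time": 1600000000,                  # last-activity epoch seconds
    "stream_in": None,                   # OWT stream-in id, once known
    "sensor": {"id": "sensor-id", "subtype": "mobile_camera"},
    "stream_out": {
        "status": "start",               # start | streaming | stop | idle
        "rtmpurl": "rtmp://host/live/key",
        "stream": None,                  # filled once stream-out begins
    },
}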
            continue
        sinfo["resolution"] = {"width": width, "height": height}
        sinfo.update(desc)
        sinfo.update({
            'sensor': 'camera',
            'model': 'ip_camera',
            'url': rtspuri,
            'status': 'idle',
        })
        print(json.dumps(sinfo, indent=2), flush=True)
        if not dbhost:
            continue

        try:
            r = list(
                dbs.search("sensor:'camera' and " + camid[0] + "='" +
                           camid[1] + "'", size=1))
            if not r:  # new camera
                print("Searching for template: " + camid[0] + "=" + camid[1],
                      flush=True)
                template = list(
                    dbp.search(camid[0] + "='" + camid[1] + "'", size=1))
                if template:
                    print("Ingesting", flush=True)
                    record = template[0]["_source"]
                    record.update(sinfo)
                    dbi.ingest(record)
                else:
                    print("Template not found", flush=True)
            elif r[0]["_source"]["status"] == "disconnected":
                # camera re-connect
"service": text["maintanence"], "status": "active", }) dbq = DBQuery(index="recordings", office=office, host=dbhost) dbs = DBQuery(index="sensors", office=office, host=dbhost) dba = DBQuery(index="analytics", office=office, host=dbhost) dbsq_c = DBQuery(index="sensors", office="", host=dbchost) dbsi_c = DBIngest(index="sensors", office="", host=dbchost) dba_c = DBIngest(index="analytics", office="", host=dbchost) while not stop.is_set(): print("Searching...", flush=True) try: for q in dbq.search("not evaluated=true", size=25): if stop.is_set(): break # mark it as evaluated dbq.update(q["_id"], {"evaluated": True}) # make the upload decision based on analytics queries r = list( dba.search("( " + query + " ) and ( sensor='" + q["_source"]["sensor"] + "' and time>" + str(q["_source"]["time"]) + " and time<" + str(q["_source"]["time"] + q["_source"]["duration"] * 1000) + " ) ", size=1)) if not r: stop.wait(2)
def discover_all_onvif_cameras():
    ip_range = os.environ['IP_SCAN_RANGE']
    port_range = os.environ['PORT_SCAN_RANGE']
    office = list(map(float, os.environ["OFFICE"].split(",")))
    distance = float(os.environ["DISTANCE"])
    angleoffset = float(os.environ["ANGLEOFFSET"])
    dbhost = os.environ["DBHOST"]
    sensor_index = 0
    mac_sensor_id = {}
    while True:
        desclist = []
        onvifcams = scan_onvif_camera(ip_range, port_range)
        nsensors = len(onvifcams)
        db = DBIngest(index="sensors", office=office, host=dbhost)
        dbs = DBQuery(index="sensors", office=office, host=dbhost)
        for cam in onvifcams:
            ip = cam['ip']
            port = int(cam['port'])
            desc = discover_onvif_camera(ip, port)
            if desc['MAC'] is None:
                if 'NetworkInterfaces' in desc and \
                        len(desc['NetworkInterfaces']) >= 1:
                    desc['MAC'] = \
                        desc['NetworkInterfaces'][0]['Info']['HwAddress']
                else:
                    # fall back to the camera serial number as the id
                    desc['MAC'] = desc['DeviceInformation']['SerialNumber']
            if desc['MAC'] not in mac_sensor_id:
                sensor_index += 1
                mac_sensor_id[desc['MAC']] = sensor_index
            sensor_id = mac_sensor_id[desc['MAC']]

            # add credentials to the rtsp uri
            rtspuri = desc["MediaStreamUri"]["Uri"]
            rtspuri = rtspuri.replace('rtsp://', 'rtsp://*****:*****@')

            camdesc = {
                "sensor": "camera",
                "icon": "camera.gif",
                "office": {
                    "lat": office[0],
                    "lon": office[1]
                },
                "model": "ip_camera",
                "resolution": {
                    "width":
                        desc["MediaVideoSources"][0]['Resolution']['Width'],
                    "height":
                        desc["MediaVideoSources"][0]['Resolution']["Height"]
                },
                "location": geo_point(
                    office, distance,
                    math.pi * 2 / nsensors * sensor_id +
                    math.pi * angleoffset / 180.0),
                "url": rtspuri,
                "mac": desc["MAC"],
                'theta': 15.0,
                'mnth': 15.0,
                'alpha': 45.0,
                'fovh': 90.0,
                'fovv': 68.0,
                "status": "idle",
            }
            try:
                found = list(
                    dbs.search("sensor:'camera' and model:'ip_camera' and "
                               "mac='" + desc['MAC'] + "'", size=1))
                if not found:
                    desclist.append(camdesc)
            except Exception as e:
                print(e)

        if desclist:
            try:
                db.ingest_bulk(desclist)
            except Exception as e:
                print("Exception: " + str(e), flush=True)
        time.sleep(60)
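# geo_point is defined elsewhere in the repo; the discovery loop above
# uses it to place the i-th camera on a circle of radius `distance`
# around the office at evenly spaced angles (radians). A plausible
# small-distance equirectangular sketch of such a helper, under the
# assumption that `distance` is in meters; this is not the project's
# actual implementation.
import math

def geo_point_sketch(office, distance, angle):
    # ~111,111 m per degree of latitude; a degree of longitude shrinks
    # by cos(latitude)
    dlat = distance * math.cos(angle) / 111111.0
    dlon = distance * math.sin(angle) / (
        111111.0 * math.cos(math.radians(office[0])))
    return {"lat": office[0] + dlat, "lon": office[1] + dlon}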
def quit_service(signum, sigframe):
    exit(143)

signal(SIGTERM, quit_service)
dbq = DBQuery(index=indexes[0], office=office, host=dbhost)
dba = DBQuery(index=indexes[1], office=office, host=dbhost)

while True:
    print("Sleeping...")
    time.sleep(service_interval)
    print("Searching...", flush=True)

    try:
        data = list(dba.search("not recording=*", size=search_batch))
        if not data:
            continue

        updates = []
        while data:
            sensor1 = data[-1]["_source"]["sensor"]
            office1 = data[-1]["_source"]["office"]
            time1 = data[-1]["_source"]["time"]
            for q in dbq.search('sensor="' + sensor1 + '" and office:[' +
                                str(office1["lat"]) + ',' +
                                str(office1["lon"]) + '] and time<=' +
                                str(time1) + ' and time+duration*1000>=' +
                                str(time1)):
                for i in range(len(data) - 1, -1, -1):
                    if data[i]["_source"]["sensor"] == sensor1 and data[i][
signal(SIGTERM, quit_service)
dbs = DBIngest(index="services", office=office, host=dbhost)
dbs.wait(stop)
rs = dbs.ingest({
    "name": text["smart-upload"],
    "service": text["maintanence"],
    "status": "active",
})
dbq = DBQuery(index="recordings", office=office, host=dbhost)
dba = DBQuery(index="analytics", office=office, host=dbhost)

while not stop.is_set():
    print("Searching...", flush=True)
    try:
        for q in dbq.search("evaluated=false", size=25):
            if stop.is_set():
                break

            # mark it as evaluated
            dbq.update(q["_id"], {"evaluated": True})

            # make the upload decision based on analytics queries
            r = list(
                dba.search("( " + query + " ) and ( sensor='" +
                           q["_source"]["sensor"] + "' and time>" +
                           str(q["_source"]["time"]) + " and time<" +
                           str(q["_source"]["time"] +
                               q["_source"]["duration"] * 1000) + " ) ",
                           size=1))
            if not r:
                stop.wait(2)
class OccupencyTrigger(Trigger):
    def __init__(self):
        super(OccupencyTrigger, self).__init__()
        self._db = DBQuery(index="analytics", office=office, host=dbhost)

    def trigger(self):
        time.sleep(service_interval[0])
        objects = ("", 0)
        seats = ("", 0)
        people = ("", 0)
        queue = ("", 0)
        try:
            for q in self._db.search(
                    "time>=now-" + str(args[0]) + " and (nobjects>" +
                    str(args[1]) + " or count.people>" + str(args[2]) +
                    " or nseats>" + str(args[3]) + " or count.queue>" +
                    str(args[4]) + ")", size=75):
                if "nobjects" in q["_source"]:
                    if q["_source"]["nobjects"] > objects[1]:
                        objects = (q["_source"]["location"],
                                   q["_source"]["nobjects"])
                if "nseats" in q["_source"]:
                    if q["_source"]["nseats"] > seats[1]:
                        seats = (q["_source"]["location"],
                                 q["_source"]["nseats"])
                if "count" in q["_source"]:
                    if "people" in q["_source"]["count"]:
                        if q["_source"]["count"]["people"] > people[1]:
                            people = (q["_source"]["location"],
                                      q["_source"]["count"]["people"])
                    if "queue" in q["_source"]["count"]:
                        if q["_source"]["count"]["queue"] > queue[1]:
                            queue = (q["_source"]["location"],
                                     q["_source"]["count"]["queue"])
        except Exception as e:
            print("Exception: " + str(e), flush=True)

        info = []
        if objects[1] > 0:
            info.append({
                "location": objects[0],
                "warning": [{
                    "message": "Traffic busy: #objects=" + str(objects[1]),
                    "args": {
                        "nobjects": objects[1],
                    },
                }],
            })
        if people[1] > 0:
            info.append({
                "location": people[0],
                "warning": [{
                    "message": "Entrance crowded: #people=" + str(people[1]),
                    "args": {
                        "occupency": people[1],
                    }
                }],
            })
        if queue[1] > 0:
            info.append({
                "location": queue[0],
                "warning": [{
                    "message": "Entrance crowded: #queue=" + str(queue[1]),
                    "args": {
                        "occupency": queue[1],
                    }
                }],
            })
        if seats[1] > 0:
            info.append({
                "location": seats[0],
                "warning": [{
                    "message": "Zone crowded: #seats=" + str(seats[1]),
                    "args": {
                        "nseats": seats[1],
                    }
                }],
            })
        return info
    def _search(self, index, queries, size):
        try:
            dbq = DBQuery(index=index, office=office, host=dbhost)
            return {"response": list(dbq.search(queries, size))}
        except Exception as e:
            return {"response": [], "status": str(e)}
    ])
    time.sleep(10)

def quit_service(signum, sigframe):
    exit(143)

signal(SIGTERM, quit_service)
filters = [pattern for i in range(ncameras)]
if dbhost and office:
    db = DBQuery(index="provisions", office=office, host=dbhost)
    db.wait()
    for r1 in db.search(
            "algorithm='{}' and office:[{},{}] and simfile:* and simsn:*".
            format(algorithm, office[0], office[1]), size=ncameras):
        m = re.search('[0-9]+$', r1["_source"]["simsn"])
        if not m:
            continue
        i = int(m.group(0))
        if i < ncameras:
            filters[i] = r1["_source"]["simfile"]

files = list(os.listdir(simulated_root))
with ThreadPoolExecutor(ncameras) as e:
    k = random.randint(0, ncameras)
    for i in range(ncameras):
        files1 = [f for f in files if re.search(filters[i], f)]
        file = files1[(i + k) % len(files1)]
        print("#{} camera: {}".format(i, file), flush=True)
        e.submit(serve_stream, simulated_root + "/" + file,
                 rtsp_port + i * port_step, rtp_port + i * port_step)
"status": "active", }) break except Exception as e: print("Exception: " + str(e), flush=True) time.sleep(10) dbq = DBQuery(index=indexes, office=office, host=dbhost) while True: print("Searching...", flush=True) print("query = ", query) try: for q in dbq.search(query): url = smhost + '/' + q["_source"]["path"] print("url: ", url) mp4file = "/tmp/" + str(os.path.basename(url)) print("Transcoding...") os.remove(mp4file) list( run([ "/usr/local/bin/ffmpeg", "-f", "mp4", "-i", url, "-c:v", "libsvt_hevc", "-c:a", "aac", mp4file ])) print("Uploading: ", cloudhost) sensor = q["_source"]["sensor"]
if isinstance(office, list):
    dbs.wait(stop)
    rs = dbs.ingest({
        "name": text["cleanup"],
        "service": text["maintenance"],
        "status": "active",
    })

while not stop.is_set():
    print("Searching...", flush=True)
    for index in indexes:
        if stop.is_set():
            break
        db = DBQuery(index=index, office=office, host=dbhost)
        try:
            for r in db.search("time<now-" + str(retention_time * 1000),
                               size=500):
                if stop.is_set():
                    break

                # delete the record
                db.delete(r["_id"])

                # delete the file at the recorded path
                if "path" in r["_source"]:
                    try:
                        os.remove(storage + "/" + r["_source"]["path"])
                        os.remove(storage + "/" + r["_source"]["path"] +
                                  ".png")
                    except Exception as e:
                        pass
        except Exception as e:
            print("Exception: " + str(e), flush=True)
class OccupencyTrigger(Trigger):
    def __init__(self):
        super(OccupencyTrigger, self).__init__()
        self._db = DBQuery(index="analytics", office=office, host=dbhost)

    def trigger(self, stop):
        stop.wait(service_interval[0])
        objects = ("", 0)
        crowd = ("", 0)
        entrance = ("", 0)
        svcq = ("", 0)
        try:
            for q in self._db.search(
                    "time>=now-" + args[0] + " and ((nobjects>" + args[1] +
                    " and algorithm:'object') or (nobjects>" + args[2] +
                    " and algorithm:'svcq') or (nobjects>" + args[3] +
                    " and algorithm:'crowd') or (nobjects>" + args[4] +
                    " and algorithm:'entrance'))", size=75):
                nobjects = q["_source"]["nobjects"]
                algorithm = q["_source"]["algorithm"]
                location = q["_source"]["location"]
                if algorithm.find("object") >= 0:
                    if nobjects > objects[1]:
                        objects = (location, nobjects)
                elif algorithm.find("entrance") >= 0:
                    if nobjects > entrance[1]:
                        entrance = (location, nobjects)
                elif algorithm.find("svcq") >= 0:
                    if nobjects > svcq[1]:
                        svcq = (location, nobjects)
                elif algorithm.find("crowd") >= 0:
                    if nobjects > crowd[1]:
                        crowd = (location, nobjects)
        except Exception as e:
            print("Exception: " + str(e), flush=True)

        info = []
        if objects[1] > 0:
            info.append({
                "location": objects[0],
                "warning": [{
                    "message": text["traffic busy"].format(objects[1]),
                    "args": {
                        "nobjects": objects[1],
                    },
                }],
            })
        if entrance[1] > 0:
            info.append({
                "location": entrance[0],
                "warning": [{
                    "message": text["entrance crowded"].format(entrance[1]),
                    "args": {
                        "occupency": entrance[1],
                    }
                }],
            })
        if svcq[1] > 0:
            info.append({
                "location": svcq[0],
                "warning": [{
                    "message": text["service slow"].format(svcq[1]),
                    "args": {
                        "occupency": svcq[1],
                    }
                }],
            })
        if crowd[1] > 0:
            info.append({
                "location": crowd[0],
                "warning": [{
                    "message": text["seat crowded"].format(crowd[1]),
                    "args": {
                        "nseats": crowd[1],
                    }
                }],
            })
        return info
    def _search(self, index, queries, size, office):
        db = DBQuery(index=index, office=office, host=self.dbhost)
        try:
            return list(db.search(queries, size))
        except Exception as e:
            return str(e)
if "coded_width" in stream: width=int(stream["coded_width"]) if "coded_height" in stream: height=int(stream["coded_height"]) if width==0 or height==0: print("Unknown width & height, skipping", flush=True) continue # retrieve unique location if mac not in cameras: cameras[mac]=camera_count camera_count=camera_count+1 location = locations[int(cameras[mac] % (len(locations)))] print("location: ["+str(location[0])+","+str(location[1])+"]",flush=True) try: print("Checking for preexistance", flush=True) found=list(dbs.search("sensor:'camera' and model:'ip_camera' and mac='"+mac+"'", size=1)) if not found: print("Ingesting", flush=True) db.ingest({ 'sensor': 'camera', 'icon': 'camera.gif', 'office': { 'lat': office[0], 'lon': office[1] }, 'model': 'ip_camera', 'resolution': { 'width': width, 'height': height }, 'location': { "lat": location[0], "lon": location[1] }, 'url': rtspuri, 'mac': mac, 'theta': 105.0, 'mnth': 75.0, 'alpha': 45.0, 'fovh': 90.0,
"lon": office[1], }, "status": "processing", "skip": every_nth_frame, })["_id"] break except Exception as e: print("Exception: " + str(e), flush=True) time.sleep(10) # compete for a sensor connection while not stop: try: print("Searching...", flush=True) for sensor in dbs.search( "sensor:'camera' and status:'idle' and algorithm='object-detection' and office:[" + str(office[0]) + "," + str(office[1]) + "]"): try: # compete (with other va instances) for a sensor r = dbs.update(sensor["_id"], {"status": "streaming"}, version=sensor["_version"]) # stream from the sensor print("Connected to " + sensor["_id"] + "...", flush=True) connect(sensor["_id"], sensor["_source"]["location"], algorithm, sensor["_source"]["url"]) # if exit, there is somehting wrong r = dbs.update(sensor["_id"], {"status": "disconnected"}) if stop: break
def quit_service(signum, sigframe):
    nginx1.stop()
    exit(143)

signal(SIGTERM, quit_service)
nginx1.start()
dbs = DBQuery(index="sensors", office=office, host=dbhost)

# compete for a sensor connection
while True:
    try:
        for sensor in dbs.search(
                "type:'camera' and status:'disconnected' and office:[" +
                str(office[0]) + "," + str(office[1]) + "]"):
            try:
                if sensor["_source"]["url"].split(":")[0] != "rtmp":
                    continue
                if "start_time" in sensor["_source"] and \
                        sensor["_source"]["start_time"] < 0:
                    continue

                rtmpuri = sensor["_source"]["url"]
                sinfo = probe(rtmpuri)
                if sinfo["resolution"]["width"] != 0 and \
                        sinfo["resolution"]["height"] != 0:
                    print("RTMP status disconnected->idle:", sensor["_id"],
                          sensor["_source"]["subtype"], flush=True)
                    # ready for connecting
    def _rec2db(self, office, sensor, timestamp, path):
        disk_usage = psutil.disk_usage(self._storage)[3]
        if disk_usage < halt_rec_th:
            dt = datetime.datetime.fromtimestamp(timestamp / 1000)
            officestr = (str(office[0]) + "c" + str(office[1])).replace(
                "-", "n").replace(".", "d")
            mp4path = self._storage + "/" + officestr + "/" + sensor + \
                "/" + str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
            os.makedirs(mp4path, exist_ok=True)
            mp4file = mp4path + "/" + str(timestamp) + ".mp4"

            # perform a straight copy to fix negative timestamps for chrome
            list(
                run([
                    "/usr/local/bin/ffmpeg", "-f", "mp4", "-i", path, "-c",
                    "copy", mp4file
                ]))

            sinfo = probe(mp4file)
            sinfo.update({
                "sensor": sensor,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "time": timestamp,
                "path": mp4file[len(self._storage) + 1:],
            })
        else:
            print("Disk full: recording halted", flush=True)
            sinfo = None

        if local_office:
            if sinfo and sinfo["bandwidth"]:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": sinfo["bandwidth"]})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage).percent
            if disk_usage >= warn_disk_th:
                level = "fatal" if disk_usage >= fatal_disk_th else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                message = text["halt recording"].format(disk_usage) \
                    if disk_usage >= halt_rec_th \
                    else text["disk usage"].format(disk_usage)
                db_alt.ingest({
                    "time": int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": message,
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

        # ingest recording locally
        if sinfo:
            print("Ingest recording: {}".format(sinfo), flush=True)
            office1 = office if local_office else ""

            # denormalize the sensor address into the recording
            dbs = DBQuery(host=dbhost, index="sensors", office=office1)
            r = list(dbs.search("_id='" + sinfo["sensor"] + "'", size=1))
            if r:
                sinfo["address"] = r[0]["_source"]["address"]

            db_rec = DBIngest(host=dbhost, index="recordings",
                              office=office1)
            db_rec.ingest(sinfo)
    def _rec2db(self, office, sensor, timestamp, path):
        disk_usage = psutil.disk_usage(self._storage)[3]
        if disk_usage < halt_rec_th:
            dt = datetime.datetime.fromtimestamp(timestamp / 1000)
            officestr = (str(office[0]) + "c" + str(office[1])).replace(
                "-", "n").replace(".", "d")
            mp4path = self._storage + "/" + officestr + "/" + sensor + \
                "/" + str(dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
            os.makedirs(mp4path, exist_ok=True)
            mp4file = mp4path + "/" + str(timestamp) + ".mp4"

            # perform a straight copy to fix negative timestamps for chrome
            list(run(["/usr/local/bin/ffmpeg", "-f", "mp4", "-i", path,
                      "-c", "copy", mp4file]))

            sinfo = probe(mp4file)
            sinfo.update({
                "sensor": sensor,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "time": timestamp,
                "path": mp4file[len(self._storage) + 1:],
            })
        else:
            print("Disk full: recording halted", flush=True)
            sinfo = None

        if local_office:
            if sinfo and sinfo["bandwidth"]:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": sinfo["bandwidth"]})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage).percent
            if disk_usage >= warn_disk_th:
                level = "fatal" if disk_usage >= fatal_disk_th else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                message = text["halt recording"].format(disk_usage) \
                    if disk_usage >= halt_rec_th \
                    else text["disk usage"].format(disk_usage)
                db_alt.ingest({
                    "time": int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": message,
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

            # ingest recording locally
            if sinfo:
                db_rec = DBIngest(host=dbhost, index="recordings",
                                  office=office)
                db_rec.ingest(sinfo)
        else:
            # ingest recording in the cloud
            if sinfo:
                db_s = DBQuery(host=dbhost, index="sensors",
                               office=sinfo["office"])
                sensor = list(db_s.search("_id='" + sinfo["sensor"] + "'",
                                          size=1))
                if sensor:
                    # remove status
                    sensor[0]["_source"].pop("status", None)

                    # denormalize address
                    sinfo["address"] = sensor[0]["_source"]["address"]

                    # calculate a hash code for the sensor
                    m = hashlib.md5()
                    m.update(json.dumps(sensor[0]["_source"],
                                        ensure_ascii=False).encode('utf-8'))
                    md5 = m.hexdigest()

                    # locate the sensor record in the cloud
                    db_sc = DBQuery(host=dbhost, index="sensors", office="")
                    sensor_c = list(db_sc.search("md5='" + md5 + "'",
                                                 size=1))
                    if not sensor_c:
                        # if not available, ingest a sensor record in the
                        # cloud
                        sensor_c = [{
                            "_source": sensor[0]["_source"].copy()
                        }]
                        sensor_c[0]["_source"]["md5"] = md5
                        db_sc = DBIngest(host=dbhost, index="sensors",
                                         office="")
                        print("Ingest sensor: {}".format(
                            sensor_c[0]["_source"]), flush=True)
                        sensor_c[0] = db_sc.ingest(sensor_c[0]["_source"])

                    # replace the cloud sensor id and ingest the recording
                    sinfo["sensor"] = sensor_c[0]["_id"]
                    print("Ingest recording: {}".format(sinfo), flush=True)
                    db_rec = DBIngest(host=dbhost, index="recordings",
                                      office="")
                    db_rec.ingest(sinfo)

                    # copy local analytics to the cloud
                    db_a = DBQuery(host=dbhost, index="analytics",
                                   office=sinfo["office"])
                    data = []
                    for r in db_a.search(
                            'sensor="' + sensor[0]["_id"] +
                            '" and office:[' + str(office[0]) + ',' +
                            str(office[1]) + '] and time>=' +
                            str(sinfo["time"]) + ' and time<=' +
                            str(sinfo["time"] + sinfo["duration"] * 1000),
                            size=10000):
                        r["_source"]["sensor"] = sinfo["sensor"]
                        data.append(r["_source"])
                    db_ac = DBIngest(host=dbhost, index="analytics",
                                     office="")
                    print("Ingest analytics: {}".format(len(data)),
                          flush=True)
                    db_ac.ingest_bulk(data)
"lat": office[0], "lon": office[1], }, "status": "processing", "skip": every_nth_frame, }) break except Exception as e: print("Exception: "+str(e), flush=True) time.sleep(10) # compete for a sensor connection while not stop: try: print("Searching...", flush=True) for sensor in dbs.search("sensor:'camera' and status:'idle' and algorithm='crowd-counting' and office:["+str(office[0])+","+str(office[1])+"]"): try: # compete (with other va instances) for a sensor r=dbs.update(sensor["_id"],{"status":"streaming"},version=sensor["_version"]) # stream from the sensor print("Connected to "+sensor["_id"]+"...",flush=True) connect(sensor,algorithm,sensor["_source"]["url"]) # if exit, there is somehting wrong r=dbs.update(sensor["_id"],{"status":"disconnected"}) if stop: break except Exception as e: print("Exception: "+str(e), flush=True)
    try:
        print("Probing for width & height", flush=True)
        sinfo = probe(rtspuri)
        for stream in sinfo["streams"]:
            if "coded_width" in stream:
                width = int(stream["coded_width"])
            if "coded_height" in stream:
                height = int(stream["coded_height"])
    except Exception as e:
        print("Exception: " + str(e), flush=True)

    if not width or not height:
        print("Unknown width & height, skipping", flush=True)
        continue
    sinfo["resolution"] = {
        "width": width,
        "height": height
    }

    try:
        found = list(
            dbs.search("sensor:'camera' and " + camid[0] + "='" +
                       camid[1] + "'", size=1))
        if not found:
            template = list(
                dbp.search(camid[0] + "='" + camid[1] + "'", size=1))
            if template:
                print("Ingesting", flush=True)
                record = desc if desc else {}
                record.update(sinfo)
                record.update(template[0]["_source"])
                record.update({
                    'sensor': 'camera',
                    'model': 'ip_camera',
                    'url': rtspuri,
                    'status': 'idle',
                })
                dbi.ingest(record)