from threading import Event
from signal import signal, SIGTERM
from concurrent.futures import ThreadPoolExecutor
from os import environ as env  # 'env' below reads configuration from the environment

# DBIngest and the trigger classes (ImbalanceTrigger, OccupencyTrigger, CPUTrigger)
# come from this service's own modules; 'text' is its localized string table.

office = list(map(float, env["OFFICE"].split(",")))
dbhost = env["DBHOST"]

stop = Event()
def quit_service(signum, sigframe):
    stop.set()
signal(SIGTERM, quit_service)

# register trigger service in the DB
dbt = DBIngest(index="services", office=office, host=dbhost)
dbt.wait(stop)
rt = dbt.ingest({
    "name": text["alert trigger"],
    "service": text["triggers"],
    "status": "active",
})

# run the three triggers until SIGTERM sets the stop event
imbalance = ImbalanceTrigger()
occupency = OccupencyTrigger()
cpu = CPUTrigger()
with ThreadPoolExecutor(3) as e:
    e.submit(imbalance.loop, stop)
    e.submit(occupency.loop, stop)
    e.submit(cpu.loop, stop)

# unregister the service on exit
dbt.delete(rt["_id"])
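# The three trigger objects above are only assumed to expose a loop(stop) method
# that runs until the SIGTERM handler sets the stop event. A minimal sketch of
# that shape (class name, interval, and evaluate() are hypothetical; the real
# triggers live in this service's own modules):

class HypotheticalTrigger:
    """Minimal shape of a trigger: poll, evaluate, emit alerts until stopped."""

    interval = 30  # assumed seconds between polls

    def loop(self, stop):
        # 'stop' is the threading.Event set by the SIGTERM handler above
        while not stop.is_set():
            try:
                self.evaluate()  # hypothetical: query analytics data, ingest alerts
            except Exception as e:
                print("Trigger exception: " + str(e), flush=True)
            stop.wait(self.interval)  # returns early once stop is set

    def evaluate(self):
        pass  # placeholder for the actual trigger logic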
time.sleep(10)

# compete for a sensor connection
while not stop.is_set():
    try:
        print("Searching...", flush=True)
        for sensor in dbs.search("sensor:'camera' and status:'idle' and algorithm:'crowd-counting' and office:[" + str(office[0]) + "," + str(office[1]) + "]"):
            try:
                # compete (with other va instances) for a sensor
                r = dbs.update(sensor["_id"], {"status": "streaming"}, version=sensor["_version"])

                # stream from the sensor
                print("Connected to " + sensor["_id"] + "...", flush=True)
                connect(sensor, algorithm, sensor["_source"]["url"])

                # if connect() returns, something went wrong with the stream
                r = dbs.update(sensor["_id"], {"status": "disconnected"})
                if stop.is_set(): break
            except Exception as e:
                print("Exception: " + str(e), flush=True)
    except Exception as e:
        print("Exception: " + str(e), flush=True)
    time.sleep(10)

# delete the algorithm instance
dba.delete(algorithm["_id"])
class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        # Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        # Clients
        self.db_alg = DBIngest(host=self.dbhost, index="algorithms", office=self.office)
        self.db_inf = DBIngest(host=self.dbhost, index="analytics", office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost, index="sensors", office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()
        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False

    def start(self):
        logger.info(" ### Starting Feeder ### ")

        logger.debug("Waiting for VA startup")
        r = requests.Response()
        r.status_code = 400

        while r.status_code != 200 and r.status_code != 201:
            try:
                r = requests.get(self.vahost)
            except Exception as e:
                r = requests.Response()
                r.status_code = 400

            time.sleep(10)

        # Register Algorithm
        logger.debug("Registering as algorithm in the DB")

        while True:
            try:
                self.alg_id = self.db_alg.ingest({
                    "name": "object_detection",
                    "office": {
                        "lat": self.office[0],
                        "lon": self.office[1]
                    },
                    "status": "idle",
                    "skip": self.every_nth_frame,
                })["_id"]
                break
            except Exception as e:
                logger.debug("Register algo exception: " + str(e))
                time.sleep(10)

        self.mqtttopic = "smtc_va_inferences_" + self.alg_id

        camera_monitor_thread = Thread(target=self.monitor_cameras, daemon=True)

        logger.debug("Starting working threads")
        self._threadflag = True
        self.startmqtt()
        self.observer.start()
        camera_monitor_thread.start()

        logger.debug("Waiting for interrupt...")
        camera_monitor_thread.join()
        self.observer.join()

    def stop(self):
        logger.info(" ### Stopping Feeder ### ")
        self._threadflag = False

        logger.debug("Unregistering algorithm from DB")
        self.db_alg.delete(self.alg_id)

        self.mqttclient.loop_stop()
        self.observer.stop()

    def startmqtt(self):
        self.mqttclient = mqtt.Client("feeder_" + self.alg_id)
        self.mqttclient.connect(self.mqtthost)
        self.mqttclient.on_message = self.mqtt_handler
        self.mqttclient.loop_start()

        self.mqttclient.subscribe(self.mqtttopic)

    def mqtt_handler(self, client, userdata, message):
        m_in = json.loads(str(message.payload.decode("utf-8", "ignore")))

        for tag in m_in["tags"]:
            m_in[tag] = m_in["tags"][tag]
        del m_in["tags"]

        m_in["time"] = m_in["real_base"] + m_in["timestamp"]
        # convert to milliseconds
        m_in["time"] = int(m_in["time"] / 1000000)

        self.inference_cache.append(m_in)
        if len(self.inference_cache) >= self.batchsize:
            try:
                self.db_inf.ingest_bulk(self.inference_cache[:self.batchsize])
                self.inference_cache = self.inference_cache[self.batchsize:]
            except Exception as e:
                logger.debug("Ingest Error: " + str(e))

    def monitor_cameras(self):
        logger.debug("Starting Sensor Monitor Thread")
        while self._threadflag:
            logger.debug("Searching for sensors...")

            try:
                for sensor in self.db_sensors.search(
                        "sensor:'camera' and status:'idle' and office:[" +
                        str(self.office[0]) + "," + str(self.office[1]) + "]"):
                    logger.debug(sensor)
                    try:
                        fswatch = None
                        logger.debug("Sensor found! " + sensor["_id"])
" + sensor["_id"]) logger.debug("Setting sensor " + sensor["_id"] + " to streaming") r = self.db_sensors.update(sensor["_id"], {"status": "streaming"}, version=sensor["_version"]) logger.debug( "Setting algorithm to streaming from sensor " + sensor["_id"]) r = self.db_alg.update(self.alg_id, { "source": sensor["_id"], "status": "processing" }) # Attempt to POST to VA service jsonData = { "source": { "uri": sensor["_source"]["url"], "type": "uri" }, "tags": { "algorithm": self.alg_id, "sensor": sensor["_id"], "office": { "lat": self.office[0], "lon": self.office[1], }, }, "parameters": { "every-nth-frame": self.every_nth_frame, "recording_prefix": "recordings/" + sensor["_id"], "method": "mqtt", "address": self.mqtthost, "clientid": self.alg_id, "topic": self.mqtttopic }, } folderpath = os.path.join( os.path.realpath(self.recording_volume), sensor["_id"]) if not os.path.exists(folderpath): os.makedirs(folderpath) logger.debug("Adding folder watch for " + folderpath) filehandler = FSHandler( sensor=sensor["_id"], office=self.office, dbhost=self.dbhost, rec_volume=self.recording_volume) fswatch = self.observer.schedule(filehandler, folderpath, recursive=True) try: logger.info("Posting Request to VA Service") r = requests.post(self.vahost + "/object_detection/2", json=jsonData, timeout=10) r.raise_for_status() pipeline_id = None if r.status_code == 200: logger.debug("Started pipeline " + r.text) pipeline_id = int(r.text) while r.status_code == 200: logger.debug("Querying status of pipeline") r = requests.get(self.vahost + "/object_detection/2/" + str(pipeline_id) + "/status", timeout=10) r.raise_for_status() jsonValue = r.json() if "avg_pipeline_latency" not in jsonValue: jsonValue["avg_pipeline_latency"] = 0 state = jsonValue["state"] try: logger.debug("fps: ") logger.debug(str(jsonValue)) except: logger.debug("error") logger.debug("Pipeline state is " + str(state)) if state == "COMPLETED" or state == "ABORTED" or state == "ERROR": logger.debug("Pipeline ended") break self.db_alg.update( self.alg_id, { "performance": jsonValue["avg_fps"], "latency": jsonValue["avg_pipeline_latency"] * 1000 }) time.sleep(10) logger.debug("Setting sensor " + sensor["_id"] + " to disconnected") r = self.db_sensors.update( sensor["_id"], {"status": "disconnected"}) except requests.exceptions.RequestException as e: logger.error( "Feeder: Request to VA Service Failed: " + str(e)) logger.debug("Setting sensor " + sensor["_id"] + " to idle") r = self.db_sensors.update(sensor["_id"], {"status": "idle"}) except Exception as e: logger.error("Feeder Exception: " + str(e)) if fswatch: self.observer.unschedule(fswatch) del (filehandler) logger.debug("Setting algorithm to idle") r = self.db_alg.update(self.alg_id, {"status": "idle"}) break except Exception as e: print(e, flush=True) time.sleep(5) logger.debug("Sensor monitor thread done")
            try:
                # compete (with other va instances) for a sensor
                r = dbs.update(sensor["_id"], {"status": "streaming"},
                               seq_no=sensor["_seq_no"],
                               primary_term=sensor["_primary_term"])

                # stream from the sensor
                print("Connected to " + sensor["_id"] + "...", flush=True)
                connect(sensor["_id"], sensor["_source"]["location"],
                        sensor["_source"]["url"], algorithm,
                        sensor["_source"]["algorithm"],
                        sensor["_source"]["resolution"],
                        sensor["_source"]["zonemap"])

                # if connect() returns, something went wrong with the stream
                r = dbs.update(sensor["_id"], {"status": "disconnected"})
                if stop.is_set(): break
            except Exception as e:
                print("Exception in count-crowd search sensor: " + str(e), flush=True)
    except Exception as e:
        print("Exception in count-crowd sensor connection: " + str(e), flush=True)
    time.sleep(10)

# delete the algorithm instance
dba.delete(algorithm)
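# Passing the document's _seq_no/_primary_term (or _version in the older
# variant further up) back to update() is what lets several analytics
# instances compete for the same idle sensor: only the first conditional
# update wins, the rest hit a version conflict and move on. A sketch of the
# same pattern written directly against the official Elasticsearch Python
# client (7.x), assuming a "sensors" index and database host; the sample's
# DBQuery wrapper hides these details:

from elasticsearch import Elasticsearch, ConflictError

es = Elasticsearch("http://db:9200")  # assumed database host

def try_claim_sensor(sensor_hit):
    """Atomically move an idle sensor to 'streaming'.

    Returns True if this instance won the race, False if another instance
    updated the document first (version conflict).
    """
    try:
        es.update(
            index="sensors",
            id=sensor_hit["_id"],
            body={"doc": {"status": "streaming"}},
            # apply only if nobody changed the document since we read it
            if_seq_no=sensor_hit["_seq_no"],
            if_primary_term=sensor_hit["_primary_term"],
        )
        return True
    except ConflictError:
        return False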
while not stop.is_set():
    print("Searching...", flush=True)
    for index in indexes:
        if stop.is_set(): break
        db = DBQuery(index=index, office=office, host=dbhost)
        try:
            for r in db.search("time<now-" + str(retention_time * 1000), size=500):
                if stop.is_set(): break

                # delete the record
                db.delete(r["_id"])

                # delete the path file
                if "path" in r["_source"]:
                    try:
                        os.remove(storage + "/" + r["_source"]["path"])
                        os.remove(storage + "/" + r["_source"]["path"] + ".png")
                    except Exception as e:
                        pass
        except Exception as e:
            print("Exception: " + str(e), flush=True)
    stop.wait(service_interval)

if rs: dbs.delete(rs["_id"])
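# The retention filter is a relative-time range over the millisecond "time"
# field that the feeder writes: everything older than retention_time seconds
# matches. With a hypothetical one-day retention:

retention_time = 86400  # assumed: one day, in seconds
query = "time<now-" + str(retention_time * 1000)
print(query)  # time<now-86400000 -> records whose 'time' is older than 24 hours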