Example #1
class MQTT2DB(object):
    def __init__(self, algorithm):
        super(MQTT2DB, self).__init__()
        self._mqtt = mqtt.Client("feeder_" + algorithm)

        while True:
            try:
                self._mqtt.connect(mqtthost)
                break
            except Exception as e:
                print("Exception: " + str(e), flush=True)
                time.sleep(10)

        self._db = DBIngest(host=dbhost, index="analytics", office=office)
        self._cache = []
        self._lock = Lock()
        self._timer = IntervalTimer(2.0, self.on_timer)

    def loop(self, topic):
        self._mqtt.on_message = self.on_message
        self._mqtt.subscribe(topic)
        self._timer.start()
        self._mqtt.loop_forever()

    def stop(self):
        self._timer.cancel()
        self._mqtt.disconnect()

    def on_message(self, client, userdata, message):
        try:
            r = json.loads(str(message.payload.decode("utf-8", "ignore")))
            r.update(r["tags"])
            del r["tags"]
            if "real_base" not in r: r["real_base"] = 0
            r["time"] = int((r["real_base"] + r["timestamp"]) / 1000000)

            if "objects" in r and scenario == "traffic":
                r["nobjects"] = int(len(r["objects"]))
            if "objects" in r and scenario == "stadium":
                r["count"] = {"people": len(r["objects"])}
        except Exception as e:
            print("Exception: " + str(e), flush=True)
            return  # r is undefined if parsing failed; skip caching
        self._lock.acquire()
        self._cache.append(r)
        self._lock.release()

    def on_timer(self):
        self._lock.acquire()
        bulk = self._cache
        self._cache = []
        self._lock.release()

        bulk_size = 500
        while len(bulk):
            try:
                self._db.ingest_bulk(bulk[:bulk_size])
                bulk = bulk[bulk_size:]
            except Exception as e:
                print("Exception: " + str(e), flush=True)
            time.sleep(0.25)
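A minimal driver for this class, assuming the module-level mqtthost, dbhost, and office settings used throughout these examples; the algorithm name and topic below are placeholders:

# Hypothetical wiring: connect, consume a topic, flush until interrupted.
feeder = MQTT2DB("object_detection")
try:
    feeder.loop("smtc_va_inferences_object_detection")  # blocks in loop_forever()
except KeyboardInterrupt:
    feeder.stop()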
Example #2
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        #Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        #Clients
        self.db_alg = DBIngest(host=self.dbhost,
                               index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost,
                               index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost,
                                  index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False
Example #3

def Provision(officestr):
    print("Provisioning...", flush=True)

    # populate db with simulated offices and provisionings
    with open("/run/secrets/sensor-info.json", encoding='utf-8') as fd:
        data = json.load(fd)
        dbp = DBIngest(index="provisions", office=office, host=dbhost)
        for office1 in data:
            if scenario != office1["scenario"]: continue
            location1 = office1["location"]
            if location1["lat"] != office[0] or location1["lon"] != office[1]:
                continue

            sensors = office1.pop("sensors")
            for s in sensors:
                s["office"] = location1
                if "ip" in s:  # convert IP to CIDR
                    if s["ip"].find("/") < 0:
                        s["ip"] = s["ip"] + "/32"
                    s["ip_text"] = s["ip"]  # dup for terms aggs
            dbp.ingest_bulk(sensors, refresh="wait_for")

            office1.pop("scenario")
            office1["uri"] = gwhost
            return office1

    raise Exception("Should not be here.")
Example #4

class Trigger(object):
    def __init__(self):
        super(Trigger, self).__init__()
        self.__db = DBIngest(index="alerts", office=office, host=dbhost)

    def trigger(self, stop):
        # overridden by subclasses; return a list of alert documents or None
        return None

    def loop(self, stop):
        while not stop.is_set():
            info = self.trigger(stop)
            if not info: continue

            for v in info:
                v.update({
                    "time": int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                })
                if "location" not in v:
                    v["location"] = v["office"]

            self.__db.ingest_bulk(info)
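A sketch of a concrete subclass, assuming psutil is available as in the recording examples; the 90% threshold and alert shape are illustrative:

class CPULoadTrigger(Trigger):
    def trigger(self, stop):
        # Sample CPU load; emit a warning alert document when busy.
        usage = psutil.cpu_percent(interval=5)
        if usage < 90:
            return None
        return [{
            "warning": [{
                "message": "CPU usage: " + str(usage) + "%",
                "args": {"cpu": usage},
            }],
        }]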
Example #5

    def __init__(self, pipeline, version="2"):
        super(RunVA, self).__init__()
        self._test_mqtt_connection()

        self._pipeline = pipeline
        self._version = version
        self._db = DBIngest(host=dbhost, index="algorithms", office=office)
        self._stop = None
Example #6
 def __init__(self, pipeline, version="2"):
     super(RunVA, self).__init__()
     self._pipeline = pipeline
     self._version = version
     self._db = DBIngest(host=dbhost, index="algorithms", office=office)
     self._maincontext = GLib.MainLoop().get_context()
     self._stop = None
     ModelManager.load_config("/home/models", {})
     PipelineManager.load_config("/home/pipelines", 1)
Example #7
    def __init__(self):
        super(MQTT2DB, self).__init__()

        self._db = DBIngest(host=dbhost, index="analytics", office=office)
        self._cache = []
        self._cond = Condition()

        self._mqtt = mqtt.Client()
        self._mqtt.on_message = self.on_message
        self._mqtt.on_disconnect = self.on_disconnect
Example #8
 def __init__(self):
     super(RunVA,self).__init__()
     # remove HTTP_PROXY
     print("__init__")
     env=os.environ.copy()
     env.pop("http_proxy",None)
     env.pop("HTTP_PROXY",None)
     self._va=Popen(["/usr/bin/python3","-m","openapi_server"],cwd="/home/video-analytics/app/server",env=env)
     self._db=DBIngest(host=dbhost, index="algorithms", office=office)
     self._stop=None
Example #9
    def __init__(self, sensor, office, dbhost, rec_volume):
        self.sensor = sensor
        self.office = office
        self.db_rec = DBIngest(host=dbhost, index="recordings", office=office)
        self.db_sensors = DBQuery(host=dbhost, index="sensors", office=office)
        self.recording_volume = rec_volume

        self.last_file = None
        self.finalize_timer = None
        self.record_cache = []
        self.timeout = 80  # finalize timeout in seconds; should track the recording chunk length
Example #10
def connect(sensor, algorithm, uri):
    db=DBIngest(host=dbhost, index="algorithms",office=office)
    db.update(algorithm["_id"], {
        "sensor": sensor["_id"],
    })
    db=DBIngest(host=dbhost, index="analytics", office=office)
    while True:
        counts=[]
        for i in range(100):
            zonecount={}
            for zonemap in sensor["_source"]["zonemap"]:
                zonecount["zone"+str(zonemap["zone"])]=int(random.random()*1000)

            counts.append({
                "time": int(time.mktime(datetime.datetime.now().timetuple())*1000),
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "sensor": sensor["_id"],
                "algorithm": algorithm["_id"],
                "count": zonecount,
            })

        db.ingest_bulk(counts)
        time.sleep(1000)
Example #11
def register_office():
    global db,r
    db=DBIngest(index="offices",office="",host=dbhost)
    while True: 
        try:
            r=db.ingest({
                "office": { 
                "lat": office[0],
                "lon": office[1],
                },
                "uri": host,
            },"$".join(map(str,office)))
            return
        except Exception as e:
            print("Exception: "+str(e), flush=True)
            time.sleep(10)
Example #12
class SensorsDBHandler(web.RequestHandler):
    def __init__(self, app, request, **kwargs):
        super(SensorsDBHandler, self).__init__(app, request, **kwargs)
        self.executor = ThreadPoolExecutor(4)
        self._dbi = DBIngest(index="sensors", office=office, host=dbhost)

    def check_origin(self, origin):
        return True

    @run_on_executor
    def _update(self, sensor, source):
        try:
            print("Ingesting", sensor, flush=True)
            r = self._dbi.ingest(source, refresh="wait_for")
            return r
        except Exception as e:
            print(str(e), flush=True)
            return str(e)

    @gen.coroutine
    def put(self):
        options = json.loads(self.request.body.decode('utf-8'))
        r = yield self._update(sensor=options["sensor"],
                               source=options["source"])
        if isinstance(r, str):
            self.set_status(400, encode(r))
            return

        self.write(r)
        self.set_status(200, 'OK')
        self.finish()
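A hedged sketch of wiring this handler into a Tornado application; the route and port are placeholders, not taken from the original service:

from tornado import ioloop, web

# Hypothetical app setup exposing the sensors-ingest endpoint.
app = web.Application([(r"/api/sensors", SensorsDBHandler)])
app.listen(8080)
ioloop.IOLoop.current().start()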
Example #13
    def __init__(self, algorithm):
        super(MQTT2DB, self).__init__()
        self._mqtt = mqtt.Client("feeder_" + algorithm)

        while True:
            try:
                self._mqtt.connect(mqtthost)
                break
            except Exception as e:
                print("Exception: " + str(e), flush=True)
                time.sleep(10)

        self._db = DBIngest(host=dbhost, index="analytics", office=office)
        self._cache = []
        self._lock = Lock()
        self._timer = IntervalTimer(2.0, self.on_timer)
Example #14
    def _rec2db(self, office, sensor, timestamp, path):
        dt = datetime.datetime.fromtimestamp(timestamp / 1000)
        officestr = (str(office[0]) + "c" + str(office[1])).replace(
            "-", "n").replace(".", "d")
        mp4path = self._storage + "/" + officestr + "/" + sensor + "/" + str(
            dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
        os.makedirs(mp4path, exist_ok=True)
        mp4file = mp4path + "/" + str(timestamp) + ".mp4"

        list(
            run([
                "/usr/bin/ffmpeg", "-f", "mp4", "-i", path, "-c", "copy",
                mp4file
            ]))
        list(
            run([
                "/usr/bin/ffmpeg", "-i", mp4file, "-vf", "scale=640:360",
                "-frames:v", "1", mp4file + ".png"
            ]))
        sinfo = probe(mp4file)

        sinfo.update({
            "sensor": sensor,
            "office": {
                "lat": office[0],
                "lon": office[1],
            },
            "time": timestamp,
            "path": mp4file[len(self._storage) + 1:],
        })

        if local_office:
            # calculate total bandwidth
            bandwidth = 0
            for stream1 in sinfo["streams"]:
                if "bit_rate" in stream1:
                    bandwidth = bandwidth + stream1["bit_rate"]
            if bandwidth:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": bandwidth})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage)[3]
            if disk_usage > 75 and sensor_index:
                level = "fatal" if disk_uage > 85 else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                db_alt.ingest({
                    "time":
                    int(
                        time.mktime(datetime.datetime.now().timetuple()) *
                        1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": "Disk usage: " + str(disk_usage) + "%",
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

            # ingest recording local
            db_rec = DBIngest(host=dbhost, index="recordings", office=office)
            db_rec.ingest(sinfo)
        else:
            # ingest recording cloud
            db_rec = DBIngest(host=dbhost, index="recordings_c", office="")
            db_rec.ingest(sinfo)
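Several of these examples call run and probe helpers that are not shown. A minimal sketch of compatible helpers, assuming ffmpeg/ffprobe are installed; the originals may differ, e.g. by normalizing numeric fields such as bit_rate:

import json
import subprocess

def run(cmd):
    # Execute the command, discarding output; list(run(...)) then yields [].
    subprocess.run(cmd, check=True, stdout=subprocess.DEVNULL,
                   stderr=subprocess.DEVNULL)
    return []

def probe(filename):
    # Return ffprobe's stream/format report as a dict.
    out = subprocess.check_output([
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", "-show_format", filename,
    ])
    return json.loads(out.decode("utf-8"))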
Example #15
                  env["OFFICE"].split(","))) if "OFFICE" in env else None
dbhost = env.get("DBHOST", None)
sim_cameras = {}


def quit_service(signum, sigframe):
    exit(143)


signal(SIGTERM, quit_service)

dbi = None
dbs = None
dbp = None
if dbhost and office:
    dbi = DBIngest(index="sensors", office=office, host=dbhost)
    dbs = DBQuery(index="sensors", office=office, host=dbhost)
    dbp = DBQuery(index="provisions", office=office, host=dbhost)
    dbp.wait()


def get_passcodes(ip, port):
    if office and dbhost:

        def _bucketize(query):
            r = dbp.bucketize(query, ["passcode"], size=1000)
            if "passcode" in r:
                return [k for k in r["passcode"] if r["passcode"][k]]
            return []

        try:
Example #16
dbhost=os.environ["DBHOST"]

dbt=None
rt=None

def quit_service(signum, sigframe):
    try:
        if dbt and rt: dbt.delete(rt["_id"])
    except Exception as e:
        pass
    exit(143)

signal(SIGTERM, quit_service)

# register trigger
dbt=DBIngest(index="triggers",office=office,host=dbhost)
while True:
    try:
        rt=dbt.ingest({
            "name": "health_check",
            "status": "processing",
        })
        break
    except Exception as e:
        print("Exception: "+str(e), flush=True)
        time.sleep(10)

dbs=DBQuery(index="sensors",office=office,host=dbhost)
dba=DBQuery(index="algorithms",office=office,host=dbhost)
dbat=DBIngest(index="alerts",office=office,host=dbhost)
    
Example #17
    def _rec2db(self, office, sensor, timestamp, path):
        disk_usage = psutil.disk_usage(self._storage)[3]
        if disk_usage < halt_rec_th:
            dt = datetime.datetime.fromtimestamp(timestamp / 1000)
            officestr = (str(office[0]) + "c" + str(office[1])).replace(
                "-", "n").replace(".", "d")
            mp4path = self._storage + "/" + officestr + "/" + sensor + "/" + str(
                dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
            os.makedirs(mp4path, exist_ok=True)
            mp4file = mp4path + "/" + str(timestamp) + ".mp4"

            # perform a straight copy to fix negative timestamp for chrome
            list(
                run([
                    "/usr/local/bin/ffmpeg", "-f", "mp4", "-i", path, "-c",
                    "copy", mp4file
                ]))

            sinfo = probe(mp4file)
            sinfo.update({
                "sensor": sensor,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "time": timestamp,
                "path": mp4file[len(self._storage) + 1:],
            })
        else:
            print("Disk full: recording halted", flush=True)
            sinfo = None

        if local_office:
            if sinfo["bandwidth"]:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": sinfo["bandwidth"]})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage).percent
            if disk_usage >= warn_disk_th:
                level = "fatal" if disk_usage >= fatal_disk_th else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                message = text["halt recording"].format(
                    disk_usage
                ) if disk_usage >= halt_rec_th else text["disk usage"].format(
                    disk_usage)
                db_alt.ingest({
                    "time":
                    int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": message,
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

        # ingest recording local
        if sinfo:
            print("Ingest recording: {}".format(sinfo), flush=True)
            office1 = office if local_office else ""

            # denormalize sensor address to recordings
            dbs = DBQuery(host=dbhost, index="sensors", office=office1)
            r = list(dbs.search("_id='" + sinfo["sensor"] + "'", size=1))
            if r: sinfo["address"] = r[0]["_source"]["address"]

            db_rec = DBIngest(host=dbhost, index="recordings", office=office1)
            db_rec.ingest(sinfo)
Example #18
import os
import json

dbhost = os.environ["DBHOST"]
office = list(map(float, os.environ["OFFICE"].split(",")))
officestr = '$'.join(map(str, office))
proxyhost = os.environ["PROXYHOST"]
scenario = os.environ["SCENARIO"]
zone = os.environ["ZONE"]

print("Provisioning...", flush=True)

# populate db with simulated offices and provisionings
with open("/run/secrets/sensor-info.json", encoding='utf-8') as fd:
    data = json.load(fd)
    dbo = DBIngest(index="offices", office="", host=dbhost)
    dbp = DBIngest(index="provisions", office=office, host=dbhost)
    for office1 in data:
        if scenario != office1["scenario"]: continue
        location1 = office1["location"]
        if location1["lat"] != office[0] or location1["lon"] != office[1]:
            continue

        sensors = office1.pop("sensors")
        for s in sensors:
            s["office"] = location1
            if "ip" in s:  # convert IP to CIDR
                if s["ip"].find("/") < 0:
                    s["ip"] = s["ip"] + "/32"
        dbp.ingest_bulk(sensors)
Example #19
class FSHandler(FileSystemEventHandler):
    def __init__(self, sensor, office, dbhost, rec_volume):
        self.sensor = sensor
        self.office = office
        self.db_rec = DBIngest(host=dbhost, index="recordings", office=office)
        self.db_sensors = DBQuery(host=dbhost, index="sensors", office=office)
        self.recording_volume = rec_volume

        self.last_file = None
        self.finalize_timer = None
        self.record_cache = []
        self.timeout = 80  # finalize timeout in seconds; should track the recording chunk length

    def on_created(self, event):
        if event.is_directory: return
        if event.src_path.endswith(".png"): return
        if self.last_file and (self.last_file == event.src_path): return
        if self.finalize_timer: self.finalize_timer.cancel()
        if self.last_file:
            try:
                self.ingest()
            except Exception as error:
                logger.error("Failed to ingest: %s %s\n" %
                             (self.last_file, error))

        logger.debug("Started recording new file! " + event.src_path)
        self.last_file = event.src_path

        del (self.finalize_timer)
        self.finalize_timer = Timer(self.timeout, self.ingest)
        self.finalize_timer.start()
        logger.debug("Started file watch timer for " + str(self.timeout) +
                     " seconds")

    def ffmpeg_convert(self, filename):
        with tempfile.TemporaryDirectory() as tmpdirname:
            filename = os.path.abspath(filename)
            tmpfilename = os.path.abspath(
                os.path.join(tmpdirname, os.path.basename(filename)))
            output = ""
            try:
                list(
                    run([
                        "/usr/bin/ffmpeg", "-i", filename, "-c", "copy",
                        tmpfilename
                    ]))
                shutil.move(tmpfilename, filename)
                list(
                    run([
                        "/usr/bin/ffmpeg", "-i", filename, "-vf",
                        "thumbnail,scale=640:360", "-frames:v", "1",
                        filename + ".png"
                    ]))
                return filename, probe(filename)
            except Exception as error:
                logger.error("Error converting mp4 with ffmpeg: %s %s" %
                             (error, error.output))
                raise

    def get_timestamp(self, filename):
        parsed = os.path.basename(filename).split('_')
        return int(int(parsed[-2]) / 1000000)

    def ingest(self):
        logger.debug("Finished recording file " + self.last_file)
        converted_file, sinfo = self.ffmpeg_convert(self.last_file)
        sinfo.update({
            "sensor":
            self.sensor,
            "office": {
                "lat": self.office[0],
                "lon": self.office[1],
            },
            "time":
            self.get_timestamp(converted_file),
            "path":
            os.path.abspath(converted_file).split(
                os.path.abspath(self.recording_volume) + "/")[1],
        })

        # calculate total bandwidth
        bandwidth = 0
        for stream1 in sinfo["streams"]:
            if "bit_rate" in stream1:
                bandwidth = bandwidth + stream1["bit_rate"]
        self.db_sensors.update(self.sensor, {"bandwidth": bandwidth})
        self.db_rec.ingest(sinfo)
        self.record_cache.append(sinfo)
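A hedged wiring sketch for this handler with watchdog's Observer; the sensor id, coordinates, and paths below are placeholders:

# Hypothetical setup: watch one camera's recording folder.
observer = Observer()
handler = FSHandler(sensor="camera1", office=[45.5, -122.6],
                    dbhost="localhost", rec_volume="/var/www/mp4")
observer.schedule(handler, "/var/www/mp4/camera1", recursive=True)
observer.start()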
Example #20
service_interval = float(os.environ["SERVICE_INTERVAL"])  # in seconds
indexes = os.environ["INDEXES"].split(",")
office = list(map(
    float, os.environ["OFFICE"].split(","))) if "OFFICE" in os.environ else "*"
dbhost = os.environ["DBHOST"]
storage = "/var/www/mp4"

stop = Event()


def quit_service(signum, sigframe):
    stop.set()


signal(SIGTERM, quit_service)
dbs = DBIngest(index="services", office=office, host=dbhost)
rs = None
if isinstance(office, list):
    dbs.wait(stop)
    rs = dbs.ingest({
        "name": text["cleanup"],
        "service": text["maintenance"],
        "status": "active",
    })

while not stop.is_set():
    print("Searching...", flush=True)
    for index in indexes:
        if stop.is_set(): break

        db = DBQuery(index=index, office=office, host=dbhost)
Example #21
class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        #Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        #Clients
        self.db_alg = DBIngest(host=self.dbhost,
                               index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost,
                               index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost,
                                  index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False

    def start(self):

        logger.info(" ### Starting Feeder ### ")

        logger.debug("Waiting for VA startup")
        r = requests.Response()
        r.status_code = 400
        while r.status_code != 200 and r.status_code != 201:
            try:
                r = requests.get(self.vahost)
            except Exception as e:
                r = requests.Response()
                r.status_code = 400

            time.sleep(10)

        # Register Algorithm
        logger.debug("Registering as algorithm in the DB")

        while True:
            try:
                self.alg_id = self.db_alg.ingest({
                    "name": "object_detection",
                    "office": {
                        "lat": self.office[0],
                        "lon": self.office[1]
                    },
                    "status": "idle",
                    "skip": self.every_nth_frame,
                })["_id"]
                break
            except Exception as e:
                logger.debug("Register algo exception: " + str(e))
                time.sleep(10)

        self.mqtttopic = "smtc_va_inferences_" + self.alg_id

        camera_monitor_thread = Thread(target=self.monitor_cameras,
                                       daemon=True)

        logger.debug("Starting working threads")
        self._threadflag = True
        self.startmqtt()
        self.observer.start()
        camera_monitor_thread.start()

        logger.debug("Waiting for interrupt...")
        camera_monitor_thread.join()
        self.observer.join()

    def stop(self):
        logger.info(" ### Stopping Feeder ### ")

        self._threadflag = False

        logger.debug("Unregistering algorithm from DB")
        self.db_alg.delete(self.alg_id)

        self.mqttclient.loop_stop()
        self.observer.stop()

    def startmqtt(self):
        self.mqttclient = mqtt.Client("feeder_" + self.alg_id)
        self.mqttclient.connect(self.mqtthost)
        self.mqttclient.on_message = self.mqtt_handler
        self.mqttclient.loop_start()
        self.mqttclient.subscribe(self.mqtttopic)

    def mqtt_handler(self, client, userdata, message):
        m_in = json.loads(str(message.payload.decode("utf-8", "ignore")))

        for tag in m_in["tags"]:
            m_in[tag] = m_in["tags"][tag]

        del m_in["tags"]

        m_in["time"] = m_in["real_base"] + m_in["timestamp"]
        # convert to milliseconds
        m_in["time"] = int(m_in["time"] / 1000000)
        self.inference_cache.append(m_in)
        if len(self.inference_cache) >= self.batchsize:
            try:
                self.db_inf.ingest_bulk(self.inference_cache[:self.batchsize])
                self.inference_cache = self.inference_cache[self.batchsize:]
            except Exception as e:
                logger.debug("Ingest Error: " + str(e))

    def monitor_cameras(self):
        logger.debug("Starting Sensor Monitor Thread")
        while self._threadflag:
            logger.debug("Searching for sensors...")

            try:
                for sensor in self.db_sensors.search(
                        "sensor:'camera' and status:'idle' and office:[" +
                        str(self.office[0]) + "," + str(self.office[1]) + "]"):
                    logger.debug(sensor)
                    try:
                        fswatch = None
                        logger.debug("Sensor found! " + sensor["_id"])
                        logger.debug("Setting sensor " + sensor["_id"] +
                                     " to streaming")
                        r = self.db_sensors.update(sensor["_id"],
                                                   {"status": "streaming"},
                                                   version=sensor["_version"])

                        logger.debug(
                            "Setting algorithm to streaming from sensor " +
                            sensor["_id"])
                        r = self.db_alg.update(self.alg_id, {
                            "source": sensor["_id"],
                            "status": "processing"
                        })

                        # Attempt to POST to VA service
                        jsonData = {
                            "source": {
                                "uri": sensor["_source"]["url"],
                                "type": "uri"
                            },
                            "tags": {
                                "algorithm": self.alg_id,
                                "sensor": sensor["_id"],
                                "office": {
                                    "lat": self.office[0],
                                    "lon": self.office[1],
                                },
                            },
                            "parameters": {
                                "every-nth-frame": self.every_nth_frame,
                                "recording_prefix":
                                "recordings/" + sensor["_id"],
                                "method": "mqtt",
                                "address": self.mqtthost,
                                "clientid": self.alg_id,
                                "topic": self.mqtttopic
                            },
                        }

                        folderpath = os.path.join(
                            os.path.realpath(self.recording_volume),
                            sensor["_id"])
                        if not os.path.exists(folderpath):
                            os.makedirs(folderpath)

                        logger.debug("Adding folder watch for " + folderpath)
                        filehandler = FSHandler(
                            sensor=sensor["_id"],
                            office=self.office,
                            dbhost=self.dbhost,
                            rec_volume=self.recording_volume)
                        fswatch = self.observer.schedule(filehandler,
                                                         folderpath,
                                                         recursive=True)

                        try:
                            logger.info("Posting Request to VA Service")
                            r = requests.post(self.vahost +
                                              "/object_detection/2",
                                              json=jsonData,
                                              timeout=10)
                            r.raise_for_status()
                            pipeline_id = None

                            if r.status_code == 200:
                                logger.debug("Started pipeline " + r.text)
                                pipeline_id = int(r.text)

                            while r.status_code == 200:
                                logger.debug("Querying status of pipeline")
                                r = requests.get(self.vahost +
                                                 "/object_detection/2/" +
                                                 str(pipeline_id) + "/status",
                                                 timeout=10)
                                r.raise_for_status()
                                jsonValue = r.json()
                                if "avg_pipeline_latency" not in jsonValue:
                                    jsonValue["avg_pipeline_latency"] = 0
                                state = jsonValue["state"]
                                try:
                                    logger.debug("fps: ")
                                    logger.debug(str(jsonValue))
                                except:
                                    logger.debug("error")
                                logger.debug("Pipeline state is " + str(state))
                                if state == "COMPLETED" or state == "ABORTED" or state == "ERROR":
                                    logger.debug("Pipeline ended")
                                    break

                                self.db_alg.update(
                                    self.alg_id, {
                                        "performance":
                                        jsonValue["avg_fps"],
                                        "latency":
                                        jsonValue["avg_pipeline_latency"] *
                                        1000
                                    })

                                time.sleep(10)

                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to disconnected")
                            r = self.db_sensors.update(
                                sensor["_id"], {"status": "disconnected"})

                        except requests.exceptions.RequestException as e:
                            logger.error(
                                "Feeder: Request to VA Service Failed: " +
                                str(e))
                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to idle")
                            r = self.db_sensors.update(sensor["_id"],
                                                       {"status": "idle"})

                    except Exception as e:
                        logger.error("Feeder Exception: " + str(e))

                    if fswatch:
                        self.observer.unschedule(fswatch)
                        del (filehandler)

                    logger.debug("Setting algorithm to idle")
                    r = self.db_alg.update(self.alg_id, {"status": "idle"})
                    break
            except Exception as e:
                print(e, flush=True)

            time.sleep(5)

        logger.debug("Sensor monitor thread done")
Example #22
from configuration import env

office = list(map(float, env["OFFICE"].split(",")))
dbhost = env["DBHOST"]

stop = Event()


def quit_service(signum, sigframe):
    stop.set()


signal(SIGTERM, quit_service)

# register trigger
dbt = DBIngest(index="services", office=office, host=dbhost)
dbt.wait(stop)
rt = dbt.ingest({
    "name": text["alert trigger"],
    "service": text["triggers"],
    "status": "active",
})

imbalance = ImbalanceTrigger()
occupency = OccupencyTrigger()
cpu = CPUTrigger()
with ThreadPoolExecutor(3) as e:
    e.submit(imbalance.loop, stop)
    e.submit(occupency.loop, stop)
    e.submit(cpu.loop, stop)
Example #23

import re

dbhost = env["DBHOST"]
dbchost = env.get("DBCHOST", None)
office = list(map(float, env["OFFICE"].split(",")))
replicas = list(map(int, env["REPLICAS"].split(",")))


def quit_service(signum, sigframe):
    exit(143)


signal(SIGTERM, quit_service)

# wait until DB is ready
dbs = DBIngest(index="sensors", office=office, host=dbhost)
while True:
    try:
        if dbs.health(): break
    except:
        print("Waiting for DB...", flush=True)
    time.sleep(1)

officestr = dbs.office()
settings = {
    "offices": {
        "settings": {
            "index": {
                "number_of_shards": 1,
                "number_of_replicas": replicas[0],
            },
Example #24
        rec2db.start()

        runva=RunVA("entrance_counting", stop=stop)
        runva.loop(sensor, location, uri, algorithm, algorithmName)

        rec2db.stop()
        raise Exception("VA exited. This should not happen.")

    except:
        print(traceback.format_exc(), flush=True)

def quit_service(signum, sigframe):
    stop.set()

signal(SIGTERM, quit_service)
dba=DBIngest(host=dbhost, index="algorithms", office=office)
dbs=DBQuery(host=dbhost, index="sensors", office=office)

# register algorithm (while waiting for db to startup)
dba.wait(stop)
algorithm=dba.ingest({
    "name": text["entrance-counting"],
    "office": {
        "lat": office[0],
        "lon": office[1],
    },
    "status": "processing",
    "skip": every_nth_frame,
})["_id"]

# compete for a sensor connection
Example #25
                    "lon": office[1],
                },
                "sensor": sensor["_id"],
                "algorithm": algorithm["_id"],
                "count": zonecount,
            })

        db.ingest_bulk(counts)
        time.sleep(1000)

def quit_service(signum, sigframe):
    global stop
    stop=True

signal(SIGTERM, quit_service)
dba=DBIngest(host=dbhost, index="algorithms", office=office)
dbs=DBQuery(host=dbhost, index="sensors", office=office)

# register algorithm (while waiting for db to startup)
while True:
    try:
        algorithm=dba.ingest({
            "name": "crowd-counting",
            "office": {
                "lat": office[0],
                "lon": office[1],
            },
            "status": "processing",
            "skip": every_nth_frame,
        })
        break
Example #26
 def __init__(self):
     super(RunVA, self).__init__()
     self._va = Popen(["/usr/bin/python3", "-m", "openapi_server"],
                      cwd="/home/video-analytics/app/server")
     self._db = DBIngest(host=dbhost, index="algorithms", office=office)
     self._stop = None
Example #27

 def __init__(self):
     super(Trigger, self).__init__()
     self.__db = DBIngest(index="alerts", office=office, host=dbhost)
Example #28
dbt = None
rt = None


def quit_service(signum, sigframe):
    try:
        if dbt and rt: dbt.delete(rt["_id"])
    except Exception as e:
        pass
    exit(143)


signal(SIGTERM, quit_service)

# register trigger
dbt = DBIngest(index="services", office=office, host=dbhost)
while True:
    try:
        rt = dbt.ingest({
            "name": "triggers",
            "service": "alert trigger",
            "status": "active",
        })
        break
    except Exception as e:
        print("Exception: " + str(e), flush=True)
        time.sleep(10)

imbalance = ImbalanceTrigger()
occupency = OccupencyTrigger()
cpu = CPUTrigger()
Example #29
class RunVA(object):
    def __init__(self):
        super(RunVA, self).__init__()
        # remove HTTP_PROXY
        env = os.environ.copy()
        env.pop("http_proxy", None)
        env.pop("HTTP_PROXY", None)
        self._va = Popen(["/usr/bin/python3", "-m", "openapi_server"],
                         cwd="/home/video-analytics/app/server",
                         env=env)
        self._db = DBIngest(host=dbhost, index="algorithms", office=office)
        self._stop = None

    def stop(self):
        self._stop = True

    def loop(self, sensor, location, uri, algorithm, topic):
        req = {
            "source": {
                "uri": uri,
                "type": "uri"
            },
            "destination": {
                "type": "mqtt",
                "host": mqtthost,
                "clientid": algorithm,
                "topic": topic
            },
            "tags": {
                "sensor": sensor,
                "location": location,
                "algorithm": algorithm,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
            },
            "parameters": {
                "every-nth-frame": every_nth_frame,
                "recording_prefix": "recordings/" + sensor
            },
        }

        while True:
            try:
                r = requests.post(vahost + "/object_detection/2",
                                  json=req,
                                  timeout=10)
                if r.status_code == 200:
                    pid = int(r.text)
                    break
            except Exception as e:
                print("Exception: " + str(e), flush=True)
            time.sleep(10)

        while not self._stop:
            r = requests.get(vahost + "/object_detection/2/" + str(pid) +
                             "/status",
                             timeout=10)
            if r.status_code != 200:
                print("pipeline status: " + str(r.status_code), flush=True)
                print(r.text, flush=True)
                break

            pinfo = r.json()
            print(pinfo, flush=True)

            state = pinfo["state"]
            if state == "COMPLETED" or state == "ABORTED" or state == "ERROR":
                print("pineline ended with " + str(state), flush=True)
                break

            if state == "RUNNING":
                if "avg_pipeline_latency" not in pinfo:
                    pinfo["avg_pipeline_latency"] = 0
                self._db.update(
                    algorithm, {
                        "sensor": sensor,
                        "performance": pinfo["avg_fps"],
                        "latency": pinfo["avg_pipeline_latency"] * 1000,
                    })
            time.sleep(10)

        print("exiting va pipeline", flush=True)
        self._va.terminate()
                raise Exception("VA exited. This should not happen.")

    except Exception as e:
        print("Exception: " + str(e), flush=True)


def quit_service(signum, sigframe):
    global stop
    stop = True
    if mqtt2db: mqtt2db.stop()
    if rec2db: rec2db.stop()
    if runva: runva.stop()


signal(SIGTERM, quit_service)
dba = DBIngest(host=dbhost, index="algorithms", office=office)
dbs = DBQuery(host=dbhost, index="sensors", office=office)

# register algorithm (while waiting for db to startup)
while True:
    try:
        algorithm = dba.ingest({
            "name": "object_detection",
            "office": {
                "lat": office[0],
                "lon": office[1],
            },
            "status": "processing",
            "skip": every_nth_frame,
        })["_id"]
        break