Example #1
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        #Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        #Clients
        self.db_alg = DBIngest(host=self.dbhost,
                               index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost,
                               index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost,
                                  index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False
Example #2
    def __init__(self, database, constants):
        """
        The constructor of StateTracker.

        Creates a DB query object, creates the necessary state representation
        dicts, and calls reset.

        Parameters:
            database (dict): The database with format dict(long: dict)
            constants (dict): Loaded constants in dict

        """
        # DB lookup helper
        self.db_helper = DBQuery(database)
        # Goal key for the whole dialogue; defaults to 'ticket'
        self.match_key = usersim_default_key
        # Dict of intents: key is the intent, value is its index
        self.intents_dict = convert_list_to_dict(all_intents)
        # Number of intents
        self.num_intents = len(all_intents)
        # Dict of slots: key is the slot, value is its index
        self.slots_dict = convert_list_to_dict(all_slots)
        # Number of slots
        self.num_slots = len(all_slots)
        # Maximum number of dialogue rounds allowed; beyond this the dialogue fails
        self.max_round_num = constants['run']['max_round_num']
        # Zero state of the dialogue state, i.e. containing no information at all
        self.none_state = np.zeros(self.get_state_size())
        # Initialize the StateTracker
        self.reset()
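
For reference, a minimal sketch of how this constructor might be invoked. The pickle file name and the constants literal are illustrative assumptions; only the dict shapes are implied by the code above (database as dict(long: dict), and constants['run']['max_round_num']).

import pickle

# Hypothetical inputs; the file name and values are placeholders, not from the example.
with open('movie_db.pkl', 'rb') as f:
    database = pickle.load(f)  # expected shape: dict(long: dict)

constants = {'run': {'max_round_num': 20}}

tracker = StateTracker(database, constants)
print(tracker.num_intents, tracker.num_slots)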
Example #3
    def _hint(self, indexes, office):
        try:
            hints = {}
            for index in indexes:
                db = DBQuery(index=index, office=office, host=dbhost)
                hints[index] = db.hints(size=100)
            return hints
        except Exception as e:
            return str(e)
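
A hedged usage sketch for the helper above; "sensors" and "algorithms" are index names taken from elsewhere on this page, and office/dbhost come from the enclosing scope. Note that the helper signals failure by returning the exception text, so callers must type-check the result.

# Illustrative call only.
hints = self._hint(["sensors", "algorithms"], office)
if isinstance(hints, str):  # error text is returned on failure
    print("hint query failed: " + hints, flush=True)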
Example #4
    def __init__(self, database, max_round_num, all_slots):

        self.db_helper = DBQuery(database)
        self.intents_dict = {i: intent for i, intent in enumerate(all_intents)}
        self.num_intents = len(all_intents)
        self.slots_dict = {i: x for i, x in enumerate(all_slots)}
        self.num_slots = len(all_slots)
        self.max_round_num = max_round_num
        self.none_state = np.zeros(self.get_state_size())
        self.reset()
Example #5
    def __init__(self, sensor, office, dbhost, rec_volume):
        self.sensor = sensor
        self.office = office
        self.db_rec = DBIngest(host=dbhost, index="recordings", office=office)
        self.db_sensors = DBQuery(host=dbhost, index="sensors", office=office)
        self.recording_volume = rec_volume

        self.last_file = None
        self.finalize_timer = None
        self.record_cache = []
        self.timeout = 80  # finalize timeout in seconds; should track the recording chunk length
Example #6
class Streamer(object):
    def __init__(self):
        super(Streamer, self).__init__()
        self._sensors = {}
        Thread(target=self._watcher_thread).start()
        self._dbs = DBQuery(index="sensors", office=office, host=dbhost)

    def get(self, sensor):
        if sensor not in self._sensors: return (None, None)
        return (self._sensors[sensor]["rtspuri"],
                self._sensors[sensor]["rtmpuri"])

    def set(self, sensor, rtspuri, rtmpuri, simulation):
        if sensor in self._sensors and self._sensors[sensor][
                "status"] == "streaming":
            return self._sensors[sensor]["status"]
        p = self._spawn(rtspuri, rtmpuri, simulation)
        if p.poll() is None:
            self._sensors[sensor] = {
                "rtspuri": rtspuri,
                "rtmpuri": rtmpuri,
                "status": "streaming",
                "process": p,
            }
            return self._sensors[sensor]["status"]
        return "disconnected"

    def _update(self, sensor, status="streaming"):
        sinfo = {"status": status}
        self._dbs.update(sensor, sinfo)

    def _spawn(self, rtspuri, rtmpuri, simulation=False):
        cmd = [
            "ffmpeg", "-i", rtspuri, "-vcodec", "copy", "-an", "-f", "flv",
            rtmpuri
        ]
        if simulation:
            cmd = [
                "ffmpeg", "-i", rtspuri, "-vcodec", "libx264", "-preset:v",
                "ultrafast", "-tune:v", "zerolatency", "-an", "-f", "flv",
                rtmpuri
            ]
        print(cmd, flush=True)
        p = subprocess.Popen(cmd)
        return p

    def _watcher_thread(self):
        while True:
            for sensor1 in self._sensors:
                if self._sensors[sensor1]["process"].poll() is not None:
                    self._sensors[sensor1]["status"] = "disconnected"
                    self._update(sensor1, status="disconnected")

            time.sleep(30)
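
A minimal, hypothetical driver for Streamer; the sensor id and URIs below are placeholders.

streamer = Streamer()
status = streamer.set("camera-01",
                      rtspuri="rtsp://camera-host/stream",        # placeholder
                      rtmpuri="rtmp://rtmp-host/live/camera-01",  # placeholder
                      simulation=False)
if status == "streaming":
    rtspuri, rtmpuri = streamer.get("camera-01")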
Example #7
    def __init__(self, database, max_round_num):

        self.db_helper = DBQuery(database)
        self.match_key = usersim_default_key
        self.intents_dict = convert_list_to_dict(all_intents)
        self.num_intents = len(all_intents)
        self.slots_dict = convert_list_to_dict(all_slots)
        self.num_slots = len(all_slots)
        self.max_round_num = max_round_num
        self.none_state = np.zeros(self.get_state_size())
        self.reset()
Example #8

class ImbalanceTrigger(Trigger):
    def __init__(self):
        super(ImbalanceTrigger, self).__init__()
        self._dbs = DBQuery(index="sensors", office=office, host=dbhost)
        self._dba = DBQuery(index="algorithms", office=office, host=dbhost)

    def trigger(self, stop):
        stop.wait(service_interval[2])
        info = []

        try:
            nsensors = {
                "total":
                self._dbs.count("type='camera'"),
                "streaming":
                self._dbs.count("type='camera' and status='streaming'"),
                "idle":
                self._dbs.count("type='camera' and status='idle'"),
            }
            nalgorithms = {
                "total": self._dba.count("name:*"),
            }
        except Exception as e:
            print("Exception: " + str(e), flush=True)
            return info

        if nsensors["total"] > nsensors["streaming"] + nsensors["idle"]:
            info.append({
                "fatal": [{
                    "message":
                    text["check sensor"].format(nsensors["total"] -
                                                nsensors["streaming"] -
                                                nsensors["idle"]),
                    "args":
                    nsensors,
                }]
            })

        if nalgorithms["total"] > nsensors["streaming"] + nsensors["idle"]:
            info.append({
                "warning": [{
                    "message":
                    text("imbalance").format(
                        nalgorithms["total"],
                        nsensors["streaming"] + nsensors["idle"]),
                    "args": {
                        "nalgorithms": nalgorithms["total"],
                        "nsensors": nsensors["streaming"] + nsensors["idle"],
                    },
                }],
            })

        return info
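
A sketch of how this trigger might be polled, assuming the base Trigger class leaves scheduling to the caller and stop is a threading.Event (as the stop.wait(...) call implies); this driver is not part of the original example.

from threading import Event

stop = Event()
trigger = ImbalanceTrigger()
while not stop.is_set():
    for alert in trigger.trigger(stop):  # returns a list of alert dicts
        print(alert, flush=True)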
Example #9
    def _bucketize(self, index, queries, field, size, office):
        db = DBQuery(index=index, office=office, host=dbhost)
        try:
            buckets = db.bucketize(queries, [field], size)
        except Exception as e:
            return str(e)

        # reformat buckets to have str keys
        buckets1 = {}
        if field in buckets:
            for k in buckets[field]:
                buckets1[str(k)] = buckets[field][k]
        return buckets1
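
An illustrative invocation, assuming the query syntax used elsewhere on this page; the index, query, and field are example values.

buckets = self._bucketize(index="sensors",
                          queries="type='camera'",
                          field="status",
                          size=10,
                          office=office)
# e.g. {"streaming": 3, "idle": 1} after the str-key reformatting above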
Example #10
class SensorsHandler(web.RequestHandler):
    def __init__(self, app, request, **kwargs):
        super(SensorsHandler, self).__init__(app, request, **kwargs)
        self.executor = ThreadPoolExecutor(4)
        self._db = DBQuery(index="sensors", office=office, host=dbhost)
        self._owt = OWTAPI()

    def check_origin(self, origin):
        return True

    def _room_details(self, sensor, name, room, stream):
        details = {
            "sensor": sensor,
            "name": name,
            "room": room,
            "stream": stream,
        }
        if webrtchost:
            details["url"] = "{}?roomid={}&streamid={}&office={}".format(
                webrtchost, room, stream,
                quote(",".join(list(map(str, office)))))
        return details

    @run_on_executor
    def _create_room(self, sensor):
        r = list(
            self._db.search("_id='{}' and status='streaming'".format(sensor),
                            size=1))
        if not r: return (404, "Sensor Not Found")

        location = r[0]["_source"]["location"]
        protocol = "udp" if 'simsn' in r[0]['_source'].keys() else "tcp"
        name = "{},{}:{}:{}:{}".format(location["lat"], location["lon"],
                                       r[0]["_source"]["type"],
                                       r[0]["_source"]["subtype"], r[0]["_id"])
        room, stream = watcher.get(name)
        if room and stream:
            return self._room_details(sensor, name, room, stream)

        room = self._owt.create_room(name=name, p_limit=streaming_limit)
        rtsp_url = r[0]["_source"]["url"]
        stream = self._owt.start_streaming_ins(
            room=room, rtsp_url=rtsp_url, protocol=protocol) if room else None
        if not stream: return (503, "Exception when posting")

        watcher.set(name, room, stream)
        return self._room_details(sensor, name, room, stream)

    @gen.coroutine
    def post(self):
        sensor = unquote(self.get_argument("sensor"))

        r = yield self._create_room(sensor)
        if isinstance(r, dict):
            self.write(r)
        else:
            self.set_status(r[0], r[1])
Example #11

    def __init__(self, database, constants):
        """
        The constructor of StateTracker.

        Creates a DB query object, creates the necessary state representation
        dicts, and calls reset.

        Parameters:
            database (dict): The database with format dict(long: dict)
            constants (dict): Loaded constants in dict
        """

        self.db_helper = DBQuery(database)
        self.match_key = usersim_default_key
        self.intents_dict = convert_list_to_dict(all_intents)
        self.num_intents = len(all_intents)
        self.slots_dict = convert_list_to_dict(all_slots)
        self.num_slots = len(all_slots)
        self.max_round_num = constants['run']['max_round_num']
        self.none_state = np.zeros(self.get_state_size())
        self.reset()
Example #12
class ImbalanceTrigger(Trigger):
    def __init__(self):
        super(ImbalanceTrigger,self).__init__()
        self._dbs=DBQuery(index="sensors",office=office,host=dbhost)
        self._dba=DBQuery(index="algorithms",office=office,host=dbhost)

    def trigger(self):
        time.sleep(service_interval[2])
        info=[]

        try:
            nsensors={
                "total": self._dbs.count("sensor:*"),
                "streaming": self._dbs.count("status:'streaming'"),
                "idle": self._dbs.count("status:'idle'"),
            }
            nalgorithms={
                "total": self._dba.count("name:*"),
            }
        except Exception as e:
            print("Exception: "+str(e), flush=True)
            return info
   
        if nsensors["total"]>nsensors["streaming"]+nsensors["idle"]:
            info.append({
                "fatal": [{ 
                    "message": "Check sensor: #disconnected="+str(nsensors["total"]-nsensors["streaming"]-nsensors["idle"]),
                    "args": nsensors,
                }]
            })

        if nalgorithms["total"]>nsensors["streaming"]+nsensors["idle"]:
            info.append({
                "warning": [{
                    "message": "Imbalance: #analytics="+str(nalgorithms["total"])+",#sensors="+str(nsensors["streaming"]+nsensors["idle"]),
                    "args": {
                        "nalgorithms": nalgorithms["total"],
                        "nsensors": nsensors["streaming"]+nsensors["idle"],
                    },
                }],
            })

        return info
Example #13

def db_query_test():
    database = DBQuery()

    score = 5.0
    text = "Final test"

    database.insert_comment(score, text)
    score_rows = database.select_score()
    comment_rows = database.select_comment()

    database.close_db()

    print(f"\nscore_rows:\n{score_rows}\n")
    print(f"comment_rows:\n{comment_rows}\n")
    average_score = int(round(np.average(np.array(score_rows))))
    print(average_score)
    return render_template("DBQueryTest.html",
                           score=score_rows,
                           text=comment_rows)
Example #14

class NGINXRedirect(NGINX):
    def __init__(self, upstreams=None, stop=None):
        # avoid mutable/shared default arguments
        super(NGINXRedirect, self).__init__(
            upstreams if upstreams is not None else [],
            stop if stop is not None else Event())
        self._db = DBQuery(index="offices", office="", host=dbhost)
        self._saved = self._upstreams

    def _update_upstreams(self):
        changed = super(NGINXRedirect, self)._update_upstreams()
        updates = {s: self._upstreams[s] for s in self._saved}
        try:
            for office1 in self._db.search("location:*", size=100):
                location = office1["_source"]["location"]
                name = ("office" + str(location["lat"]) + "c" +
                        str(location["lon"])).replace("-",
                                                      "n").replace(".", "d")
                protocol, q, host = office1["_source"]["uri"].partition("://")
                host, c, port = host.partition(":")

                if name in self._upstreams:
                    ip = self._upstreams[name][2]
                else:
                    changed = True
                    try:
                        ip = gethostbyname(host)
                    except:
                        ip = "127.0.0.1"

                updates[name] = [host, c + port, ip]
        except:
            self._stop.wait(10)

        if not changed:
            for s in self._upstreams:
                if s not in updates:
                    changed = True
                    break

        self._upstreams = updates
        return changed
Example #15
#!/usr/bin/python3

from urllib.parse import unquote
from tornado import web, gen
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from db_query import DBQuery
from language import text, encode
import datetime
import os

dbhost = os.environ["DBHOST"]
proxyhost = os.environ["PROXYHOST"]

db = DBQuery(index="offices", office="", host=dbhost)
offices = {}


class RedirectHandler(web.RequestHandler):
    def __init__(self, app, request, **kwargs):
        super(RedirectHandler, self).__init__(app, request, **kwargs)
        self.executor = ThreadPoolExecutor(4)

    def check_origin(self, origin):
        return True

    @run_on_executor
    def _office_info(self, office):
        office_key = ",".join(map(str, office))
        if office_key in offices:
            if (datetime.datetime.now() -
Example #16
    def _search(self, index, queries, size, office):
        db = DBQuery(index=index, office=office, host=self.dbhost)
        try:
            return list(db.search(queries, size))
        except Exception as e:
            return str(e)
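
An illustrative call to this wrapper; the time-window query style mirrors Example #17, and the helper returns the error text on failure, so the result must be type-checked.

records = self._search("recordings", "time<now-86400000", size=100, office=office)
if isinstance(records, str):
    print("search failed: " + records, flush=True)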
Example #17
dbs = DBIngest(index="services", office=office, host=dbhost)
rs = None
if isinstance(office, list):
    dbs.wait(stop)
    rs = dbs.ingest({
        "name": text["cleanup"],
        "service": text["maintenance"],
        "status": "active",
    })

while not stop.is_set():
    print("Searching...", flush=True)
    for index in indexes:
        if stop.is_set(): break

        db = DBQuery(index=index, office=office, host=dbhost)
        try:
            for r in db.search("time<now-" + str(retention_time * 1000),
                               size=500):
                if stop.is_set(): break

                # delete the record
                db.delete(r["_id"])

                # delete the path file
                if "path" in r["_source"]:
                    try:
                        os.remove(storage + "/" + r["_source"]["path"])
                        os.remove(storage + "/" + r["_source"]["path"] +
                                  ".png")
                    except Exception as e:
Example #18
    def _rec2db(self, office, sensor, timestamp, path):
        dt = datetime.datetime.fromtimestamp(timestamp / 1000)
        officestr = (str(office[0]) + "c" + str(office[1])).replace(
            "-", "n").replace(".", "d")
        mp4path = self._storage + "/" + officestr + "/" + sensor + "/" + str(
            dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
        os.makedirs(mp4path, exist_ok=True)
        mp4file = mp4path + "/" + str(timestamp) + ".mp4"

        list(
            run([
                "/usr/bin/ffmpeg", "-f", "mp4", "-i", path, "-c", "copy",
                mp4file
            ]))
        list(
            run([
                "/usr/bin/ffmpeg", "-i", mp4file, "-vf", "scale=640:360",
                "-frames:v", "1", mp4file + ".png"
            ]))
        sinfo = probe(mp4file)

        sinfo.update({
            "sensor": sensor,
            "office": {
                "lat": office[0],
                "lon": office[1],
            },
            "time": timestamp,
            "path": mp4file[len(self._storage) + 1:],
        })

        if local_office:
            # calculate total bandwidth
            bandwidth = 0
            for stream1 in sinfo["streams"]:
                if "bit_rate" in stream1:
                    bandwidth = bandwidth + stream1["bit_rate"]
            if bandwidth:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": bandwidth})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage).percent
            if disk_usage > 75 and sensor_index:
                level = "fatal" if disk_uage > 85 else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                db_alt.ingest({
                    "time":
                    int(
                        time.mktime(datetime.datetime.now().timetuple()) *
                        1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": "Disk usage: " + str(disk_usage) + "%",
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

            # ingest recording local
            db_rec = DBIngest(host=dbhost, index="recordings", office=office)
            db_rec.ingest(sinfo)
        else:
            # ingest recording cloud
            db_rec = DBIngest(host=dbhost, index="recordings_c", office="")
            db_rec.ingest(sinfo)
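
The run and probe helpers used above come from the surrounding codebase and are not shown on this page. A minimal, hypothetical stand-in, assuming run streams subprocess output lines (hence the list(run([...])) draining idiom) and probe wraps ffprobe's JSON output; the real probe presumably also normalizes numeric fields such as bit_rate.

import json
import subprocess

def run(cmd):
    # Yield decoded stdout lines from the command; list(run(...)) drains it.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in p.stdout:
        yield line.decode("utf-8", "ignore")
    p.wait()

def probe(mediafile):
    # Return ffprobe's stream/format info for a media file as a dict.
    out = subprocess.check_output([
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", "-show_format", mediafile
    ])
    return json.loads(out)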
Example #19
signal(SIGTERM, quit_service)

# register trigger
dbt=DBIngest(index="triggers",office=office,host=dbhost)
while True:
    try:
        rt=dbt.ingest({
            "name": "health_check",
            "status": "processing",
        })
        break
    except Exception as e:
        print("Exception: "+str(e), flush=True)
        time.sleep(10)

dbs=DBQuery(index="sensors",office=office,host=dbhost)
dba=DBQuery(index="algorithms",office=office,host=dbhost)
dbat=DBIngest(index="alerts",office=office,host=dbhost)
    
while True:
    try:
        nsensors={
            "total": dbs.count("sensor:*"),
            "streaming": dbs.count("status:'streaming'"),
            "idle": dbs.count("status:'idle'"),
        }
        nalgorithms={
            "total": dba.count("name:*"),
        }
   
        alerts={}
Example #20
            },
        }, topic=mqtt_topic)

        rec2db.stop()
        raise Exception("VA exited. This should not happen.")

    except:
        print(traceback.format_exc(), flush=True)
    print("connect stopped", flush=True)

def quit_service(signum, sigframe):
    stop.set()

signal(SIGTERM, quit_service)
dba=DBIngest(host=dbhost, index="algorithms", office=office)
dbs=DBQuery(host=dbhost, index="sensors", office=office)

# register algorithm (while waiting for db to startup)
dba.wait(stop)
algorithm=dba.ingest({
    "name": text["crowd-counting"],
    "office": {
        "lat": office[0],
        "lon": office[1],
    },
    "status": "processing",
    "skip": every_nth_frame,
})["_id"]

# compete for a sensor connection
while not stop.is_set():
Example #21
class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        #Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        #Clients
        self.db_alg = DBIngest(host=self.dbhost,
                               index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost,
                               index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost,
                                  index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False

    def start(self):

        logger.info(" ### Starting Feeder ### ")

        logger.debug("Waiting for VA startup")
        r = requests.Response()
        r.status_code = 400
        while r.status_code != 200 and r.status_code != 201:
            try:
                r = requests.get(self.vahost)
            except Exception as e:
                r = requests.Response()
                r.status_code = 400

            time.sleep(10)

        # Register Algorithm
        logger.debug("Registering as algorithm in the DB")

        while True:
            try:
                self.alg_id = self.db_alg.ingest({
                    "name": "object_detection",
                    "office": {
                        "lat": self.office[0],
                        "lon": self.office[1]
                    },
                    "status": "idle",
                    "skip": self.every_nth_frame,
                })["_id"]
                break
            except Exception as e:
                logger.debug("Register algo exception: " + str(e))
                time.sleep(10)

        self.mqtttopic = "smtc_va_inferences_" + self.alg_id

        camera_monitor_thread = Thread(target=self.monitor_cameras,
                                       daemon=True)

        logger.debug("Starting working threads")
        self._threadflag = True
        self.startmqtt()
        self.observer.start()
        camera_monitor_thread.start()

        logger.debug("Waiting for interrupt...")
        camera_monitor_thread.join()
        self.observer.join()

    def stop(self):
        logger.info(" ### Stopping Feeder ### ")

        self._threadflag = False

        logger.debug("Unregistering algorithm from DB")
        self.db_alg.delete(self.alg_id)

        self.mqttclient.loop_stop()
        self.observer.stop()

    def startmqtt(self):
        self.mqttclient = mqtt.Client("feeder_" + self.alg_id)
        self.mqttclient.connect(self.mqtthost)
        self.mqttclient.on_message = self.mqtt_handler
        self.mqttclient.loop_start()
        self.mqttclient.subscribe(self.mqtttopic)

    def mqtt_handler(self, client, userdata, message):
        m_in = json.loads(str(message.payload.decode("utf-8", "ignore")))

        for tag in m_in["tags"]:
            m_in[tag] = m_in["tags"][tag]

        del m_in["tags"]

        m_in["time"] = m_in["real_base"] + m_in["timestamp"]
        # convert to milliseconds
        m_in["time"] = int(m_in["time"] / 1000000)
        self.inference_cache.append(m_in)
        if len(self.inference_cache) >= self.batchsize:
            try:
                self.db_inf.ingest_bulk(self.inference_cache[:self.batchsize])
                self.inference_cache = self.inference_cache[self.batchsize:]
            except Exception as e:
                logger.debug("Ingest Error: " + str(e))

    def monitor_cameras(self):
        logger.debug("Starting Sensor Monitor Thread")
        while self._threadflag:
            logger.debug("Searching for sensors...")

            try:
                for sensor in self.db_sensors.search(
                        "sensor:'camera' and status:'idle' and office:[" +
                        str(self.office[0]) + "," + str(self.office[1]) + "]"):
                    logger.debug(sensor)
                    try:
                        fswatch = None
                        logger.debug("Sensor found! " + sensor["_id"])
                        logger.debug("Setting sensor " + sensor["_id"] +
                                     " to streaming")
                        r = self.db_sensors.update(sensor["_id"],
                                                   {"status": "streaming"},
                                                   version=sensor["_version"])

                        logger.debug(
                            "Setting algorithm to streaming from sensor " +
                            sensor["_id"])
                        r = self.db_alg.update(self.alg_id, {
                            "source": sensor["_id"],
                            "status": "processing"
                        })

                        # Attempt to POST to VA service
                        jsonData = {
                            "source": {
                                "uri": sensor["_source"]["url"],
                                "type": "uri"
                            },
                            "tags": {
                                "algorithm": self.alg_id,
                                "sensor": sensor["_id"],
                                "office": {
                                    "lat": self.office[0],
                                    "lon": self.office[1],
                                },
                            },
                            "parameters": {
                                "every-nth-frame": self.every_nth_frame,
                                "recording_prefix":
                                "recordings/" + sensor["_id"],
                                "method": "mqtt",
                                "address": self.mqtthost,
                                "clientid": self.alg_id,
                                "topic": self.mqtttopic
                            },
                        }

                        folderpath = os.path.join(
                            os.path.realpath(self.recording_volume),
                            sensor["_id"])
                        if not os.path.exists(folderpath):
                            os.makedirs(folderpath)

                        logger.debug("Adding folder watch for " + folderpath)
                        filehandler = FSHandler(
                            sensor=sensor["_id"],
                            office=self.office,
                            dbhost=self.dbhost,
                            rec_volume=self.recording_volume)
                        fswatch = self.observer.schedule(filehandler,
                                                         folderpath,
                                                         recursive=True)

                        try:
                            logger.info("Posting Request to VA Service")
                            r = requests.post(self.vahost +
                                              "/object_detection/2",
                                              json=jsonData,
                                              timeout=10)
                            r.raise_for_status()
                            pipeline_id = None

                            if r.status_code == 200:
                                logger.debug("Started pipeline " + r.text)
                                pipeline_id = int(r.text)

                            while r.status_code == 200:
                                logger.debug("Querying status of pipeline")
                                r = requests.get(self.vahost +
                                                 "/object_detection/2/" +
                                                 str(pipeline_id) + "/status",
                                                 timeout=10)
                                r.raise_for_status()
                                jsonValue = r.json()
                                if "avg_pipeline_latency" not in jsonValue:
                                    jsonValue["avg_pipeline_latency"] = 0
                                state = jsonValue["state"]
                                try:
                                    logger.debug("fps: ")
                                    logger.debug(str(jsonValue))
                                except:
                                    logger.debug("error")
                                logger.debug("Pipeline state is " + str(state))
                                if state == "COMPLETED" or state == "ABORTED" or state == "ERROR":
                                    logger.debug("Pipeline ended")
                                    break

                                self.db_alg.update(
                                    self.alg_id, {
                                        "performance":
                                        jsonValue["avg_fps"],
                                        "latency":
                                        jsonValue["avg_pipeline_latency"] *
                                        1000
                                    })

                                time.sleep(10)

                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to disconnected")
                            r = self.db_sensors.update(
                                sensor["_id"], {"status": "disconnected"})

                        except requests.exceptions.RequestException as e:
                            logger.error(
                                "Feeder: Request to VA Service Failed: " +
                                str(e))
                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to idle")
                            r = self.db_sensors.update(sensor["_id"],
                                                       {"status": "idle"})

                    except Exception as e:
                        logger.error("Feeder Exception: " + str(e))

                    if fswatch:
                        self.observer.unschedule(fswatch)
                        del filehandler

                    logger.debug("Setting algorithm to idle")
                    r = self.db_alg.update(self.alg_id, {"status": "idle"})
                    break
            except Exception as e:
                print(e, flush=True)

            time.sleep(5)

        logger.debug("Sensor monitor thread done")
Example #22
class OccupencyTrigger(Trigger):
    def __init__(self):
        super(OccupencyTrigger, self).__init__()
        self._db = DBQuery(index="analytics", office=office, host=dbhost)

    def trigger(self):
        time.sleep(service_interval[0])
        objects = ("", 0)
        seats = ("", 0)
        people = ("", 0)
        queue = ("", 0)
        try:
            for q in self._db.search(
                    "time>=now-" + str(args[0]) + " and (nobjects>" +
                    str(args[1]) + " or count.people>" + str(args[2]) +
                    " or nseats>" + str(args[3]) + " or count.queue>" +
                    str(args[4]) + ")",
                    size=75):

                if "nobjects" in q["_source"]:
                    if q["_source"]["nobjects"] > objects[1]:
                        objects = (q["_source"]["location"],
                                   q["_source"]["nobjects"])

                if "nseats" in q["_source"]:
                    if q["_source"]["nseats"] > seats[1]:
                        seats = (q["_source"]["location"],
                                 q["_source"]["nseats"])

                if "count" in q["_source"]:
                    if "people" in q["_source"]["count"]:
                        if q["_source"]["count"]["people"] > people[1]:
                            people = (q["_source"]["location"],
                                      q["_source"]["count"]["people"])

                    if "queue" in q["_source"]["count"]:
                        if q["_source"]["count"]["queue"] > queue[1]:
                            queue = (q["_source"]["location"],
                                     q["_source"]["count"]["queue"])

        except Exception as e:
            print("Exception: " + str(e), flush=True)

        info = []
        if objects[1] > 0:
            info.append({
                "location":
                objects[0],
                "warning": [{
                    "message":
                    "Traffic busy: #objects=" + str(objects[1]),
                    "args": {
                        "nobjects": objects[1],
                    },
                }],
            })
        if people[1] > 0:
            info.append({
                "location":
                people[0],
                "warning": [{
                    "message":
                    "Entrence crowded: #people=" + str(people[1]),
                    "args": {
                        "occupency": people[1],
                    }
                }],
            })
        if queue[1] > 0:
            info.append({
                "location":
                queue[0],
                "warning": [{
                    "message":
                    "Entrence crowded: #queue=" + str(queue[1]),
                    "args": {
                        "occupency": queue[1],
                    }
                }],
            })
        if seats[1] > 0:
            info.append({
                "location":
                seats[0],
                "warning": [{
                    "message": "Zone crowded: #seats=" + str(seats[1]),
                    "args": {
                        "nseats": seats[1],
                    }
                }],
            })
        return info
Example #23
class FSHandler(FileSystemEventHandler):
    def __init__(self, sensor, office, dbhost, rec_volume):
        self.sensor = sensor
        self.office = office
        self.db_rec = DBIngest(host=dbhost, index="recordings", office=office)
        self.db_sensors = DBQuery(host=dbhost, index="sensors", office=office)
        self.recording_volume = rec_volume

        self.last_file = None
        self.finalize_timer = None
        self.record_cache = []
        self.timeout = 80  # finalize timeout in seconds; should track the recording chunk length

    def on_created(self, event):
        if event.is_directory: return
        if event.src_path.endswith(".png"): return
        if self.last_file and (self.last_file == event.src_path): return
        if self.finalize_timer: self.finalize_timer.cancel()
        if self.last_file:
            try:
                self.ingest()
            except Exception as error:
                logger.error("Failed to ingest: %s %s\n" %
                             (self.last_file, error))

        logger.debug("Started recording new file! " + event.src_path)
        self.last_file = event.src_path

        del self.finalize_timer
        self.finalize_timer = Timer(self.timeout, self.ingest)
        self.finalize_timer.start()
        logger.debug("Started file watch timer for " + str(self.timeout) +
                     " seconds")

    def ffmpeg_convert(self, filename):
        with tempfile.TemporaryDirectory() as tmpdirname:
            filename = os.path.abspath(filename)
            tmpfilename = os.path.abspath(
                os.path.join(tmpdirname, os.path.basename(filename)))
            output = ""
            try:
                list(
                    run([
                        "/usr/bin/ffmpeg", "-i", filename, "-c", "copy",
                        tmpfilename
                    ]))
                shutil.move(tmpfilename, filename)
                list(
                    run([
                        "/usr/bin/ffmpeg", "-i", filename, "-vf",
                        "thumbnail,scale=640:360", "-frames:v", "1",
                        filename + ".png"
                    ]))
                return filename, probe(filename)
            except Exception as error:
                # not every exception carries .output (only CalledProcessError does)
                logger.error("Error converting mp4 with ffmpeg: %s %s" %
                             (error, getattr(error, "output", "")))
                raise

    def get_timestamp(self, filename):
        parsed = os.path.basename(filename).split('_')
        return int(int(parsed[-2]) / 1000000)

    def ingest(self):
        logger.debug("Finished recording file " + self.last_file)
        converted_file, sinfo = self.ffmpeg_convert(self.last_file)
        sinfo.update({
            "sensor":
            self.sensor,
            "office": {
                "lat": self.office[0],
                "lon": self.office[1],
            },
            "time":
            self.get_timestamp(converted_file),
            "path":
            os.path.abspath(converted_file).split(
                os.path.abspath(self.recording_volume) + "/")[1],
        })

        # calculate total bandwidth
        bandwidth = 0
        for stream1 in sinfo["streams"]:
            if "bit_rate" in stream1:
                bandwidth = bandwidth + stream1["bit_rate"]
        self.db_sensors.update(self.sensor, {"bandwidth": bandwidth})
        self.db_rec.ingest(sinfo)
        self.record_cache.append(sinfo)
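
A sketch wiring FSHandler to a watchdog Observer, the same pairing Example #21 uses; the sensor id, office coordinates, and paths are placeholders.

from watchdog.observers import Observer

handler = FSHandler(sensor="camera-01",    # placeholder sensor id
                    office=[0.0, 0.0],     # placeholder lat/lon
                    dbhost=dbhost,
                    rec_volume="/var/recordings")
observer = Observer()
observer.schedule(handler, "/var/recordings/camera-01", recursive=True)
observer.start()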
Example #24
    def __init__(self):
        super(OccupencyTrigger, self).__init__()
        self._db = DBQuery(index="analytics", office=office, host=dbhost)
Example #25
    def _rec2db(self, office, sensor, timestamp, path):
        disk_usage = psutil.disk_usage(self._storage).percent
        if disk_usage < halt_rec_th:
            dt = datetime.datetime.fromtimestamp(timestamp / 1000)
            officestr = (str(office[0]) + "c" + str(office[1])).replace(
                "-", "n").replace(".", "d")
            mp4path = self._storage + "/" + officestr + "/" + sensor + "/" + str(
                dt.year) + "/" + str(dt.month) + "/" + str(dt.day)
            os.makedirs(mp4path, exist_ok=True)
            mp4file = mp4path + "/" + str(timestamp) + ".mp4"

            # perform a straight copy to fix negative timestamp for chrome
            list(
                run([
                    "/usr/local/bin/ffmpeg", "-f", "mp4", "-i", path, "-c",
                    "copy", mp4file
                ]))

            sinfo = probe(mp4file)
            sinfo.update({
                "sensor": sensor,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "time": timestamp,
                "path": mp4file[len(self._storage) + 1:],
            })
        else:
            print("Disk full: recording halted", flush=True)
            sinfo = None

        if local_office:
            if sinfo["bandwidth"]:
                db_cam = DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": sinfo["bandwidth"]})

            # check disk usage and send alert
            disk_usage = psutil.disk_usage(self._storage).percent
            if disk_usage >= warn_disk_th:
                level = "fatal" if disk_usage >= fatal_disk_th else "warning"
                db_alt = DBIngest(host=dbhost, index="alerts", office=office)
                message = text["halt recording"].format(
                    disk_usage
                ) if disk_usage >= halt_rec_th else text["disk usage"].format(
                    disk_usage)
                db_alt.ingest({
                    "time":
                    int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": message,
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

        # ingest recording local
        if sinfo:
            print("Ingest recording: {}".format(sinfo), flush=True)
            office1 = office if local_office else ""

            # denormalize sensor address to recordings
            dbs = DBQuery(host=dbhost, index="sensors", office=office1)
            r = list(dbs.search("_id='" + sinfo["sensor"] + "'", size=1))
            if r: sinfo["address"] = r[0]["_source"]["address"]

            db_rec = DBIngest(host=dbhost, index="recordings", office=office1)
            db_rec.ingest(sinfo)
            "/usr/bin/cvlc", "-vvv", "--mtu=1200", file1, "--loop",
            ":sout=#gather:rtp{sdp=" + rtsp + ",port=" + str(rtp_port1) + "}",
            ":network-caching:1500", ":sout-all", ":sout-keep"
        ])
        time.sleep(10)


def quit_service(signum, sigframe):
    exit(143)


signal(SIGTERM, quit_service)

filters = [pattern for _ in range(ncameras)]
if dbhost and office:
    db = DBQuery(index="provisions", office=office, host=dbhost)
    db.wait()
    for r1 in db.search(
            "algorithm='{}' and office:[{},{}] and simfile:* and simsn:*".
            format(algorithm, office[0], office[1]),
            size=ncameras):
        m = re.search('[0-9]+$', r1["_source"]["simsn"])
        if not m: continue
        i = int(m.group(0))
        if i < ncameras: filters[i] = r1["_source"]["simfile"]

files = list(os.listdir(simulated_root))
with ThreadPoolExecutor(ncameras) as e:
    k = random.randint(0, ncameras)
    for i in range(ncameras):
        files1 = [f for f in files if re.search(filters[i], f)]
Example #27
dbhost = env.get("DBHOST", None)
sim_cameras = {}


def quit_service(signum, sigframe):
    exit(143)


signal(SIGTERM, quit_service)

dbi = None
dbs = None
dbp = None
if dbhost and office:
    dbi = DBIngest(index="sensors", office=office, host=dbhost)
    dbs = DBQuery(index="sensors", office=office, host=dbhost)
    dbp = DBQuery(index="provisions", office=office, host=dbhost)
    dbp.wait()


def get_passcodes(ip, port):
    if office and dbhost:

        def _bucketize(query):
            r = dbp.bucketize(query, ["passcode"], size=1000)
            if "passcode" in r:
                return [k for k in r["passcode"] if r["passcode"][k]]
            return []

        try:
            codes = _bucketize("passcode:* and ip={} and port={}".format(
Example #28

    def _cleanup_thread(self):
        owt = OWTAPI()
        dbs = DBQuery(index="sensors", office=office, host=dbhost)
        while not self._stop.is_set():
            todelete = []
            tostartstreamout = []
            tostopstreamout = []
            for name in self._rooms:
                try:
                    participants = owt.list_participants(
                        self._rooms[name]["room"])
                except:
                    participants = 0
                now = int(time.time())
                print(
                    "Watcher: room {} participant {} inactive {} stream-out status {}"
                    .format(name, participants,
                            now - self._rooms[name]["time"],
                            self._rooms[name]["stream_out"]["status"]),
                    flush=True)
                print(self._rooms[name], flush=True)
                if participants > 0:
                    self._rooms[name]["time"] = now
                elif now - self._rooms[name]["time"] > self._inactive:
                    todelete.append(name)

                if self._rooms[name]["stream_out"]["status"] == "start":
                    tostartstreamout.append(name)
                elif self._rooms[name]["stream_out"]["status"] == "stop":
                    tostopstreamout.append(name)

            for name in tostartstreamout:
                if self._rooms[name]["sensor"]["subtype"] != "mobile_camera":
                    continue
                sensor = self._rooms[name]["sensor"]
                stream1 = self._rooms[name]["stream_in"]
                room1 = self._rooms[name]["room"]
                rtmpurl = self._rooms[name]["stream_out"]["rtmpurl"]
                for _item in dbs.search("_id='{}'".format(sensor["id"]),
                                        size=1):
                    print(_item, flush=True)
                try:
                    stream1 = stream1 if stream1 else owt.list_streams(
                        room1)[0]
                except:
                    continue

                self._rooms[name]["stream_in"] = stream1
                if stream1 and rtmpurl:
                    try:
                        self._rooms[name]["stream_out"][
                            "stream"] = owt.start_streaming_outs(
                                room=room1, url=rtmpurl,
                                video_from=stream1)["id"]
                    except:
                        continue
                self._rooms[name]["stream_out"]["status"] = "streaming"
                try:
                    dbs.update(sensor["id"], {
                        "status": "disconnected",
                        "url": rtmpurl
                    })
                except:
                    continue

            for name in tostopstreamout:
                if self._rooms[name]["sensor"]["subtype"] != "mobile_camera":
                    continue
                stream1 = self._rooms[name]["stream_out"]["stream"]
                room1 = self._rooms[name]["room"]
                if stream1 and rtmpurl:
                    try:
                        owt.stop_streaming_outs(room1, stream1)
                    except:
                        continue
                self._rooms[name]["stream_out"]["status"] = "idle"

            for name in todelete:
                stream1 = self._rooms[name]["stream_in"]
                room1 = self._rooms[name]["room"]

                try:
                    streams = [stream1] if stream1 else owt.list_streams(room1)
                except:
                    streams = []

#                for stream1 in streams:
#                    print("Remove stream {}".format(stream1), flush=True)
#                    try:
#                        owt.delete_stream(room1,stream1)
#                    except:
#                        pass

                print("Remove room {}:{}".format(name, room1), flush=True)
                try:
                    owt.delete_room(room1)
                except:
                    pass

                self._rooms.pop(name, None)

            self._stop.wait(self._inactive / 3.0)
Example #29
                },
                "sensor": sensor["_id"],
                "algorithm": algorithm["_id"],
                "count": zonecount,
            })

        db.ingest_bulk(counts)
        time.sleep(1000)

def quit_service(signum, sigframe):
    global stop
    stop=True

signal(SIGTERM, quit_service)
dba=DBIngest(host=dbhost, index="algorithms", office=office)
dbs=DBQuery(host=dbhost, index="sensors", office=office)

# register algorithm (while waiting for db to startup)
while True:
    try:
        algorithm=dba.ingest({
            "name": "crowd-counting",
            "office": {
                "lat": office[0],
                "lon": office[1],
            },
            "status": "processing",
            "skip": every_nth_frame,
        })
        break
    except Exception as e:
Example #30
    def _stats(self, index, queries, fields, office):
        db = DBQuery(index=index, office=office, host=self.dbhost)
        try:
            return db.stats(queries, fields)
        except Exception as e:
            return str(e)
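
An illustrative call for this last wrapper, with the same error-as-string caveat as _search and _hint; the index, query, and field names are example values.

stats = self._stats("analytics", "time>=now-3600000", ["nobjects"], office)
if isinstance(stats, str):
    print("stats failed: " + stats, flush=True)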