# NOTE: these example snippets assume module-level globals defined elsewhere
# (dbhost, mqtthost, gwhost, proxyhost, office, officestr, scenario, zone,
# local_office, disk thresholds, text, logger, ...) and project helpers
# (DBIngest, DBQuery, FSHandler, IntervalTimer, Observer, run, probe, ...).
# The imports below cover the stdlib and third-party modules they reference.
import datetime
import hashlib
import json
import math
import os
import random
import time
import traceback
from threading import Condition, Lock, Thread, Timer

import paho.mqtt.client as mqtt
import psutil
import requests


def Provision(officestr):
    print("Provisioning...", flush=True)

    # populate db with simulated offices and provisionings
    with open("/run/secrets/sensor-info.json", encoding='utf-8') as fd:
        data = json.load(fd)
        dbp = DBIngest(index="provisions", office=office, host=dbhost)
        for office1 in data:
            if scenario != office1["scenario"]: continue
            location1 = office1["location"]
            if location1["lat"] != office[0] or location1["lon"] != office[1]:
                continue

            sensors = office1.pop("sensors")
            for s in sensors:
                s["office"] = location1
                if "ip" in s:  # convert IP to CIDR
                    if s["ip"].find("/") < 0:
                        s["ip"] = s["ip"] + "/32"
                    s["ip_text"] = s["ip"]  # dup for terms aggs
            dbp.ingest_bulk(sensors, refresh="wait_for")

            office1.pop("scenario")
            office1["uri"] = gwhost
            return office1

    raise RuntimeError("no office matches the configured scenario and location")
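
# Usage sketch (assumed, not from the original source): the caller derives the
# globals from the environment before invoking Provision, mirroring the env
# parsing used in the other snippets below.
#
#   office = list(map(float, os.environ["OFFICE"].split(",")))
#   scenario = os.environ["SCENARIO"]
#   office_info = Provision(officestr)
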
def connect(sensor, algorithm, uri):
    db = DBIngest(host=dbhost, index="algorithms", office=office)
    db.update(algorithm["_id"], {
        "sensor": sensor["_id"],
    })
    db = DBIngest(host=dbhost, index="analytics", office=office)
    while True:
        counts = []
        for i in range(100):
            zonecount = {}
            for zonemap in sensor["_source"]["zonemap"]:
                zonecount["zone" + str(zonemap["zone"])] = int(random.random() * 1000)

            counts.append({
                # current wall-clock time in milliseconds
                "time": int(time.time() * 1000),
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "sensor": sensor["_id"],
                "algorithm": algorithm["_id"],
                "count": zonecount,
            })

        db.ingest_bulk(counts)
        time.sleep(1000)
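
# Hypothetical call site: sensor and algorithm are documents as returned by
# DBQuery (id under "_id", payload under "_source"); note that uri is unused
# by connect() above and kept only for signature compatibility.
#
#   connect(sensor, algorithm, uri=sensor["_source"].get("url"))
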
class Trigger(object):
    def __init__(self):
        super(Trigger, self).__init__()
        self.__db = DBIngest(index="alerts", office=office, host=dbhost)

    def trigger(self, stop):
        # overridden by subclasses; must accept the stop event (see loop below)
        return None

    def loop(self, stop):
        while not stop.is_set():
            info = self.trigger(stop)
            if not info: continue

            for v in info:
                v.update({
                    "time": int(time.time() * 1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                })
                if "location" not in v:
                    v["location"] = v["office"]

            self.__db.ingest_bulk(info)
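

# A minimal concrete Trigger, sketching the intended subclassing pattern:
# loop() polls trigger(stop) and ingests whatever alert dicts it returns.
# The storage path, threshold, and poll interval below are illustrative
# assumptions; the alert shape mirrors the disk-usage alerts ingested in
# the recording snippet further down.
class DiskUsageTrigger(Trigger):
    def __init__(self, storage="/var/lib/recordings", threshold=85.0):
        super(DiskUsageTrigger, self).__init__()
        self._storage = storage
        self._threshold = threshold

    def trigger(self, stop):
        stop.wait(30)  # poll every 30s; returning None produces no alerts
        usage = psutil.disk_usage(self._storage).percent
        if usage < self._threshold:
            return None
        return [{
            "warning": [{
                "message": "disk usage high",
                "args": {"disk": usage},
            }],
        }]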

# Example 4
class MQTT2DB(object):
    def __init__(self, algorithm):
        super(MQTT2DB, self).__init__()
        self._mqtt = mqtt.Client("feeder_" + algorithm)

        while True:
            try:
                self._mqtt.connect(mqtthost)
                break
            except Exception as e:
                print("Exception: " + str(e), flush=True)
                time.sleep(10)

        self._db = DBIngest(host=dbhost, index="analytics", office=office)
        self._cache = []
        self._lock = Lock()
        self._timer = IntervalTimer(2.0, self.on_timer)

    def loop(self, topic):
        self._mqtt.on_message = self.on_message
        self._mqtt.subscribe(topic)
        self._timer.start()
        self._mqtt.loop_forever()

    def stop(self):
        self._timer.cancel()
        self._mqtt.disconnect()

    def on_message(self, client, userdata, message):
        try:
            r = json.loads(str(message.payload.decode("utf-8", "ignore")))
            r.update(r["tags"])
            del r["tags"]
            if "real_base" not in r: r["real_base"] = 0
            r["time"] = int((r["real_base"] + r["timestamp"]) / 1000000)

            if "objects" in r and scenario == "traffic":
                r["nobjects"] = int(len(r["objects"]))
            if "objects" in r and scenario == "stadium":
                r["count"] = {"people": len(r["objects"])}
        except Exception as e:
            print("Exception: " + str(e), flush=True)
            return  # drop malformed messages; r may be unbound here

        with self._lock:
            self._cache.append(r)

    def on_timer(self):
        with self._lock:
            bulk = self._cache
            self._cache = []

        bulk_size = 500
        while len(bulk):
            try:
                self._db.ingest_bulk(bulk[:bulk_size])
                bulk = bulk[bulk_size:]
            except Exception as e:
                print("Exception: " + str(e), flush=True)
            time.sleep(0.25)
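
# Usage sketch (assumed): bridge one algorithm's MQTT inferences into the DB,
# flushing the cache every 2 seconds until stop() is called from another
# thread. The algorithm id is illustrative; the topic prefix matches the
# Feeder snippet below.
#
#   feeder = MQTT2DB(algorithm="object_detection_1")
#   feeder.loop("smtc_va_inferences_object_detection_1")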

# Example 5
class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        #Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        #Clients
        self.db_alg = DBIngest(host=self.dbhost,
                               index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost,
                               index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost,
                                  index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False

    def start(self):

        logger.info(" ### Starting Feeder ### ")

        logger.debug("Waiting for VA startup")
        status = 0
        while status not in (200, 201):
            try:
                status = requests.get(self.vahost).status_code
            except Exception:
                pass  # VA service not reachable yet

            time.sleep(10)

        # Register Algorithm
        logger.debug("Registering as algorithm in the DB")

        while True:
            try:
                self.alg_id = self.db_alg.ingest({
                    "name": "object_detection",
                    "office": {
                        "lat": self.office[0],
                        "lon": self.office[1]
                    },
                    "status": "idle",
                    "skip": self.every_nth_frame,
                })["_id"]
                break
            except Exception as e:
                logger.debug("Register algo exception: " + str(e))
                time.sleep(10)

        self.mqtttopic = "smtc_va_inferences_" + self.alg_id

        camera_monitor_thread = Thread(target=self.monitor_cameras,
                                       daemon=True)

        logger.debug("Starting working threads")
        self._threadflag = True
        self.startmqtt()
        self.observer.start()
        camera_monitor_thread.start()

        logger.debug("Waiting for interrupt...")
        camera_monitor_thread.join()
        self.observer.join()

    def stop(self):
        logger.info(" ### Stopping Feeder ### ")

        self._threadflag = False

        logger.debug("Unregistering algorithm from DB")
        self.db_alg.delete(self.alg_id)

        self.mqttclient.loop_stop()
        self.observer.stop()

    def startmqtt(self):
        self.mqttclient = mqtt.Client("feeder_" + self.alg_id)
        self.mqttclient.connect(self.mqtthost)
        self.mqttclient.on_message = self.mqtt_handler
        self.mqttclient.loop_start()
        self.mqttclient.subscribe(self.mqtttopic)

    def mqtt_handler(self, client, userdata, message):
        m_in = json.loads(str(message.payload.decode("utf-8", "ignore")))

        for tag in m_in["tags"]:
            m_in[tag] = m_in["tags"][tag]

        del m_in["tags"]

        m_in["time"] = m_in["real_base"] + m_in["timestamp"]
        # convert to milliseconds
        m_in["time"] = int(m_in["time"] / 1000000)
        self.inference_cache.append(m_in)
        if len(self.inference_cache) >= self.batchsize:
            try:
                self.db_inf.ingest_bulk(self.inference_cache[:self.batchsize])
                self.inference_cache = self.inference_cache[self.batchsize:]
            except Exception as e:
                logger.debug("Ingest Error: " + str(e))

    def monitor_cameras(self):
        logger.debug("Starting Sensor Monitor Thread")
        while self._threadflag:
            logger.debug("Searching for sensors...")

            try:
                for sensor in self.db_sensors.search(
                        "sensor:'camera' and status:'idle' and office:[" +
                        str(self.office[0]) + "," + str(self.office[1]) + "]"):
                    logger.debug(sensor)
                    try:
                        fswatch = None
                        logger.debug("Sensor found! " + sensor["_id"])
                        logger.debug("Setting sensor " + sensor["_id"] +
                                     " to streaming")
                        r = self.db_sensors.update(sensor["_id"],
                                                   {"status": "streaming"},
                                                   version=sensor["_version"])

                        logger.debug(
                            "Setting algorithm to streaming from sensor " +
                            sensor["_id"])
                        r = self.db_alg.update(self.alg_id, {
                            "source": sensor["_id"],
                            "status": "processing"
                        })

                        # Attempt to POST to VA service
                        jsonData = {
                            "source": {
                                "uri": sensor["_source"]["url"],
                                "type": "uri"
                            },
                            "tags": {
                                "algorithm": self.alg_id,
                                "sensor": sensor["_id"],
                                "office": {
                                    "lat": self.office[0],
                                    "lon": self.office[1],
                                },
                            },
                            "parameters": {
                                "every-nth-frame": self.every_nth_frame,
                                "recording_prefix":
                                "recordings/" + sensor["_id"],
                                "method": "mqtt",
                                "address": self.mqtthost,
                                "clientid": self.alg_id,
                                "topic": self.mqtttopic
                            },
                        }

                        folderpath = os.path.join(
                            os.path.realpath(self.recording_volume),
                            sensor["_id"])
                        os.makedirs(folderpath, exist_ok=True)

                        logger.debug("Adding folder watch for " + folderpath)
                        filehandler = FSHandler(
                            sensor=sensor["_id"],
                            office=self.office,
                            dbhost=self.dbhost,
                            rec_volume=self.recording_volume)
                        fswatch = self.observer.schedule(filehandler,
                                                         folderpath,
                                                         recursive=True)

                        try:
                            logger.info("Posting Request to VA Service")
                            r = requests.post(self.vahost +
                                              "/object_detection/2",
                                              json=jsonData,
                                              timeout=10)
                            r.raise_for_status()
                            pipeline_id = None

                            if r.status_code == 200:
                                logger.debug("Started pipeline " + r.text)
                                pipeline_id = int(r.text)

                            while r.status_code == 200:
                                logger.debug("Querying status of pipeline")
                                r = requests.get(self.vahost +
                                                 "/object_detection/2/" +
                                                 str(pipeline_id) + "/status",
                                                 timeout=10)
                                r.raise_for_status()
                                jsonValue = r.json()
                                if "avg_pipeline_latency" not in jsonValue:
                                    jsonValue["avg_pipeline_latency"] = 0
                                state = jsonValue["state"]
                                try:
                                    logger.debug("status: " + str(jsonValue))
                                except Exception:
                                    logger.debug("error logging pipeline status")
                                logger.debug("Pipeline state is " + str(state))
                                if state in ("COMPLETED", "ABORTED", "ERROR"):
                                    logger.debug("Pipeline ended")
                                    break

                                self.db_alg.update(self.alg_id, {
                                    "performance": jsonValue["avg_fps"],
                                    "latency": jsonValue["avg_pipeline_latency"] * 1000,
                                })

                                time.sleep(10)

                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to disconnected")
                            r = self.db_sensors.update(
                                sensor["_id"], {"status": "disconnected"})

                        except requests.exceptions.RequestException as e:
                            logger.error(
                                "Feeder: Request to VA Service Failed: " +
                                str(e))
                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to idle")
                            r = self.db_sensors.update(sensor["_id"],
                                                       {"status": "idle"})

                    except Exception as e:
                        logger.error("Feeder Exception: " + str(e))

                    if fswatch:
                        self.observer.unschedule(fswatch)
                        del filehandler

                    logger.debug("Setting algorithm to idle")
                    r = self.db_alg.update(self.alg_id, {"status": "idle"})
                    break
            except Exception as e:
                logger.error("Camera monitor exception: " + str(e))

            time.sleep(5)

        logger.debug("Sensor monitor thread done")
scenario = os.environ["SCENARIO"]
zone = os.environ["ZONE"]

print("Provisioning...", flush=True)

# populate db with simulated offices and provisionings
with open("/run/secrets/sensor-info.json", encoding='utf-8') as fd:
    data = json.load(fd)
    dbo = DBIngest(index="offices", office="", host=dbhost)
    dbp = DBIngest(index="provisions", office=office, host=dbhost)
    for office1 in data:
        if scenario != office1["scenario"]: continue
        location1 = office1["location"]
        if location1["lat"] != office[0] or location1["lon"] != office[1]:
            continue

        sensors = office1.pop("sensors")
        for s in sensors:
            s["office"] = location1
            if "ip" in s:  # convert IP to CIDR
                if s["ip"].find("/") < 0:
                    s["ip"] = s["ip"] + "/32"
        dbp.ingest_bulk(sensors)

        office1.pop("scenario")
        office1["uri"] = proxyhost
        office1["zone"] = zone
        dbo.ingest(office1, officestr)

print("DB Initialized", flush=True)

# Example 7
class MQTT2DB(object):
    def __init__(self):
        super(MQTT2DB, self).__init__()

        self._db = DBIngest(host=dbhost, index="analytics", office=office)
        self._cache = []
        self._cond = Condition()

        self._mqtt = mqtt.Client()
        self._mqtt.on_message = self.on_message
        self._mqtt.on_disconnect = self.on_disconnect

    def loop(self, topic="analytics"):
        print("connecting mqtt", flush=True)
        timer = Timer(10, self._connect_watchdog)
        timer.start()
        while True:
            try:
                self._mqtt.connect(mqtthost)
                break
            except Exception:
                print(traceback.format_exc(), flush=True)
                time.sleep(1)  # brief backoff; the 10s watchdog aborts if we never connect
        timer.cancel()
        print("mqtt connected", flush=True)

        self._stop = False
        Thread(target=self.todb).start()

        self._mqtt.subscribe(topic)
        self._mqtt.loop_forever()

    def _connect_watchdog(self):
        print("quit due to mqtt timeout", flush=True)
        os._exit(-1)  # called from a Timer thread, so hard-exit the whole process

    def _add1(self, item=None):
        with self._cond:
            if item:
                self._cache.append(item)
            self._cond.notify()

    def stop(self):
        self._mqtt.disconnect()

    def on_disconnect(self, client, userdata, rc):
        self._stop = True
        self._add1()

    def on_message(self, client, userdata, message):
        try:
            r = json.loads(str(message.payload.decode("utf-8", "ignore")))

            if "tags" in r:
                r.update(r["tags"])
                del r["tags"]

            if ("time" not in r) and ("timestamp" in r):
                real_base = r.get("real_base", 0)
                r["time"] = int((real_base + r["timestamp"]) / 1000000)

            if "objects" in r and scenario == "traffic":
                r["nobjects"] = int(len(r["objects"]))
            if "objects" in r and scenario == "stadium":
                r["count"] = {"people": len(r["objects"])}
            if "count" in r:
                r["nobjects"] = int(max([r["count"][k] for k in r["count"]]))

        except Exception:
            print(traceback.format_exc(), flush=True)
            return  # drop malformed messages; r may be unbound here

        self._add1(r)

    def todb(self):
        while not self._stop:
            with self._cond:
                self._cond.wait()
                bulk = self._cache
                self._cache = []

            try:
                self._db.ingest_bulk(bulk)
            except:
                print(traceback.format_exc(), flush=True)
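
# Usage sketch (assumed): run the bridge in the foreground; on_disconnect
# flips _stop and wakes todb() via _add1() so the drain thread exits cleanly.
#
#   bridge = MQTT2DB()
#   bridge.loop(topic="analytics")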

# Example 8
    def _rec2db(self, office, sensor, timestamp, path):
        disk_usage=psutil.disk_usage(self._storage).percent  # same field as index [3]
        if disk_usage<halt_rec_th:
            dt=datetime.datetime.fromtimestamp(timestamp/1000)
            officestr=(str(office[0])+"c"+str(office[1])).replace("-","n").replace(".","d")
            mp4path=self._storage+"/"+officestr+"/"+sensor+"/"+str(dt.year)+"/"+str(dt.month)+"/"+str(dt.day)
            os.makedirs(mp4path,exist_ok=True)
            mp4file=mp4path+"/"+str(timestamp)+".mp4"

            # perform a straight copy to fix negative timestamp for chrome
            list(run(["/usr/local/bin/ffmpeg","-f","mp4","-i",path,"-c","copy",mp4file]))

            sinfo=probe(mp4file)
            sinfo.update({
                "sensor": sensor,
                "office": {
                    "lat": office[0],
                    "lon": office[1],
                },
                "time": timestamp,
                "path": mp4file[len(self._storage)+1:],
            })
        else:
            print("Disk full: recording halted", flush=True)
            sinfo=None

        if local_office:
            if sinfo and sinfo.get("bandwidth"):
                db_cam=DBQuery(host=dbhost, index="sensors", office=office)
                db_cam.update(sensor, {"bandwidth": sinfo["bandwidth"]})

            # check disk usage and send alert
            disk_usage=psutil.disk_usage(self._storage).percent
            if disk_usage>=warn_disk_th:
                level="fatal" if disk_usage>=fatal_disk_th else "warning"
                db_alt=DBIngest(host=dbhost, index="alerts", office=office)
                message=text["halt recording"].format(disk_usage) if disk_usage>=halt_rec_th else text["disk usage"].format(disk_usage)
                db_alt.ingest({
                    "time": int(time.time()*1000),
                    "office": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    "location": {
                        "lat": office[0],
                        "lon": office[1],
                    },
                    level: [{
                        "message": message,
                        "args": {
                            "disk": disk_usage,
                        }
                    }]
                })

            # ingest recording local
            if sinfo:
                db_rec=DBIngest(host=dbhost, index="recordings", office=office)
                db_rec.ingest(sinfo)
        else:
            # ingest recording cloud
            if sinfo:
                db_s=DBQuery(host=dbhost, index="sensors", office=sinfo["office"])
                sensor=list(db_s.search("_id='"+sinfo["sensor"]+"'",size=1))
                if sensor:
                    # remove status
                    sensor[0]["_source"].pop("status",None)
                    # denormalize address
                    sinfo["address"]=sensor[0]["_source"]["address"]

                    # calculate hash code for the sensor
                    m=hashlib.md5()
                    m.update(json.dumps(sensor[0]["_source"],ensure_ascii=False).encode('utf-8'))
                    md5=m.hexdigest()

                    # locate the sensor record in cloud
                    db_sc=DBQuery(host=dbhost, index="sensors", office="")
                    sensor_c=list(db_sc.search("md5='"+md5+"'",size=1))
                    if not sensor_c:  # if not available, ingest a sensor record in cloud
                        sensor_c=[{ "_source": sensor[0]["_source"].copy() }]
                        sensor_c[0]["_source"]["md5"]=md5
                        db_sc=DBIngest(host=dbhost, index="sensors", office="")
                        print("Ingest sensor: {}".format(sensor_c[0]["_source"]), flush=True)
                        sensor_c[0]=db_sc.ingest(sensor_c[0]["_source"])

                    # replace cloud sensor id and ingest recording
                    sinfo["sensor"]=sensor_c[0]["_id"]

                    print("Ingest recording: {}".format(sinfo), flush=True)
                    db_rec=DBIngest(host=dbhost, index="recordings", office="")
                    db_rec.ingest(sinfo)

                    # copy local analytics to cloud
                    db_a=DBQuery(host=dbhost, index="analytics", office=sinfo["office"])
                    data=[]
                    for r in db_a.search(
                            'sensor="'+sensor[0]["_id"]+'"'+
                            ' and office:['+str(office[0])+','+str(office[1])+']'+
                            ' and time>='+str(sinfo["time"])+
                            ' and time<='+str(sinfo["time"]+sinfo["duration"]*1000),
                            size=10000):
                        r["_source"]["sensor"]=sinfo["sensor"]
                        data.append(r["_source"])
                    db_ac=DBIngest(host=dbhost, index="analytics", office="")
                    print("Ingest analytics: {}".format(len(data)), flush=True)
                    db_ac.ingest_bulk(data)
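
    # Note on the md5 dedup above: the hash is computed over json.dumps without
    # sort_keys, so it is only stable while the stored key order is. A safer
    # variant of the same scheme (an assumption, not the original behavior):
    #
    #   m=hashlib.md5()
    #   m.update(json.dumps(sensor[0]["_source"], sort_keys=True,
    #                       ensure_ascii=False).encode('utf-8'))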

# Example 9
                sensor_c[0] = dbsi_c.ingest(sensor_c[0]["_source"])
                print("Ingest sensor: {}".format(sensor_c[0]["_id"]),
                      flush=True)

            analytics = []
            for r in dba.search("sensor='" + q["_source"]["sensor"] +
                                "' and time>" + str(q["_source"]["time"]) +
                                " and time<" +
                                str(q["_source"]["time"] +
                                    q["_source"]["duration"] * 1000),
                                size=10000):
                r["_source"]["sensor"] = sensor_c[0]["_id"]
                analytics.append(r["_source"])

            print("Ingest analytics: {}".format(len(analytics)), flush=True)
            dba_c.ingest_bulk(analytics)

            url = sthost + '/' + q["_source"]["path"]
            print("url: " + url, flush=True)

            mp4file = "/tmp/" + str(os.path.basename(url))

            print("Transcoding...", flush=True)
            # Replace with any transcoding command
            list(
                run([
                    "/usr/local/bin/ffmpeg", "-f", "mp4", "-i", url, "-c",
                    "copy", "-f", "mp4", "-y", mp4file
                ]))

            print("Uploading: " + stchost, flush=True)

# Example 10
def discover_all_onvif_cameras():
    ip_range = os.environ['IP_SCAN_RANGE']
    port_range = os.environ['PORT_SCAN_RANGE']

    office = list(map(float, os.environ["OFFICE"].split(",")))
    distance = float(os.environ["DISTANCE"])
    angleoffset = float(os.environ["ANGLEOFFSET"])
    dbhost = os.environ["DBHOST"]

    sensor_index = 0
    mac_sensor_id = {}

    while True:
        desclist = []
        onvifcams = scan_onvif_camera(ip_range, port_range)
        nsensors = len(onvifcams)

        db = DBIngest(index="sensors",office=office,host=dbhost)
        dbs = DBQuery(index="sensors",office=office,host=dbhost)
        for cam in onvifcams:
            ip = cam['ip']
            port = int(cam['port'])
            desc = discover_onvif_camera(ip, port)

            if desc['MAC'] is None:
                if desc.get('NetworkInterfaces'):
                    desc['MAC'] = desc['NetworkInterfaces'][0]['Info']['HwAddress']
                else:
                    # fall back to the camera serial number as id
                    desc['MAC'] = desc['DeviceInformation']['SerialNumber']

            if desc['MAC'] not in mac_sensor_id:
                sensor_index += 1
                mac_sensor_id[desc['MAC']] = sensor_index
            sensor_id = mac_sensor_id[desc['MAC']]

            # Add credential to rtsp uri
            rtspuri = desc["MediaStreamUri"]["Uri"]
            rtspuri = rtspuri.replace('rtsp://', 'rtsp://*****:*****@')
            camdesc = {
                "sensor": "camera",
                "icon": "camera.gif",
                "office": { "lat": office[0], "lon": office[1] },
                "model": "ip_camera",
                "resolution": { "width": desc["MediaVideoSources"][0]['Resolution']['Width'], "height": desc["MediaVideoSources"][0]['Resolution']["Height"] },
                "location": geo_point(office, distance, math.pi * 2 / nsensors * sensor_id + math.pi * angleoffset / 180.0),
                "url": rtspuri,
                "mac": desc["MAC"],
                'theta': 15.0,
                'mnth': 15.0,
                'alpha': 45.0,
                'fovh': 90.0,
                'fovv': 68.0,
                "status": "idle",
            }

            try:
                found=list(dbs.search("sensor:'camera' and model:'ip_camera' and mac='"+desc['MAC']+"'", size=1))
                if not found:
                    desclist.append(camdesc)
            except Exception as e:
                print(e)

        if desclist:
            try:
                db.ingest_bulk(desclist)
            except Exception as e:
                print("Exception: "+str(e), flush=True)
        
        time.sleep(60)
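
# geo_point() is provided elsewhere in the project; a plausible sketch of its
# contract (destination point from a start lat/lon, a distance in meters, and
# a bearing in radians) under a small-offset flat-earth approximation. The
# name and formula here are assumptions for illustration only.
def geo_point_sketch(office, distance, angle):
    lat = office[0] + (distance / 111320.0) * math.cos(angle)
    lon = office[1] + (distance / (111320.0 * math.cos(math.radians(office[0])))) * math.sin(angle)
    return {"lat": lat, "lon": lon}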

# Example 11
class MQTT2DB(object):
    def __init__(self, algorithm):
        super(MQTT2DB, self).__init__()
        self._mqtt = mqtt.Client("feeder_" + algorithm)
        self._db = DBIngest(host=dbhost, index="analytics", office=office)
        self._cache = []
        self._cond = Condition()

    def loop(self, topic):
        self._stop = False
        Thread(target=self.todb).start()

        while True:
            try:
                self._mqtt.connect(mqtthost)
                break
            except Exception as e:
                print("Exception: " + str(e), flush=True)
                time.sleep(10)

        self._mqtt.on_message = self.on_message
        self._mqtt.subscribe(topic)
        self._mqtt.loop_forever()

    def _add1(self, item=None):
        with self._cond:
            if item:
                self._cache.append(item)
            self._cond.notify()

    def stop(self):
        self._mqtt.disconnect()
        self._stop = True
        self._add1()

    def on_message(self, client, userdata, message):
        try:
            r = json.loads(str(message.payload.decode("utf-8", "ignore")))
            r.update(r["tags"])
            del r["tags"]
            if "real_base" not in r: r["real_base"] = 0
            r["time"] = int((r["real_base"] + r["timestamp"]) / 1000000)

            if "objects" in r and scenario == "traffic":
                r["nobjects"] = int(len(r["objects"]))
            if "objects" in r and scenario == "stadium":
                r["count"] = {"people": len(r["objects"])}
        except Exception as e:
            print("Exception: " + str(e), flush=True)
            return  # drop malformed messages; r may be unbound here

        self._add1(r)

    def todb(self):
        while not self._stop:
            with self._cond:
                self._cond.wait()
                bulk = self._cache
                self._cache = []

            try:
                self._db.ingest_bulk(bulk)
            except Exception as e:
                print("Exception: " + str(e), flush=True)