def __init__(self, detectedClient, collectionPointConfig, loggingQueue):
    # Logger
    self.loggingQueue = loggingQueue
    self.logger = ThreadsafeLogger(loggingQueue, __name__)
    self.logger = logging.getLogger('btleRegisteredClient.BtleRegisteredClient')
    self.clientEventLogger = logging.getLogger('btleRegisteredClient.BtleEventTesting')
    self.clientEventSendLogger = logging.getLogger('eventSend')

    self.clientInRangeTrigerCount = 2
    self.lastTimeMessageClientInWasSentToController = -1
    self.lastTimeMessageClientOutWasSentToController = -1
    self.__countClientInRange = 0
    self.__countClientOutOfRange = 0
    self.timeInCollectionPointInMilliseconds = 0
    self.firstRegisteredTime = time.time()
    self.collectionPointConfig = collectionPointConfig

    self.__clientOutThresholdMin = int(
        self.collectionPointConfig['BtleRssiClientInThreshold'] +
        (self.collectionPointConfig['BtleRssiClientInThreshold'] *
         self.collectionPointConfig['BtleRssiErrorVariance']))

    # standard shared handling when we see a detected client
    self.handleNewDetectedClientEvent(detectedClient)
def __init__(self, collectionPointConfig, loggingQueue):
    # Logger
    self.loggingQueue = loggingQueue
    self.logger = ThreadsafeLogger(loggingQueue, __name__)

    self.rClients = {}  # registered clients
    self.collectionPointConfig = collectionPointConfig  # collection point config
def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue):
    """ Initialize new CamCollectionPoint instance.
    Setup queues, variables, configs, prediction engines, constants and loggers.
    """
    super(CamCollectionPoint, self).__init__()

    if not self.check_opencv_version("3.", cv2):
        print("OpenCV version {0} is not supported. Use 3.x for best results.".format(
            self.get_opencv_version()))

    # Queues
    self.outQueue = pOutBoundQueue  # messages from this thread to the main process
    self.inQueue = pInBoundQueue
    self.loggingQueue = loggingQueue

    # Variables
    self.video = None
    self.needsReset = False
    self.needsResetMux = False
    self.alive = True

    # Configs
    self.moduleConfig = camConfigLoader.load(self.loggingQueue)  # get the config for this module
    self.config = baseConfig

    # Prediction engine
    self.imagePredictionEngine = AzureImagePrediction(
        moduleConfig=self.moduleConfig, loggingQueue=loggingQueue)

    # Constants
    self._useIdsCamera = self.moduleConfig['UseIdsCamera']
    self._minFaceWidth = self.moduleConfig['MinFaceWidth']
    self._minFaceHeight = self.moduleConfig['MinFaceHeight']
    self._minNearestNeighbors = self.moduleConfig['MinNearestNeighbors']
    self._maximumPeople = self.moduleConfig['MaximumPeople']
    self._facePixelBuffer = self.moduleConfig['FacePixelBuffer']
    self._collectionThreshold = self.moduleConfig['CollectionThreshold']
    self._showVideoStream = self.moduleConfig['ShowVideoStream']
    self._sendBlobs = self.moduleConfig['SendBlobs']
    self._blobWidth = self.moduleConfig['BlobWidth']
    self._blobHeight = self.moduleConfig['BlobHeight']
    self._captureWidth = self.moduleConfig['CaptureWidth']
    self._captureHeight = self.moduleConfig['CaptureHeight']
    self._bitsPerPixel = self.moduleConfig['BitsPerPixel']
    self._resetEventTimer = self.moduleConfig['ResetEventTimer']
    self._collectionPointType = self.config['CollectionPointType']
    self._collectionPointId = self.config['CollectionPointId']

    # Logger
    self.logger = ThreadsafeLogger(loggingQueue, __name__)
def load(loggingQueue):
    """ Load module specific config into dictionary, return it """
    logger = ThreadsafeLogger(loggingQueue, "CamCollectionPoint")
    thisConfig = {}
    configParser = configparser.ConfigParser()

    thisConfig = loadSecrets(thisConfig, logger, configParser)
    thisConfig = loadModule(thisConfig, logger, configParser)
    return thisConfig
def __init__(self, bbox, frame, kind, moduleConfig, loggingQueue):
    """ Create and initialize a new Tracker. Set up constants and parameters. """
    # Setup logging queue first so unsupported tracker types can still be reported
    self.logger = ThreadsafeLogger(loggingQueue, __name__)

    if kind in ["KCF", "MIL", "MEDIANFLOW", "GOTURN", "TLD", "BOOSTING"]:
        self.tracker = cv2.Tracker_create(kind)
        self.tracker.init(frame, (bbox['x'], bbox['y'], bbox['w'], bbox['h']))
        self.created = time()
        self.bbox = (bbox['x'], bbox['y'], bbox['w'], bbox['h'])
        self.velocity = (0, 0)
        self.updateTime = self.created
        self.config = moduleConfig

        # Constants
        self._useVelocity = self.config['UseVelocity']
        self._horizontalVelocityBuffer = self.config['HorizontalVelocityBuffer']
        self._verticalVelocityBuffer = self.config['VerticalVelocityBuffer']
    else:
        self.logger.error("Type %s not supported by mTracker" % kind)
class BtleCollectionPoint(Thread):

    def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue):
        """ Initialize new BtleCollectionPoint instance.
        Setup queues, variables, configs, constants and loggers.
        """
        super(BtleCollectionPoint, self).__init__()

        # Queues
        self.outQueue = pOutBoundQueue  # messages from this thread to the main process
        self.inQueue = pInBoundQueue
        self.loggingQueue = loggingQueue
        self.queueBLE = mp.Queue()

        # Configs
        self.moduleConfig = configLoader.load(self.loggingQueue)  # get the config for this module
        self.config = baseConfig

        # Logger
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

        # Variables
        self.registeredClientRegistry = RegisteredClientRegistry(self.moduleConfig, self.loggingQueue)
        self.eventManager = EventManager(self.moduleConfig, pOutBoundQueue,
                                         self.registeredClientRegistry, self.loggingQueue)
        self.alive = True
        self.btleThread = None
        self.BLEThread = None
        self.repeatTimerSweepClients = None

    # main start method
    def run(self):
        # Pause startup to wait for other system services to start after a restart
        self.logger.info("Pausing execution 15 seconds waiting for other system services to start")
        time.sleep(15)
        self.logger.info("Done with our nap. Time to start looking for clients")

        self.btleThread = BlueGigaBtleCollectionPointThread(self.queueBLE, self.moduleConfig, self.loggingQueue)
        self.BLEThread = Thread(target=self.btleThread.bleDetect, args=(__name__, 10))
        self.BLEThread.daemon = True
        self.BLEThread.start()

        # Setup repeat task to run the sweep every X interval
        self.repeatTimerSweepClients = RepeatedTimer(
            (self.moduleConfig['AbandonedClientCleanupIntervalInMilliseconds'] / 1000),
            self.registeredClientRegistry.sweepOldClients)

        # Process queue from main thread for shutdown messages
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()

        # Read the BLE queue
        while self.alive:
            if not self.queueBLE.empty():
                self.logger.debug("Received detected clients from the BLE thread")
                result = self.queueBLE.get(block=False, timeout=1)
                self.__handleBtleClientEvents(result)

    def processQueue(self):
        self.logger.info("Starting to watch collection point inbound message queue")
        while self.alive:
            if not self.inQueue.empty():
                self.logger.info("Queue size is %s" % self.inQueue.qsize())
                try:
                    message = self.inQueue.get(block=False, timeout=1)
                    if message is not None:
                        if message == "SHUTDOWN":
                            self.logger.info("SHUTDOWN command handled on %s" % __name__)
                            self.shutdown()
                        else:
                            self.sendOutMessage(message)
                except Exception as e:
                    self.logger.error("Unable to read queue, error: %s " % e)
                    self.shutdown()
                self.logger.info("Queue size is %s after" % self.inQueue.qsize())
            else:
                time.sleep(.25)

    # handle btle reads
    def __handleBtleClientEvents(self, detectedClients):
        self.logger.debug("doing handleBtleClientEvents: %s" % detectedClients)
        for client in detectedClients:
            self.logger.debug("--- Found client ---")
            self.logger.debug(vars(client))
            self.logger.debug("--- Found client end ---")
            self.eventManager.registerDetectedClient(client)

    def shutdown(self):
        self.logger.info("Shutting down")
        # self.threadProcessQueue.join()
        self.repeatTimerSweepClients.stop()
        self.btleThread.stop()
        self.alive = False
        time.sleep(1)
        self.exit = True
class WebsocketClientModule(Thread):

    def __init__(self, baseConfig, pInBoundEventQueue, pOutBoundEventQueue, loggingQueue):
        super(WebsocketClientModule, self).__init__()
        self.alive = True
        self.config = baseConfig
        self.inQueue = pInBoundEventQueue    # inQueue are messages from the main process to websocket clients
        self.outQueue = pOutBoundEventQueue  # outQueue are messages from clients to main process
        self.websocketClient = None
        self.loggingQueue = loggingQueue
        self.threadProcessQueue = None

        # Constants
        self._port = self.config['WebsocketPort']
        self._host = self.config['WebsocketHost']

        # Logging setup
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

    def run(self):
        """ Main thread entry point.
        Sets up the websocket connection and event callbacks.
        Starts a thread to monitor the inbound message queue.
        """
        self.logger.info("Starting websocket %s" % __name__)
        self.connect()

    def listen(self):
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()

    def connect(self):
        # websocket.enableTrace(True)
        ws = websocket.WebSocketApp("ws://%s:%s" % (self._host, self._port),
                                    on_message=self.onMessage,
                                    on_error=self.onError,
                                    on_close=self.onClose)
        ws.on_open = self.onOpen
        ws.run_forever()

    def onError(self, ws, message):
        self.logger.error("Error from websocket client: %s" % message)

    def onClose(self, ws):
        if self.alive:
            self.logger.warn("Closed")
            self.alive = False
            # TODO: reconnect timer
        else:
            self.logger.info("Closed")

    def onMessage(self, ws, message):
        self.logger.info("Message from websocket server: %s" % message)

    def onOpen(self, ws):
        self.alive = True
        self.websocketClient = ws
        self.listen()

    def shutdown(self):
        """ Handle shutdown message.
        Close the websocket connection and join the queue processing thread.
        """
        self.logger.info("Shutting down websocket client %s" % (multiprocessing.current_process().name))
        try:
            self.logger.info("Closing websocket")
            self.websocketClient.close()
        except Exception as e:
            self.logger.error("Websocket close error : %s " % e)
        self.alive = False
        self.threadProcessQueue.join()
        time.sleep(1)
        self.exit = True

    def sendOutMessage(self, message):
        """ Send message to server """
        self.websocketClient.send(json.dumps(message.__dict__))

    def processQueue(self):
        """ Monitor queue of messages from main process to this thread. """
        while self.alive:
            if not self.inQueue.empty():
                try:
                    message = self.inQueue.get(block=False, timeout=1)
                    if message is not None:
                        if message == "SHUTDOWN":
                            self.logger.debug("SHUTDOWN handled")
                            self.shutdown()
                        else:
                            self.sendOutMessage(message)
                except Exception as e:
                    self.logger.error("Websocket unable to read queue : %s " % e)
            else:
                time.sleep(.25)
class AzureImagePrediction(AbstractImagePrediction):

    def __init__(self, moduleConfig=None, loggingQueue=None):
        """ Initialize new AzureImagePrediction instance.
        Set parameters required by Azure Face API.
        """
        logging.basicConfig(level=logging.CRITICAL)
        self.logger = ThreadsafeLogger(loggingQueue, "AzureImagePrediction")  # Setup logging queue
        self.config = moduleConfig

        # Constants
        self._subscriptionKey = self.config['Azure']['SubscriptionKey']
        self._uriBase = self.config['Azure']['UriBase']
        self._headers = {
            'Content-Type': 'application/octet-stream',
            'Ocp-Apim-Subscription-Key': self.config['Azure']['SubscriptionKey'],
        }
        self._params = urllib.parse.urlencode({
            "returnFaceId": "true",
            "returnFaceLandmarks": "false",
            "returnFaceAttributes": "age,gender,glasses,facialHair"
        })

    def getPrediction(self, imageBytes):
        """ Get prediction results from Azure Face API.
        Returns object with either a predictions array property or an error property.
        """
        resultData = {}
        try:
            tempResult = self.__getPrediction(imageBytes)
            resultData['predictions'] = tempResult
        except Exception as e:
            self.logger.error('Error getting prediction: %s' % e)
            resultData['error'] = str(e)
        return resultData

    def __getPrediction(self, imageBytes):
        """ Execute REST API call and return result """
        if len(self._subscriptionKey) < 10:
            raise EnvironmentError('Azure subscription key - %s - is not valid' % self._subscriptionKey)
        else:
            try:
                api_url = "https://%s/face/v1.0/detect?%s" % (self._uriBase, self._params)
                r = requests.post(api_url, headers=self._headers, data=imageBytes)
                if r.status_code != 200:
                    raise ValueError('Request to Azure returned an error %s, the response is:\n%s'
                                     % (r.status_code, r.text))
                jsonResult = r.json()
                self.logger.debug("Got azure data %s" % jsonResult)
                return jsonResult
            except Exception as e:
                self.logger.error(e)
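# Usage sketch for AzureImagePrediction (illustrative only). The config values below are
# placeholders, not real credentials, and the image path is hypothetical; getPrediction()
# returns a dict with either a 'predictions' list (the raw Face API response) or an 'error' string.
def example_face_prediction(loggingQueue):
    config = {'Azure': {'SubscriptionKey': 'your-subscription-key-here',
                        'UriBase': 'westus.api.cognitive.microsoft.com'}}
    engine = AzureImagePrediction(moduleConfig=config, loggingQueue=loggingQueue)
    with open('face.jpg', 'rb') as f:  # any JPEG/PNG containing a face
        result = engine.getPrediction(f.read())
    if 'error' in result:
        print('Prediction failed:', result['error'])
    else:
        for face in result['predictions']:
            print(face.get('faceId'), face.get('faceAttributes'))
    return result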
class MultiTracker(object):

    def __init__(self, kind="KCF", moduleConfig=None, loggingQueue=None):
        """ Create and initialize a new MultiTracker. Set up constants and parameters. """
        self.config = moduleConfig
        self.trackers = []  # List of trackers
        self.kind = kind
        self.focus = None
        self.loggingQueue = loggingQueue

        # Constants
        self._useVelocity = self.config['UseVelocity']
        self._closestThreshold = self.config["ClosestThreshold"]
        self._primaryTarget = self.config['PrimaryTarget']

        # Setup logging queue
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

    def add(self, bbox, frame, kind="KCF"):
        """ Add new tracker with default type KCF. """
        aTracker = Tracker(bbox, frame, kind, self.config, self.loggingQueue)
        self.trackers.append(aTracker)

    def removeAt(self, i):
        """ Remove Tracker at index i. """
        self.trackers.pop(i)

    def remove(self, aTracker):
        """ Remove tracker provided as parameter. """
        self.trackers.remove(aTracker)

    def update(self, frame):
        """ Loop through each tracker updating its bounding box, keeping track of failures. """
        bboxes = []
        ind = 0
        failed = []
        for aTracker in self.trackers:
            ok, bbox = aTracker.update(frame)
            if not ok:
                failed.append(ind)
            else:
                bboxes.append(bbox)
            ind += 1
        if len(failed) == 0:
            return True, bboxes, None
        else:
            self.logger.error('Failed to update all trackers')
            return False, bboxes, failed

    def clear(self):
        """ Remove all trackers. """
        self.trackers.clear()
        self.focus = None

    def bboxContainsPt(self, bbox, pt, vBuffer):
        """ Check if bbox contains pt.
        Optionally provide velocity buffer to spread containing space.
        """
        if ((bbox['x'] - vBuffer[0] <= pt[0] <= (bbox['x'] + bbox['w'] + vBuffer[0])) and
                (bbox['y'] - vBuffer[1] <= pt[1] <= (bbox['y'] + bbox['h'] + vBuffer[1]))):
            return True
        else:
            return False

    def projectedLocationMatches(self, tracker, bbox):
        """ Check if the velocity of the tracker could put it in the same spot as the bbox. """
        if tracker.velocity:
            return self.bboxContainsPt(bbox, tracker.getProjectedLocation(time()), tracker.getVelocityBuffer())
        else:
            return False

    def intersects(self, tracker, bbox):
        """ Check if the bbox and the tracker's bounds intersect. """
        if (tracker.right() < bbox['x'] or
                bbox['x'] + bbox['w'] < tracker.left() or
                tracker.top() < bbox['y'] or
                bbox['y'] + bbox['h'] < tracker.bottom()):
            return False  # intersection is empty
        else:
            return True   # intersection is not empty

    def contains(self, bbox):
        """ Check if the MultiTracker already has a tracker for the object detected.
        Uses intersections and projected locations to determine if the tracker overlaps others.
        This means objects that overlap when first detected will not _both_ be added to the MultiTracker.
        """
        for aTracker in self.trackers:
            if self._useVelocity:
                if self.intersects(aTracker, bbox) and self.projectedLocationMatches(aTracker, bbox):
                    return True
            elif self.intersects(aTracker, bbox):
                return True
        return False

    def length(self):
        """ Get number of Trackers in the MultiTracker. """
        return len(self.trackers)

    def getFocus(self):
        """ Get focal object based on primaryTarget configuration.
        Currently only "closest" is supported - checks whether there is a tracker that is
        larger than the previous closest tracker by the configured threshold.
        """
        if self._primaryTarget == "closest":
            focusChanged = False
            if self.focus:
                area = self.focus.area()
            else:
                area = None
            for aTracker in self.trackers:
                # If there's no focus, or aTracker is larger than focus and they aren't the same tracker
                if not self.focus or (aTracker.area() > area * (1 + (self._closestThreshold / 100))
                                      and self.focus.getCreated() != aTracker.getCreated()):
                    focusChanged = True
                    self.focus = aTracker
                    area = aTracker.area()
            if focusChanged:
                return self.focus
            else:
                return None
        elif self._primaryTarget == "closest_engaged":
            # TODO
            self.logger.error('Primary Target %s is not implemented.' % self._primaryTarget)
            return None
        else:
            self.logger.error('Primary Target %s is not implemented.' % self._primaryTarget)
            return None

    def checkFocus(self):
        """ Check if the focal Tracker has changed by updating the focus. """
        focus = self.getFocus()
        if focus:
            return True, focus.bbox
        else:
            return False, None
class IdsWrapper(object):

    def __init__(self, loggingQueue, moduleConfig):
        """ Create a new IdsWrapper instance.
        IdsWrapper uses the Windows IDS DLL, wraps C calls in Python.
        Tested using the IDS XS 2.0 USB camera.
        """
        self.config = moduleConfig
        self.isOpen = False
        self.width = 0
        self.height = 0
        self.bitspixel = 0
        self.py_width = 0
        self.py_height = 0
        self.py_bitspixel = 0
        self.cam = None
        self.pcImgMem = c_char_p()  # create placeholder for image memory
        self.pid = c_int()

        # Setup logger
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

        # Load the correct dll
        try:
            if architecture()[0] == '64bit':
                self.logger.info('Using IDS camera with 64-bit architecture')
                self.uEyeDll = cdll.LoadLibrary("C:/Windows/System32/uEye_api_64.dll")
            else:
                self.logger.info('Using IDS camera with 32-bit architecture')
                self.uEyeDll = cdll.LoadLibrary("C:/Windows/System32/uEye_api.dll")
        except Exception as e:
            self.logger.error('Failed to load IDS DLL: %s. Are you sure you are using an IDS camera?' % e)

    def isOpened(self):
        """ Return camera open status """
        return self.isOpen

    def set(self, key, value):
        """ Set the py_width, py_height or py_bitspixel properties """
        if key == 'py_width':
            self.py_width = value
        elif key == 'py_height':
            self.py_height = value
        else:
            self.py_bitspixel = value

    def allocateImageMemory(self):
        """ Wrapped call to allocate image memory """
        ret = self.uEyeDll.is_AllocImageMem(self.cam, self.width, self.height, self.bitspixel,
                                            byref(self.pcImgMem), byref(self.pid))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully allocated image memory")
        else:
            self.logger.error('Memory allocation failed, no camera with value '
                              + str(self.cam.value) + ' | Error code: ' + str(ret))
        return

    def setImageMemory(self):
        """ Wrapped call to set image memory """
        ret = self.uEyeDll.is_SetImageMem(self.cam, self.pcImgMem, self.pid)
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set image memory")
        else:
            self.logger.error("Failed to set image memory; error code: " + str(ret))
        return

    def beginCapture(self):
        """ Wrapped call to begin capture """
        ret = self.uEyeDll.is_CaptureVideo(self.cam, c_long(IS_DONT_WAIT))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully began video capture")
        else:
            self.logger.error("Failed to begin video capture; error code: " + str(ret))
        return

    def initImageData(self):
        """ Initialize the ImageData numpy array """
        self.ImageData = np.ones((self.py_height, self.py_width), dtype=np.uint8)

    def setCTypes(self):
        """ Set C types for width, height, and bitspixel properties """
        self.width = c_int(self.py_width)
        self.height = c_int(self.py_height)
        self.bitspixel = c_int(self.py_bitspixel)

    def start(self):
        """ Start capturing frames on another thread as a daemon """
        self.updateThread = Thread(target=self.update)
        self.updateThread.setDaemon(True)
        self.updateThread.start()
        return self

    def initializeCamera(self):
        """ Wrapped call to initialize camera """
        ret = self.uEyeDll.is_InitCamera(byref(self.cam), self.hWnd)
        if ret == IS_SUCCESS:
            self.logger.info("Successfully initialized camera")
        else:
            self.logger.error("Failed to initialize camera; error code: " + str(ret))
        return

    def enableAutoExit(self):
        """ Wrapped call to allow allocated memory to be dropped on exit. """
        ret = self.uEyeDll.is_EnableAutoExit(self.cam, c_uint(1))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully enabled auto exit")
        else:
            self.logger.error("Failed to enable auto exit; error code: " + str(ret))
        return

    def setDisplayMode(self):
        """ Wrapped call to set display mode to DIB """
        ret = self.uEyeDll.is_SetDisplayMode(self.cam, c_int(IS_SET_DM_DIB))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set camera to DIB mode")
        else:
            self.logger.error("Failed to set camera mode; error code: " + str(ret))
        return

    def setColorMode(self):
        """ Wrapped call to set camera color capture mode """
        ret = self.uEyeDll.is_SetColorMode(self.cam, c_int(IS_CM_SENSOR_RAW8))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set color mode")
        else:
            self.logger.error("Failed to set color mode; error code: " + str(ret))
        return

    def setCompressionFactor(self):
        """ Wrapped call to set image compression factor.
        Required for long USB runs when bandwidth is constrained; lowers quality.
        """
        ret = self.uEyeDll.is_DeviceFeature(self.cam, IS_DEVICE_FEATURE_CMD_SET_JPEG_COMPRESSION,
                                            byref(c_int(self.config['CompressionFactor'])),
                                            c_uint(INT_BYTE_SIZE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set compression factor to: " + str(self.config['CompressionFactor']))
        else:
            self.logger.error("Failed to set compression factor; error code: " + str(ret))
        return

    def setPixelClock(self):
        """ Wrapped call to set pixel clock.
        Required for long USB runs when bandwidth is constrained; lowers frame rate and increases motion blur.
        """
        ret = self.uEyeDll.is_PixelClock(self.cam, IS_PIXELCLOCK_CMD_SET,
                                         byref(c_uint(self.config['PixelClock'])),
                                         c_uint(INT_BYTE_SIZE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set pixel clock to: " + str(self.config['PixelClock']))
        else:
            self.logger.error("Failed to set pixel clock; error code: " + str(ret))
        return

    def setTrigger(self):
        """ Wrapped call to set trigger type to software trigger. """
        ret = self.uEyeDll.is_SetExternalTrigger(self.cam, c_uint(IS_SET_TRIGGER_SOFTWARE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set software trigger")
        else:
            self.logger.error("Failed to set software trigger; error code: " + str(ret))
        return

    def setImageProfile(self):
        """ Wrapped call to set image format.
        Sets resolution of the capture to UXGA. More modes available in idsConsts.py.
        """
        ret = self.uEyeDll.is_ImageFormat(self.cam, c_uint(IMGFRMT_CMD_SET_FORMAT),
                                          byref(c_int(UXGA)), c_uint(INT_BYTE_SIZE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set camera image profile")
        else:
            self.logger.error("Failed to set camera image profile; error code: " + str(ret))
        return

    def open(self):
        """ Open connection to IDS camera, set various modes. """
        self.cam = c_uint32(0)
        self.hWnd = c_voidp()
        self.initializeCamera()
        self.enableAutoExit()
        self.setDisplayMode()
        self.setColorMode()
        self.setCompressionFactor()
        self.setPixelClock()
        self.setTrigger()
        self.setImageProfile()

        # Declare video open
        self.isOpen = True
        self.logger.info('Successfully opened camera')

    def update(self):
        """ Loop to update frames and copy to ImageData variable. """
        while True:
            if not self.isOpen:
                return
            self.uEyeDll.is_CopyImageMem(self.cam, self.pcImgMem, self.pid,
                                         self.ImageData.ctypes.data_as(c_char_p))

    def read(self):
        """ Read frame currently available in ImageData variable. """
        try:
            return True, self.ImageData
        except Exception as e:
            self.logger.error('Error getting image data: %r' % e)
            return False, None

    def exit(self):
        """ Close camera down, release memory. """
        self.uEyeDll.is_ExitCamera(self.cam)
        self.isOpen = False
        self.logger.info('Closing wrapper and camera')
        return
class BlueGigaBtleCollectionPointThread(Thread):

    def __init__(self, queue, btleConfig, loggingQueue, debugMode=False):
        Thread.__init__(self)
        # Logger
        self.loggingQueue = loggingQueue
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

        self.alive = True
        self.btleConfig = btleConfig
        self.queue = queue
        self.btleCollectionPoint = BtleThreadCollectionPoint(self.eventScanResponse, self.btleConfig, self.loggingQueue)

    def bleDetect(self, __name__, repeatcount=10):
        try:
            self.btleCollectionPoint.start()
        except Exception as e:
            self.logger.error("[btleThread] Unable to connect to BTLE device: %s" % e)
            self.sendFailureNotice("Unable to connect to BTLE device")
            quit()

        while self.alive:
            try:
                self.btleCollectionPoint.scan()
            except Exception as e:
                self.logger.error("[btleThread] Unable to scan BTLE device: %s" % e)
                self.sendFailureNotice("Unable to connect to BTLE device to perform a scan")
                quit()
            # don't burden the CPU
            time.sleep(0.01)

    # handler to process scan responses
    def eventScanResponse(self, sender, args):
        # check to make sure there is enough data to be a beacon
        if len(args["data"]) > 15:
            try:
                majorNumber = args["data"][26] | (args["data"][25] << 8)
            except:
                majorNumber = 0
            try:
                minorNumber = args["data"][28] | (args["data"][27] << 8)
            except:
                minorNumber = 0

            if self.btleConfig['BtleAdvertisingMajor'] == majorNumber and self.btleConfig['BtleAdvertisingMinor'] == minorNumber:
                self.logger.debug("Major %i and minor %i match the configured beacon; creating a detected client for the event manager" % (majorNumber, minorNumber))

                udid = "%s" % ''.join(['%02X' % b for b in args["data"][9:25]])
                self.logger.debug("UDID=%s" % udid)

                rssi = args["rssi"]
                self.logger.debug("rssi=%s" % rssi)

                beaconMac = "%s" % ''.join(['%02X' % b for b in args["sender"][::-1]])
                self.logger.debug("beaconMac=%s" % beaconMac)

                rawTxPower = args["data"][29]
                self.logger.debug("rawTxPower=%i" % rawTxPower)
                if rawTxPower <= 127:
                    txPower = rawTxPower
                else:
                    txPower = rawTxPower - 256
                self.logger.debug("txPower=%i" % txPower)

                # we send an array to the event queue; we used to process batches of responses
                arrayDetectedClients = []

                # package it up for sending to the queue
                detectedClient = DetectedClient('btle', udid=udid, beaconMac=beaconMac,
                                                majorNumber=majorNumber, minorNumber=minorNumber,
                                                tx=txPower, rssi=rssi)
                arrayDetectedClients.append(detectedClient)

                # put it on the queue for the event manager to pick up
                self.queue.put(arrayDetectedClients)

    def stop(self):
        self.alive = False

    def sendFailureNotice(self, msg):
        if len(self.btleConfig['SlackChannelWebhookUrl']) > 10:
            myMsg = 'Help I have fallen and can not get back up! \n %s. \nSent from %s' % (msg, platform.node())
            payload = {'text': myMsg}
            r = requests.post(self.btleConfig['SlackChannelWebhookUrl'], data=json.dumps(payload))
class EventManager(object):

    def __init__(self, collectionPointConfig, pOutBoundQueue, registeredClientRegistry, loggingQueue):
        # Logger
        self.loggingQueue = loggingQueue
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

        self.__stats_totalRemoveEvents = 0
        self.__stats_totalNewEvents = 0
        self.logger.debug("in constructor")
        self.registeredClientRegistry = registeredClientRegistry
        self.registeredClientRegistry.eventRegisteredClientAdded += self.__newClientRegistered
        self.registeredClientRegistry.eventRegisteredClientRemoved += self.__removedRegisteredClient
        self.collectionPointConfig = collectionPointConfig
        self.outBoundEventQueue = pOutBoundQueue

    def registerDetectedClient(self, detectedClient):
        self.logger.debug("Registering detected client %s" % detectedClient.extraData["beaconMac"])

        # check for an existing registered client
        eClient = self.registeredClientRegistry.getRegisteredClient(detectedClient.extraData["beaconMac"])

        if eClient is None:
            # Newly found client
            if self.collectionPointConfig['InterfaceType'] == 'btle':
                rClient = BtleRegisteredClient(detectedClient, self.collectionPointConfig, self.loggingQueue)
                self.logger.debug("client %s not found in the existing clients. NEW CLIENT!"
                                  % detectedClient.extraData["beaconMac"])

                if rClient.shouldSendClientInEvent():
                    self.__sendEventToController(rClient, "clientIn")
                elif rClient.shouldSendClientOutEvent():
                    self.logger.debug("Sending clientOut for new client")
                    self.__sendEventToController(rClient, "clientOut")

                self.registeredClientRegistry.addNewRegisteredClient(rClient)
        else:
            eClient.updateWithNewDectedClientData(detectedClient)

            if eClient.shouldSendClientInEvent():
                self.__sendEventToController(eClient, "clientIn")
            elif eClient.shouldSendClientOutEvent():
                self.logger.debug("Sending clientOut for existing client")
                self.__sendEventToController(eClient, "clientOut")

            self.registeredClientRegistry.updateRegisteredClient(eClient)

    def registerClients(self, detectedClients):
        for detectedClient in detectedClients:
            self.registerDetectedClient(detectedClient)

    def getEventAuditData(self):
        """ Return a dict with the total New and Remove events the engine has seen since startup """
        return {
            'NewEvents': self.__stats_totalNewEvents,
            'RemoveEvents': self.__stats_totalRemoveEvents
        }

    def __newClientRegistered(self, sender, registeredClient):
        self.logger.debug("NEW CLIENT REGISTERED %s" % registeredClient.detectedClient.extraData["beaconMac"])
        # we don't need to count forever and eat up all the memory
        if self.__stats_totalNewEvents > 1000000:
            self.__stats_totalNewEvents = 0
        else:
            self.__stats_totalNewEvents += 1

    def __removedRegisteredClient(self, sender, registeredClient):
        self.logger.debug("REGISTERED CLIENT REMOVED %s" % registeredClient.detectedClient.extraData["beaconMac"])
        if registeredClient.sweepShouldSendClientOutEvent():
            self.__sendEventToController(registeredClient, "clientOut")
        # we don't need to count forever and eat up all the memory
        if self.__stats_totalRemoveEvents > 1000000:
            self.__stats_totalRemoveEvents = 0
        else:
            self.__stats_totalRemoveEvents += 1

    def __sendEventToController(self, registeredClient, eventType):
        eventMessage = CollectionPointEvent(
            self.collectionPointConfig['collectionPointId'],
            registeredClient.lastRegisteredTime,
            registeredClient.detectedClient.extraData["beaconMac"],
            self.collectionPointConfig['gatewayType'],
            eventType,
            registeredClient.getExtenedDataForEvent())

        if eventType == 'clientIn':
            registeredClient.setClientInMessageSentToController()
        elif eventType == 'clientOut':
            registeredClient.setClientOutMessageSentToController()

        # update registry
        self.registeredClientRegistry.updateRegisteredClient(registeredClient)
        self.outBoundEventQueue.put(eventMessage)
class TVCollectionPoint(Thread):

    def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue):
        """ Initialize new TVCollectionPoint instance.
        Setup queues, variables, configs, constants and loggers.
        """
        super(TVCollectionPoint, self).__init__()

        if not self.check_opencv_version("3.", cv2):
            print("OpenCV version {0} is not supported. Use 3.x for best results.".format(
                self.get_opencv_version()))

        # Queues
        self.outQueue = pOutBoundQueue  # messages from this thread to the main process
        self.inQueue = pInBoundQueue
        self.loggingQueue = loggingQueue

        # Variables
        self.video = None
        self.alive = True
        self.ix = -1
        self.iy = -1
        self.fx = -1
        self.fy = -1
        self.clicking = False
        self.boundSet = False
        self.x1, self.x2, self.y1, self.y2 = 0, 0, 0, 0

        # Configs
        # self.moduleConfig = camConfigLoader.load(self.loggingQueue)  # get the config for this module
        self.config = baseConfig

        # Constants
        self._captureWidth = 1600
        self._captureHeight = 900
        self._numLEDs = 60
        self._collectionPointId = "tvcam1"
        self._collectionPointType = "ambiLED"
        self._showVideoStream = True
        self._delimiter = ';'
        self._colorMode = 'edgeDominant'
        # self._colorMode = 'edgeMean'
        self._perimeterDepth = 20
        self._topSegments = 3
        self._sideSegments = 2

        # Logger
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

    def run(self):
        """ Main thread method, run when the thread's start() function is called.
        Lets the user pick a region of interest, then samples edge colors each frame.
        Sends color data in string format, like "#ffffff;#f1f1f1;..."
        """
        # Monitor inbound queue on own thread
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()

        self.initializeCamera()

        # Setup timer for FPS calculations
        start = time.time()
        frameCounter = 1
        fps = 0

        # Start timer for collection events
        self.collectionStart = time.time()

        ok, frame = self.video.read()
        if not ok:
            self.logger.error('Cannot read video file')
            self.shutdown()
        else:
            framecopy = frame.copy()
            cont = True
            while cont or not self.boundSet:
                cv2.imshow('Set ROI', framecopy)
                cv2.setMouseCallback('Set ROI', self.getROI, frame)
                k = cv2.waitKey(0)
                if k == 32 and self.boundSet:
                    # on space, the user wants to finalize the bounds; only allow exit once bounds are set
                    cont = False
                # elif k != 27:  # any other key clears rectangles
                #     framecopy = frame.copy()
                #     cv2.imshow('Set ROI', framecopy)
                #     cv2.setMouseCallback('Set ROI', self.getROI, framecopy)
            cv2.destroyWindow('Set ROI')

        self.initKMeans()

        # Set up for all modes
        top_length_pixels = self.fx - self.ix
        side_length_pixels = self.fy - self.iy
        perimeter_length_pixels = top_length_pixels * 2 + side_length_pixels * 2

        # mode specific setup
        if self._colorMode == 'dominant':
            pass

        if self._colorMode == 'edgeDominant' or self._colorMode == 'edgeMean':
            perimeter_depth = 0
            if self._perimeterDepth < side_length_pixels / 2 and self._perimeterDepth < top_length_pixels / 2:
                perimeter_depth = self._perimeterDepth
            else:
                perimeter_depth = min(side_length_pixels / 2, top_length_pixels / 2)

        while self.alive:
            ok, ogframe = self.video.read()
            if not ok:
                self.logger.error('Error while reading frame')
                break
            frame = ogframe.copy()

            if self._colorMode == 'dominant':
                # Dominant color of the whole ROI
                data = self.getDominantColor(
                    cv2.resize(frame[:, :, :], (0, 0), fx=0.4, fy=0.4),
                    self.ix, self.fx, self.iy, self.fy)
                # self.putCPMessage(data, 'light-dominant')
                # print('data: ', data)

            elif self._colorMode == 'edgeMean':
                data = self.getEdgeMeanColors(frame, top_length_pixels, side_length_pixels,
                                              perimeter_length_pixels, perimeter_depth)
                print('data: ', data)

            elif self._colorMode == 'edgeDominant':
                # this is the most promising mode
                colorData = self.getEdgeDominantColors(frame, top_length_pixels, side_length_pixels,
                                                       perimeter_length_pixels, perimeter_depth)

                # assuming LEDs are evenly distributed, find the number for each edge of the ROI
                top_num_leds = self._numLEDs * (top_length_pixels / perimeter_length_pixels)
                side_num_leds = self._numLEDs * (side_length_pixels / perimeter_length_pixels)

                data = self.getColorString(colorData, top_num_leds, side_num_leds)
                self.putCPMessage(data, 'light-edges')
                # print('data: ', data)

            if self._showVideoStream:
                cv2.rectangle(frame, (self.ix, self.iy), (self.fx, self.fy), (255, 0, 0), 1)
                cv2.imshow("output", frame)
                cv2.waitKey(1)

    def getMeanColor(self, frame):
        color = [frame[:, :, i].mean() for i in range(frame.shape[-1])]
        return color

    def initKMeans(self):
        # kmeans vars
        self.n_colors = 5
        self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)
        self.flags = cv2.KMEANS_RANDOM_CENTERS

    def getColorString(self, colorData, top_num_leds, side_num_leds):
        toReturn = ''
        for key in colorData:
            if key == 'top' or key == 'bottom':
                for i in range(len(colorData[key])):
                    toReturn += (colorData[key][i] + self._delimiter) * int(top_num_leds / self._topSegments)
            if key == 'right' or key == 'left':
                for i in range(len(colorData[key])):
                    toReturn += (colorData[key][i] + self._delimiter) * int(side_num_leds / self._sideSegments)
        return toReturn

    def getDominantSegmentColor(self, segment):
        average_color = [segment[:, :, i].mean() for i in range(segment.shape[-1])]
        arr = np.float32(segment)
        pixels = arr.reshape((-1, 3))

        # kmeans clustering
        _, labels, centroids = cv2.kmeans(pixels, self.n_colors, None, self.criteria, 10, self.flags)

        palette = np.uint8(centroids)
        quantized = palette[labels.flatten()]
        quantized = quantized.reshape(segment.shape)

        dominant_color = palette[np.argmax(itemfreq(labels)[:, -1])]
        return dominant_color

    def getEdgeMeanColors(self, frame, top_length_pixels, side_length_pixels, perimeter_length_pixels, perimeter_depth):
        # assuming LEDs are evenly distributed, find number for each edge of ROI
        top_num_leds = self._numLEDs * (top_length_pixels / perimeter_length_pixels)
        side_num_leds = self._numLEDs * (side_length_pixels / perimeter_length_pixels)

        top_segment_length = top_length_pixels / self._topSegments
        side_segment_length = side_length_pixels / self._sideSegments

        # results keyed by edge
        data = {}
        data['top'] = [None] * self._topSegments
        data['right'] = [None] * self._sideSegments
        data['bottom'] = [None] * self._topSegments
        data['left'] = [None] * self._sideSegments

        for i in range(0, self._topSegments):
            ix = int(self.ix + i * top_segment_length)
            fx = int(self.ix + (i + 1) * top_segment_length)
            iy = int(self.iy)
            fy = int(self.iy + perimeter_depth)
            c = self.getMeanColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['top'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy - (10 + perimeter_depth)), (fx, fy - perimeter_depth),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.fx - perimeter_depth)
            fx = int(self.fx)
            iy = int(self.iy + i * side_segment_length)
            fy = int(self.iy + (i + 1) * side_segment_length)
            c = self.getMeanColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['right'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix + perimeter_depth, iy), (fx + (10 + perimeter_depth), fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._topSegments):
            ix = int(self.fx - (i + 1) * top_segment_length)
            fx = int(self.fx - i * top_segment_length)
            iy = int(self.fy - perimeter_depth)
            fy = int(self.fy)
            c = self.getMeanColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['bottom'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy + perimeter_depth), (fx, fy + (10 + perimeter_depth)),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.ix)
            fx = int(self.ix + perimeter_depth)
            iy = int(self.fy - (i + 1) * side_segment_length)
            fy = int(self.fy - i * side_segment_length)
            c = self.getMeanColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['left'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix - (10 + perimeter_depth), iy), (fx - perimeter_depth, fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        return data

    def getEdgeDominantColors(self, frame, top_length_pixels, side_length_pixels, perimeter_length_pixels, perimeter_depth):
        top_segment_length = top_length_pixels / self._topSegments
        side_segment_length = side_length_pixels / self._sideSegments

        data = {}
        data['top'] = [None] * self._topSegments
        data['right'] = [None] * self._sideSegments
        data['bottom'] = [None] * self._topSegments
        data['left'] = [None] * self._sideSegments

        for i in range(0, self._topSegments):
            ix = int(self.ix + i * top_segment_length)
            fx = int(self.ix + (i + 1) * top_segment_length)
            iy = int(self.iy)
            fy = int(self.iy + perimeter_depth)
            c = self.getDominantSegmentColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['top'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy - (10 + perimeter_depth)), (fx, fy - perimeter_depth),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.fx - perimeter_depth)
            fx = int(self.fx)
            iy = int(self.iy + i * side_segment_length)
            fy = int(self.iy + (i + 1) * side_segment_length)
            c = self.getDominantSegmentColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['right'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix + perimeter_depth, iy), (fx + (10 + perimeter_depth), fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._topSegments):
            ix = int(self.fx - (i + 1) * top_segment_length)
            fx = int(self.fx - i * top_segment_length)
            iy = int(self.fy - perimeter_depth)
            fy = int(self.fy)
            c = self.getDominantSegmentColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['bottom'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy + perimeter_depth), (fx, fy + (10 + perimeter_depth)),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.ix)
            fx = int(self.ix + perimeter_depth)
            iy = int(self.fy - (i + 1) * side_segment_length)
            fy = int(self.fy - i * side_segment_length)
            c = self.getDominantSegmentColor(cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['left'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix - (10 + perimeter_depth), iy), (fx - perimeter_depth, fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        return data

    def getRGBHexString(self, bgr):
        # zero-pad each channel so every color is six hex digits
        return "%02x%02x%02x" % (bgr[2], bgr[1], bgr[0])

    def getDominantColor(self, img, ix, fx, iy, fy):
        ix = int(ix)
        fx = int(fx)
        iy = int(iy)
        fy = int(fy)

        average_color = [img[iy:fy, ix:fx, i].mean() for i in range(img.shape[-1])]
        arr = np.float32(img)
        pixels = arr.reshape((-1, 3))

        # kmeans clustering
        _, labels, centroids = cv2.kmeans(pixels, self.n_colors, None, self.criteria, 10, self.flags)

        palette = np.uint8(centroids)
        quantized = palette[labels.flatten()]
        quantized = quantized.reshape(img.shape)

        dominant_color = palette[np.argmax(itemfreq(labels)[:, -1])]
        return dominant_color

    def initializeCamera(self):
        # open first webcam available
        self.video = cv2.VideoCapture(0)
        if not self.video.isOpened():
            self.video.open(0)

        # set the resolution from config
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, self._captureWidth)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, self._captureHeight)

    def getROI(self, event, x, y, flags, frame):
        framecopy = frame.copy()
        if event == cv2.EVENT_LBUTTONDOWN:
            self.clicking = True
            self.ix, self.iy = x, y
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.clicking:
                cv2.rectangle(framecopy, (self.ix, self.iy), (x, y), (0, 255, 0), -1)
                cv2.imshow('Set ROI', framecopy)
        elif event == cv2.EVENT_LBUTTONUP:
            self.clicking = False
            cv2.rectangle(framecopy, (self.ix, self.iy), (x, y), (0, 255, 0), -1)
            cv2.imshow('Set ROI', framecopy)
            self.fx, self.fy = x, y
            self.boundSet = True

    def processQueue(self):
        self.logger.info("Starting to watch collection point inbound message queue")
        while self.alive:
            if not self.inQueue.empty():
                self.logger.info("Queue size is %s" % self.inQueue.qsize())
                try:
                    message = self.inQueue.get(block=False, timeout=1)
                    if message is not None:
                        if message == "SHUTDOWN":
                            self.logger.info("SHUTDOWN command handled on %s" % __name__)
                            self.shutdown()
                        else:
                            self.handleMessage(message)
                except Exception as e:
                    self.logger.error("Unable to read queue, error: %s " % e)
                    self.shutdown()
                self.logger.info("Queue size is %s after" % self.inQueue.qsize())
            else:
                time.sleep(.25)

    def handleMessage(self, message):
        self.logger.info("handleMessage not implemented!")

    def putCPMessage(self, data, type):
        if type == "off":
            # Send off message
            self.logger.info('Sending off message')
            msg = CollectionPointEvent(self._collectionPointId, self._collectionPointType, 'off', None)
            self.outQueue.put(msg)
        elif type == "light-edges":
            # Reset collection start time
            collectionStart = time.time()
            self.logger.info('Sending light message')
            msg = CollectionPointEvent(self._collectionPointId, self._collectionPointType, 'light-edges', data)
            self.outQueue.put(msg)
        elif type == "light-dominant":
            # Reset collection start time
            collectionStart = time.time()
            self.logger.info('Sending light message')
            msg = CollectionPointEvent(self._collectionPointId, self._collectionPointType, 'light-dominant', data)
            self.outQueue.put(msg)

    def shutdown(self):
        self.alive = False
        self.logger.info("Shutting down")
        # self.putCPMessage(None, 'off')
        cv2.destroyAllWindows()
        time.sleep(1)
        self.exit = True

    def get_opencv_version(self):
        import cv2 as lib
        return lib.__version__

    def check_opencv_version(self, major, lib=None):
        # if the supplied library is None, import OpenCV
        if lib is None:
            import cv2 as lib
        # return whether or not the current OpenCV version matches the major version number
        return lib.__version__.startswith(major)
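# Worked example of the LED distribution used by run() and getColorString() above
# (the pixel dimensions are illustrative, the default 60 LEDs and 3/2 segments come
# from the constants in __init__):
top_length_pixels, side_length_pixels, numLEDs = 1000, 500, 60
perimeter = 2 * (top_length_pixels + side_length_pixels)            # 3000 px
top_num_leds = numLEDs * (top_length_pixels / perimeter)            # 20.0 LEDs per top/bottom edge
side_num_leds = numLEDs * (side_length_pixels / perimeter)          # 10.0 LEDs per side edge
colorData = {'top': ['ff0000'] * 3, 'right': ['00ff00'] * 2,
             'bottom': ['0000ff'] * 3, 'left': ['ffffff'] * 2}
# Each top/bottom segment color repeats int(20 / 3) = 6 times and each side segment
# color repeats int(10 / 2) = 5 times, producing a delimiter-separated string like
# 'ff0000;ff0000;ff0000;ff0000;ff0000;ff0000;...;ffffff;' ready for the LED strip.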
class BtleThreadCollectionPoint(object):

    def __init__(self, clientEventHandler, btleConfig, loggingQueue, debugMode=False):
        # Logger
        self.loggingQueue = loggingQueue
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

        self.btleConfig = btleConfig
        self.clientEventHandler = clientEventHandler
        self.debug = debugMode

        # define basic BGAPI parser
        self.bgapi_rx_buffer = []
        self.bgapi_rx_expected_length = 0

    def start(self):
        packet_mode = False

        # create BGLib object
        self.ble = BGLib()
        self.ble.packet_mode = packet_mode
        self.ble.debug = self.debug

        # add handler for BGAPI timeout condition (hopefully won't happen)
        self.ble.on_timeout += self.my_timeout
        # on busy handler
        self.ble.on_busy = self.on_busy
        # add handler for the gap_scan_response event
        self.ble.ble_evt_gap_scan_response += self.clientEventHandler

        # create serial port object and flush buffers
        self.logger.info("Establishing serial connection to BLED112 on com port %s at baud rate %s"
                         % (self.btleConfig['BtleDeviceId'], self.btleConfig['BtleDeviceBaudRate']))
        self.serial = Serial(port=self.btleConfig['BtleDeviceId'],
                             baudrate=self.btleConfig['BtleDeviceBaudRate'],
                             timeout=1)
        self.serial.flushInput()
        self.serial.flushOutput()

        # disconnect if we are connected already
        self.ble.send_command(self.serial, self.ble.ble_cmd_connection_disconnect(0))
        self.ble.check_activity(self.serial, 1)

        # stop advertising if we are advertising already
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_set_mode(0, 0))
        self.ble.check_activity(self.serial, 1)

        # stop scanning if we are scanning already
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_end_procedure())
        self.ble.check_activity(self.serial, 1)

        # set the TX power
        # range 0 to 15 (real TX power from -23 to +3dBm)
        # self.ble.send_command(self.serial, self.ble.ble_cmd_hardware_set_txpower(self.btleConfig['btleDeviceTxPower']))
        # self.ble.check_activity(self.serial, 1)

        # ble_cmd_connection_update connection: 0 (0x00) interval_min: 30 (0x001e)
        # interval_max: 46 (0x002e) latency: 0 (0x0000) timeout: 100 (0x0064)
        # interval_min 6-3200
        # interval_max 6-3200
        # latency 0-500
        # timeout 10-3200
        self.ble.send_command(self.serial,
                              self.ble.ble_cmd_connection_update(0x00, 0x001e, 0x002e, 0x0000, 0x0064))
        self.ble.check_activity(self.serial, 1)

        # set scan parameters
        #
        # scan_interval 0x4 - 0x4000
        #   Scan interval defines the interval when scanning is re-started, in units of 625us.
        #   Range: 0x4 - 0x4000, default: 0x4B (75ms).
        #   After every scan interval the scanner changes the frequency it operates at,
        #   cycling through all three advertisement channels in a round robin fashion.
        #   According to the Bluetooth specification all three channels must be used by a scanner.
        #
        # scan_window 0x4 - 0x4000
        #   Scan window defines how long the scanner listens on a certain frequency
        #   and tries to pick up advertisement packets, in units of 625us.
        #   Range: 0x4 - 0x4000, default: 0x32 (50ms).
        #   The scan window must be equal to or smaller than the scan interval.
        #   If the scan window equals the scan interval the module scans at a 100% duty cycle;
        #   if it is half the scan interval the module scans at a 50% duty cycle.
        #
        # active: 1=active, 0=passive
        #   1: Active scanning is used. When an advertisement packet is received the
        #      Bluetooth stack sends a scan request packet to the advertiser to try to
        #      read the scan response data.
        #   0: Passive scanning is used. No scan request is made.
        # self.ble.send_command(self.serial, self.ble.ble_cmd_gap_set_scan_parameters(0x4B, 0x32, 1))
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_set_scan_parameters(0xC8, 0xC8, 0))
        self.ble.check_activity(self.serial, 1)

        # start scanning now
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_discover(1))
        self.ble.check_activity(self.serial, 1)

    # handler to notify of an API parser timeout condition
    def my_timeout(self, sender, args):
        self.logger.error("BGAPI timed out. Make sure the BLE device is in a known/idle state.")
        # might want to try the following lines to reset, though it probably
        # wouldn't work at this point if it's already timed out:
        self.ble.send_command(self.serial, self.ble.ble_cmd_system_reset(0))
        self.ble.check_activity(self.serial, 1)
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_discover(1))
        self.ble.check_activity(self.serial, 1)

    def on_busy(self, sender, args):
        self.logger.warn("BGAPI device is busy.")

    def scan(self):
        # check for all incoming data (no timeout, non-blocking)
        self.ble.check_activity(self.serial)
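# Quick check of the scan parameters chosen above, ble_cmd_gap_set_scan_parameters(0xC8, 0xC8, 0):
# 0xC8 = 200 units of 625us, i.e. a 125 ms scan interval and a 125 ms scan window, which per
# the comments above means a 100% duty cycle, with passive scanning (no scan requests sent).
scan_interval_ms = 0xC8 * 0.625   # 125.0 ms
scan_window_ms = 0xC8 * 0.625     # 125.0 ms
duty_cycle = scan_window_ms / scan_interval_ms  # 1.0 -> 100%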
class CollectionPoint(Thread): """ Sample class to show basic structure of collecting data and passing it to communication channels """ def __init__(self,baseConfig,pOutBoundEventQueue, pInBoundEventQueue, loggingQueue): # Standard initialization that most collection points would do super(CollectionPoint, self).__init__() self.alive = True self.config = baseConfig self.outBoundEventQueue = pOutBoundEventQueue self.inBoundEventQueue = pInBoundEventQueue self.logger = ThreadsafeLogger(loggingQueue,__name__) # Initialize collection point specific variables self.video = None # Set constants from config self._collectionPointId = self.config['CollectionPointId'] self._collectionPointType = self.config['CollectionPointType'] self._testMode = self.config['TestMode'] if not self.check_opencv_version("3.",cv2): self.logger.critical("open CV is the wrong version {0}. We require version 3.x".format(self.get_opencv_version())) def run(self): """ Sample run function for a collection point class. Starting point for when the thread is start()'d from main.py Extend this to create your own, or understand how to perform specific actions. """ # Start a thread to monitor the inbound queue self.threadProcessQueue = Thread(target=self.processQueue) self.threadProcessQueue.setDaemon(True) self.threadProcessQueue.start() # Load the OpenCV classifier to detect faces faceCascade = cv2.CascadeClassifier('./classifiers/haarcascades/haarcascade_frontalface_default.xml') tracker = cv2.Tracker_create("KCF") # Get first camera connected video = cv2.VideoCapture(0) if not video.isOpened(): video.open() # Set resolution of capture video.set(cv2.CAP_PROP_FRAME_WIDTH, 1080) video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) ok, frame = video.read() if not ok: self.logger.error('Cannot read video file') self.shutdown() while self.alive: # Read a new frame ok, frame = video.read() if not ok: self.logger.error('Cannot read video file') break # Convert to grayscale grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Copy the frame to allow manipulation outputImage = frame.copy() # Detect faces faces = faceCascade.detectMultiScale( grayFrame, scaleFactor=1.1, minNeighbors=5, minSize=(5, 5) ) self.logger.info("Found " + str(len(faces)) + " faces") # Draw a rectangle around each face for (x, y, w, h) in faces: cv2.rectangle(outputImage, (x, y), (x+w, y+h), (0, 255, 0), 2) if self._testMode: if len(faces) > 0: msg = CollectionPointEvent(self._collectionPointId,self._collectionPointType,('Found {0} faces'.format(len(faces)))) self.outBoundEventQueue.put(msg) # Display the image cv2.imshow("Faces found", outputImage) ch = 0xFF & cv2.waitKey(1) if ch == 27: #esc key self.shutdown() break video.release() def shutdown(self): self.logger.info("Shutting down collection point") cv2.destroyAllWindows() self.threadProcessQueue.join() self.alive = False time.sleep(1) self.exit = True def get_opencv_version(self): import cv2 as lib return lib.__version__ def check_opencv_version(self,major, lib=None): # if the supplied library is None, import OpenCV if lib is None: import cv2 as lib # return whether or not the current OpenCV version matches the # major version number return lib.__version__.startswith(major)
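# Standalone sketch of the detection step used in run() above: grab one frame,
# convert it to grayscale and run the Haar cascade. Assumes OpenCV 3.x and the same
# cascade path as the class; camera index 0 is illustrative.
import cv2

cascade = cv2.CascadeClassifier('./classifiers/haarcascades/haarcascade_frontalface_default.xml')
capture = cv2.VideoCapture(0)
ok, frame = capture.read()
if ok:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(5, 5))
    print('Found %d faces' % len(faces))
capture.release()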
class MQTTClientModule(Thread): """ Threaded MQTT client for processing and publishing outbound messages""" def __init__(self, baseConfig, pInBoundEventQueue, pOutBoundEventQueue, loggingQueue): super(MQTTClientModule, self).__init__() self.config = baseConfig self.alive = True self.inQueue = pInBoundEventQueue # Constants self._keepAlive = self.config['MqttKeepAlive'] self._feedName = self.config['MqttFeedName'] self._username = self.config['MqttUsername'] self._key = self.config['MqttKey'] self._host = self.config['MqttHost'] self._port = self.config['MqttPort'] self._publishJson = self.config['MqttPublishJson'] self._publishFaceValues = self.config['MqttPublishFaceValues'] # MQTT setup self._client = mqtt.Client() self._client.username_pw_set(self._username, self._key) self._client.on_connect = self.onConnect self._client.on_disconnect = self.onDisconnect self._client.on_message = self.onMessage self.mqttConnected = False # Logging setup self.logger = ThreadsafeLogger(loggingQueue, "MQTT") def onConnect(self, client, userdata, flags, rc): self.logger.debug('MQTT onConnect called') # Result code 0 is success if rc == 0: self.mqttConnected = True # Subscribe to feed here else: self.logger.error('MQTT failed to connect: %s' % rc) raise RuntimeError('MQTT failed to connect: %s' % rc) def onDisconnect(self, client, userdata, rc): self.logger.debug('MQTT onDisconnect called') self.mqttConnected = False if rc != 0: self.logger.debug('MQTT disconnected unexpectedly: %s' % rc) self.handleReconnect(rc) def onMessage(self, client, userdata, msg): self.logger.debug('MQTT onMessage called for client: %s' % client) def connect(self): """ Connect to MQTT broker Skip calling connect if already connected. """ if self.mqttConnected: return self._client.connect(self._host, port=self._port, keepalive=self._keepAlive) def disconnect(self): """ Check if connected""" if self.mqttConnected: self._client.disconnect() def subscribe(self, feed=False): """Subscribe to feed, defaults to feed specified in config""" if not feed: feed = _feedName self._client.subscribe('{0}/feeds/{1}'.format(self._username, feed)) def publish(self, value, feed=False): """Publish a value to a feed""" if not feed: feed = _feedName self._client.publish('{0}/feeds/{1}'.format(self._username, feed), payload=value) def publishFaceValues(self, message): """ Publish face detection values to individual MQTT feeds Parses _extendedData.predictions.faceAttributes property Works with Azure face API responses and """ try: for face in message._extendedData['predictions']: faceAttrs = face['faceAttributes'] for key in faceAttrs: if type(faceAttrs[key]) is dict: val = self.flattenDict(faceAttrs[key]) print('val: ', val) else: val = faceAttrs[key] self.publish(val, key) except Exception as e: self.logger.error('Error publishing values: %s' % e) def flattenDict(self, aDict): """ Get average of simple dictionary of numerical values """ try: val = float(sum(aDict[key] for key in aDict)) / len(aDict) except Exception as e: self.logger.error('Error flattening dict, returning 0: %s' % e) return val or 0 def publishJsonMessage(self, message): msg_str = self.stringifyMessage(message) self.publish(msg_str) def stringifyMessage(self, message): """ Dump into JSON string """ return json.dumps(message.__dict__).encode('utf8') def processQueue(self): self.logger.info('Processing queue') while self.alive: # Pump the loop self._client.loop(timeout=1) if (self.inQueue.empty() == False): try: message = self.inQueue.get(block=False, timeout=1) if message is not None and 
self.mqttConnected:
                        if message == "SHUTDOWN":
                            self.logger.debug("SHUTDOWN command handled")
                            self.shutdown()
                        else:
                            # Send message as string or split into channels
                            if self._publishJson:
                                self.publishJsonMessage(message)
                            elif self._publishFaceValues:
                                self.publishFaceValues(message)
                            else:
                                self.publishValues(message)
                except Exception as e:
                    self.logger.error("MQTT unable to read queue : %s " % e)
            else:
                time.sleep(.25)

    def shutdown(self):
        self.logger.info("Shutting down MQTT %s" % (mp.current_process().name))
        self.alive = False
        time.sleep(1)
        self.exit = True

    def run(self):
        """ Thread start method"""
        self.logger.info("Running MQTT")
        self.connect()
        self.alive = True
        # Start queue loop
        self.processQueue()
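# Wiring sketch for MQTTClientModule (values are illustrative): the config keys
# match the ones read in __init__ above, and "SHUTDOWN" is the sentinel handled
# in processQueue(). Host and feed names follow the '{username}/feeds/{feed}'
# topic scheme used by publish()/subscribe(); swap in real broker credentials.
import multiprocessing as mp

loggingQueue = mp.Queue()
eventQueue = mp.Queue()              # events produced elsewhere, consumed by MQTT
mqttConfig = {
    'MqttKeepAlive': 60, 'MqttFeedName': 'events',
    'MqttUsername': 'someuser', 'MqttKey': 'somekey',
    'MqttHost': 'broker.example.com', 'MqttPort': 1883,
    'MqttPublishJson': True, 'MqttPublishFaceValues': False,
}
mqttModule = MQTTClientModule(mqttConfig, eventQueue, None, loggingQueue)
mqttModule.start()                   # run() connects and enters processQueue()
eventQueue.put("SHUTDOWN")           # ask the thread to shut down cleanly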
class Tracker():
    def __init__(self, bbox, frame, kind, moduleConfig, loggingQueue):
        """ Create and initialize a new Tracker.
        Set up constants and parameters.
        """
        # Setup logging queue first so unsupported tracker kinds can be reported
        self.logger = ThreadsafeLogger(loggingQueue, __name__)
        if kind in ["KCF", "MIL", "MEDIANFLOW", "GOTURN", "TLD", "BOOSTING"]:
            self.tracker = cv2.Tracker_create(kind)
            self.tracker.init(frame, (bbox['x'], bbox['y'], bbox['w'], bbox['h']))
            self.created = time()
            self.bbox = (bbox['x'], bbox['y'], bbox['w'], bbox['h'])
            self.velocity = (0, 0)
            self.updateTime = self.created
            self.config = moduleConfig
            # Constants
            self._useVelocity = self.config['UseVelocity']
            self._horizontalVelocityBuffer = self.config['HorizontalVelocityBuffer']
            self._verticalVelocityBuffer = self.config['VerticalVelocityBuffer']
        else:
            self.logger.error("Type %s not supported by mTracker" % kind)

    def getCreated(self):
        """ Get created time """
        return self.created

    def right(self):
        """ Get right bound of tracker """
        return self.bbox[0] + self.bbox[2]

    def top(self):
        """ Get top bound of tracker """
        return self.bbox[1] + self.bbox[3]

    def bottom(self):
        """ Get bottom bound of tracker """
        return self.bbox[1]

    def left(self):
        """ Get left bound of tracker """
        return self.bbox[0]

    def area(self):
        """ Get area of tracker bounding box """
        return abs(self.right() - self.left()) * abs(self.top() - self.bottom())

    def update(self, frame):
        """ Update tracker.
        If velocity hack is being used, calculate the new velocity of the midpoint.
        """
        ok, bbox = self.tracker.update(frame)
        if self._useVelocity:
            # Set velocity (pixels/sec)
            deltaT = time() - self.updateTime
            centreNow = ((bbox[0] + bbox[2] / 2), (bbox[1] + bbox[3] / 2))
            centreLast = ((self.bbox[0] + self.bbox[2] / 2), (self.bbox[1] + self.bbox[3] / 2))
            Vx = (centreNow[0] - centreLast[0]) / deltaT
            Vy = (centreNow[1] - centreLast[1]) / deltaT
            self.velocity = (Vx, Vy)
            self.logger.debug('New velocity: %s, %s' % (self.velocity[0], self.velocity[1]))
        self.updateTime = time()
        self.bbox = bbox
        return ok, bbox

    def getProjectedLocation(self, time):
        """ Get the estimated location of the bounding box, based on previous velocity. """
        deltaT = max((time - self.updateTime), 1)
        centreNow = ((self.bbox[0] + self.bbox[2] / 2), (self.bbox[1] + self.bbox[3] / 2))
        projectedX = centreNow[0] + (self.velocity[0] * deltaT)
        projectedY = centreNow[1] + (self.velocity[1] * deltaT)
        return (projectedX, projectedY)

    def getVelocityBuffer(self):
        ''' Another hack to improve low frame rate tracking.
        "Spread" out the bounding box based on velocity.
        '''
        return (abs(self.velocity[0]) * self._horizontalVelocityBuffer,
                abs(self.velocity[1]) * self._verticalVelocityBuffer)
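# Worked example of the velocity projection used by getProjectedLocation() above
# (plain numbers, no OpenCV needed; values are illustrative).
bbox = (100, 50, 40, 40)                      # x, y, w, h in pixels
velocity = (20.0, -5.0)                       # pixels/second from the last update
deltaT = 0.5                                  # seconds since the last update
centre = (bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2)
projected = (centre[0] + velocity[0] * deltaT,
             centre[1] + velocity[1] * deltaT)
print(projected)                              # (130.0, 67.5)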
class BtleRegisteredClient: #part of interface for Registered Client def __init__(self, detectedClient, collectionPointConfig, loggingQueue): # Logger self.loggingQueue = loggingQueue self.logger = ThreadsafeLogger(loggingQueue, __name__) self.logger = logging.getLogger( 'btleRegisteredClient.BtleRegisteredClient') self.clientEventLogger = logging.getLogger( 'btleRegisteredClient.BtleEventTesting') self.clientEventSendLogger = logging.getLogger('eventSend') self.clientInRangeTrigerCount = 2 self.lastTimeMessageClientInWasSentToController = -1 self.lastTimeMessageClientOutWasSentToController = -1 self.__countClientInRange = 0 self.__countClientOutOfRange = 0 self.timeInCollectionPointInMilliseconds = 0 self.firstRegisteredTime = time.time() self.collectionPointConfig = collectionPointConfig self.__clientOutThresholdMin = int( self.collectionPointConfig['BtleRssiClientInThreshold'] + (self.collectionPointConfig['BtleRssiClientInThreshold'] * self.collectionPointConfig['BtleRssiErrorVariance'])) self.handleNewDetectedClientEvent( detectedClient ) #standard shared methods when we see a detected client #part of interface for Registered Client def updateWithNewDectedClientData(self, detectedClient): self.timeInCollectionPointInMilliseconds = time.time( ) - self.firstRegisteredTime self.handleNewDetectedClientEvent( detectedClient ) #standard shared methods when we see a detected client #Common methods are handled here for updateWithNewDectedClientData and init def handleNewDetectedClientEvent(self, detectedClient): self.lastRegisteredTime = time.time() self.detectedClient = detectedClient self.txPower = detectedClient.extraData['tx'] self.beaconId = detectedClient.extraData['udid'] #TODO HACK FIX self.incrementInternalClientEventCounts(detectedClient) def incrementInternalClientEventCounts(self, detectedClient): self.clientEventLogger.debug( "==================================== EVENT COUNTS DATA START ====================================" ) self.clientEventLogger.debug( "Counts before inCount %i : outCount %i" % (self.__countClientInRange, self.__countClientOutOfRange)) #self.clientEventLogger.debug("rssi types") #self.clientEventLogger.debug("type of self.detectedClient.extraData['rssi'] = %s" %type(self.detectedClient.extraData['rssi'])) #self.clientEventLogger.debug("type of self.detectedClient.extraData['rssi'] = %s" %type(self.detectedClient.extraData['rssi'])) #self.clientEventLogger.debug("type of self.collectionPointConfig['btleRssiClientInThreshold'] = %s " %type(self.collectionPointConfig['btleRssiClientInThreshold'])) #self.clientEventLogger.debug("type of self.__clientOutThresholdMin = %s " %type(self.__clientOutThresholdMin)) if self.collectionPointConfig['gatewayType'] == 'proximity': #check threshold type if self.collectionPointConfig[ 'btleRssiClientInThresholdType'] == 'rssi': #are they in or are they out of range --- increament internal count. 
we use the count to normalize the events even more #self.logger.debug("rssi average %i > btleRssi threshold %i: %s" %(self.getRssiAverage(),self.collectionPointConfig['btleRssiClientInThreshold'],self.getRssiAverage() > self.collectionPointConfig['btleRssiClientInThreshold'])) self.clientEventLogger.debug("Registered Client Event") self.clientEventLogger.debug("UDID is %s " % self.getUdid()) self.clientEventLogger.debug("Beacon ID is %s " % self.beaconId) self.clientEventLogger.debug( "RSSI %i" % self.detectedClient.extraData['rssi']) self.clientEventLogger.debug( "BTLE RSSI client in threshold %i" % self.collectionPointConfig['BtleRssiClientInThreshold']) self.clientEventLogger.debug( "BTLE RSSI client out threshold %i" % self.__clientOutThresholdMin) if self.detectedClient.extraData[ 'rssi'] >= self.collectionPointConfig[ 'BtleRssiClientInThreshold']: self.__countClientInRange = self.__countClientInRange + 1 self.__countClientOutOfRange = 0 self.clientEventLogger.debug("CLIENT IN RANGE>>>>>>>>>>>") else: if self.detectedClient.extraData[ 'rssi'] <= self.__clientOutThresholdMin: self.__countClientOutOfRange = self.__countClientOutOfRange + 1 #self.__countClientInRange = 0 self.clientEventLogger.debug( "CLIENT OUT OF RANGE<<<<<<<<<<<") else: self.clientEventLogger.debug( "CLIENT IN BUFFER AREA==========") self.clientEventLogger.debug( "Counts after inCount %i : outCount %i" % (self.__countClientInRange, self.__countClientOutOfRange)) self.clientEventLogger.debug( "==================================== EVENT COUNTS DATA END ====================================" ) self.clientEventLogger.debug("") #part of interface for Registered Client def shouldSendClientInEvent(self): if self.collectionPointConfig['gatewayType'] == 'proximity': #we compare on seconds so we need to adjust this to seconds proximityEventIntervalInSeconds = ( self. collectionPointConfig['ProximityEventIntervalInMilliseconds'] / 1000) timeDiff = math.trunc( time.time() - self.lastTimeMessageClientInWasSentToController) self.logger.debug("shouldSendClientInEvent timeDiff %f > %s" % (timeDiff, proximityEventIntervalInSeconds)) if timeDiff > proximityEventIntervalInSeconds: if self.__countClientInRange > self.clientInRangeTrigerCount: self.logClientEventSend( "SHOULD ClientIN event to controller for") self.zeroEventRangeCounters() return True #TODO add in other types of gateway types return False #part of interface for Registered Client def shouldSendClientOutEvent(self): if self.collectionPointConfig['gatewayType'] == 'proximity': #we compare on seconds so we need to adjust this to seconds proximityEventIntervalInSeconds = ( self. collectionPointConfig['ProximityEventIntervalInMilliseconds'] / 1000) #check the time to see if we need to send a message #have we ever sent an IN event? if not we dont need to send an out event if self.lastTimeMessageClientInWasSentToController > 0: #check timing on last event sent #self.logger.debug("shouldSendClientOutEvent lastTimeMessageClientOutWasSentToController=%f"%self.lastTimeMessageClientOutWasSentToController) timeDiff = time.time( ) - self.lastTimeMessageClientOutWasSentToController #have we sent a client out since the last client in? if so we dont need to throw another if self.lastTimeMessageClientOutWasSentToController < self.lastTimeMessageClientInWasSentToController: #do we have enought qualifying out events. 
we dont want to throw one too soon if self.__countClientOutOfRange >= self.collectionPointConfig[ 'BtleClientOutCountThreshold']: self.logClientEventSend( "SHOULD ClientOUT event to controller for") self.zeroEventRangeCounters() return True #lets check to see if we need to clean up the out count --- not sure this is the best idea else: if self.__countClientOutOfRange > self.collectionPointConfig[ 'BtleClientOutCountThreshold']: self.clientEventLogger.debug( "Client out count %i is past max. Resetting." % self.__countClientOutOfRange) self.__countClientOutOfRange = 0 else: #lets check to see if we need to clean up the out count --- not sure this is the best idea if self.__countClientOutOfRange > self.collectionPointConfig[ 'BtleClientOutCountThreshold']: self.clientEventLogger.debug( "Client out count %i is past max. Resetting." % self.__countClientOutOfRange) self.__countClientOutOfRange = 0 #TODO add in other types of gateway types return False #part of interface for Registered Client def sweepShouldSendClientOutEvent(self): if self.collectionPointConfig['gatewayType'] == 'proximity': #has an out event already been sent? if so we dont need to throw another on sweep if self.lastTimeMessageClientOutWasSentToController > 0: #was there a in event sent after the last out? if self.lastTimeMessageClientInWasSentToController > self.lastTimeMessageClientOutWasSentToController: self.logClientEventSend( "Sweep case a is sending ClientOUT on") self.zeroEventRangeCounters() return True else: return False else: self.logClientEventSend("Sweep case b is sending ClientOUT on") self.zeroEventRangeCounters() return True #TODO add in other types of gateway types return True #part of interface for Registered Client def getUdid(self): return self.detectedClient.extraData["beaconMac"] def getTxPower(self): return self.txPower #zero out the BTLE event counters def zeroEventRangeCounters(self): self.__countClientOutOfRange = 0 self.__countClientInRange = 0 def logClientEventSend(self, message): self.clientEventSendLogger.debug("") self.clientEventSendLogger.debug( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" ) self.clientEventSendLogger.debug( "%%%%%%%%%%%%%%%%%% %s %%%%%%%%%%%%%%%%%%" % message) self.clientEventSendLogger.debug("UDID is %s " % self.getUdid()) self.clientEventSendLogger.debug("Beacon ID is %s " % self.beaconId) self.clientEventSendLogger.debug("RSSI %i" % self.detectedClient.extraData['rssi']) self.clientEventSendLogger.debug( "BTLE RSSI client in threshold %i" % self.collectionPointConfig['BtleRssiClientInThreshold']) self.clientEventSendLogger.debug("BTLE RSSI client out threshold %i" % self.__clientOutThresholdMin) self.clientEventSendLogger.debug( "inCount %i : outCount %i" % (self.__countClientInRange, self.__countClientOutOfRange)) self.clientEventSendLogger.debug( "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" ) self.clientEventSendLogger.debug("") #part of interface for Registered Client def getExtenedDataForEvent(self): extraData = {} extraData['lastRegisteredTime'] = self.lastRegisteredTime extraData['firstRegisteredTime'] = self.firstRegisteredTime extraData[ 'lastTimeMessageClientInWasSentToController'] = self.lastTimeMessageClientInWasSentToController extraData[ 'lastTimeMessageClientOutWasSentToController'] = self.lastTimeMessageClientOutWasSentToController extraData[ 'timeInCollectionPointInMilliseconds'] = self.timeInCollectionPointInMilliseconds extraData['rssi'] = self.detectedClient.extraData['rssi'] 
        extraData['averageRssi'] = self.detectedClient.extraData['rssi']
        extraData['txPower'] = self.getTxPower()  #TODO INSTALL FIX
        extraData['beaconId'] = self.beaconId
        return extraData

    #part of interface for Registered Client
    def setClientInMessageSentToController(self):
        self.lastTimeMessageClientInWasSentToController = time.time()
        self.__countClientInRange = 0

    #part of interface for Registered Client
    def setClientOutMessageSentToController(self):
        self.lastTimeMessageClientOutWasSentToController = time.time()
        self.__countClientOutOfRange = 0
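# Worked example of the RSSI thresholds computed in BtleRegisteredClient.__init__
# above (threshold and variance values are illustrative). Readings at or above the
# "in" threshold count toward the in-range counter, readings at or below the derived
# "out" threshold count toward the out-of-range counter, and anything in between
# falls in the buffer zone.
btleRssiClientInThreshold = -68
btleRssiErrorVariance = 0.2
clientOutThresholdMin = int(btleRssiClientInThreshold +
                            (btleRssiClientInThreshold * btleRssiErrorVariance))
print(clientOutThresholdMin)   # -81: at or below -81 is "out", at or above -68 is "in"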
import time
import multiprocessing as mp  # needed for mp.Queue() below
from loggingEngine import LoggingEngine
from threadsafeLogger import ThreadsafeLogger
import msvcrt
from select import select
import configLoader

# List of threads to handle
threads = []
# Dict of threadsafe queues to handle
queues = {}

# Logging queue setup
loggingQueue = mp.Queue()
logger = ThreadsafeLogger(loggingQueue, "main")

# Logging output engine
loggingEngine = LoggingEngine(loggingQueue=loggingQueue)
threads.append(loggingEngine)
loggingEngine.start()

# Config
baseConfig = configLoader.load(logger)
_collectionModuleNames = baseConfig['CollectionModules']
_communicationModuleNames = baseConfig['CommunicationModules']
_collectionModules = {}
_communicationModules = {}

# Collection point queues
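# Hedged sketch of how the per-module queues and dynamic imports might continue
# from here; the importlib pattern and the queue layout are assumptions, not taken
# from the original main.py.
import importlib

for moduleName in _collectionModuleNames:
    # one inbound and one outbound queue per collection module
    queues[moduleName] = {'in': mp.Queue(), 'out': mp.Queue()}
    try:
        _collectionModules[moduleName] = importlib.import_module(moduleName)
    except ImportError as e:
        logger.error('Unable to import collection module %s: %s' % (moduleName, e))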