class BtleThreadCollectionPoint(object):
    def __init__(self,
                 clientEventHandler,
                 btleConfig,
                 loggingQueue,
                 debugMode=False):
        # Logger
        self.loggingQueue = loggingQueue
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

        self.btleConfig = btleConfig
        self.clientEventHandler = clientEventHandler
        self.debug = debugMode
        # define basic BGAPI parser
        self.bgapi_rx_buffer = []
        self.bgapi_rx_expected_length = 0

    def start(self):
        packet_mode = False

        # create BGLib object
        self.ble = BGLib()
        self.ble.packet_mode = packet_mode
        self.ble.debug = self.debug

        # add handler for BGAPI timeout condition (hopefully won't happen)
        self.ble.on_timeout += self.my_timeout

        # on busy handler
        self.ble.on_busy = self.on_busy

        # add handler for the gap_scan_response event
        self.ble.ble_evt_gap_scan_response += self.clientEventHandler

        # create serial port object and flush buffers
        self.logger.info(
            "Establishing serial connection to BLED112 on com port %s at baud rate %s"
            % (self.btleConfig['BtleDeviceId'],
               self.btleConfig['BtleDeviceBaudRate']))
        self.serial = Serial(port=self.btleConfig['BtleDeviceId'],
                             baudrate=self.btleConfig['BtleDeviceBaudRate'],
                             timeout=1)
        self.serial.flushInput()
        self.serial.flushOutput()

        # disconnect if we are connected already
        self.ble.send_command(self.serial,
                              self.ble.ble_cmd_connection_disconnect(0))
        self.ble.check_activity(self.serial, 1)

        # stop advertising if we are advertising already
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_set_mode(0, 0))
        self.ble.check_activity(self.serial, 1)

        # stop scanning if we are scanning already
        self.ble.send_command(self.serial,
                              self.ble.ble_cmd_gap_end_procedure())
        self.ble.check_activity(self.serial, 1)

        # set the TX power
        # range 0 to 15 (real TX power from -23 to +3dBm)
        #self.ble.send_command(self.serial, self.ble.ble_cmd_hardware_set_txpower(self.btleConfig['btleDeviceTxPower']))
        #self.ble.check_activity(self.serial,1)

        # ble_cmd_connection_update connection: 0 (0x00) interval_min: 30 (0x001e)
        #   interval_max: 46 (0x002e) latency: 0 (0x0000) timeout: 100 (0x0064)
        # interval_min 6-3200
        # interval_max 6-3200
        # latency 0-500
        # timeout 10-3200
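        # Worked example, assuming the standard BLE units (connection interval
        # in 1.25 ms steps, supervision timeout in 10 ms steps):
        #   interval_min 0x001e = 30 * 1.25 ms = 37.5 ms
        #   interval_max 0x002e = 46 * 1.25 ms = 57.5 ms
        #   timeout      0x0064 = 100 * 10 ms  = 1000 ms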
        self.ble.send_command(
            self.serial,
            self.ble.ble_cmd_connection_update(0x00, 0x001e, 0x002e, 0x0000,
                                               0x0064))
        self.ble.check_activity(self.serial, 1)

        # set scan parameters
        #scan_interval 0x4 - 0x4000
        #Scan interval defines the interval when scanning is re-started in units of 625us
        # Range: 0x4 - 0x4000
        # Default: 0x4B (75ms)
        # After every scan interval the scanner will change the frequency it operates
        # at, and it will cycle through all three advertisement channels in a round
        # robin fashion. According to the Bluetooth specification all three channels
        # must be used by a scanner.
        #
        #scan_window 0x4 - 0x4000
        # Scan Window defines how long the scanner will listen on a certain
        # frequency and try to pick up advertisement packets. Scan window is defined
        # in units of 625us
        # Range: 0x4 - 0x4000
        # Default: 0x32 (50 ms)
        # Scan window must be equal to or smaller than the scan interval
        # If scan window is equal to the scan interval value, then the Bluetooth module
        # will be scanning at a 100% duty cycle.
        # If scan window is half of the scan interval value, then the Bluetooth module
        # will be scanning at a 50% duty cycle.
        #
        #active 1=active 0=passive
        # 1: Active scanning is used. When an advertisement packet is received the
        # Bluetooth stack will send a scan request packet to the advertiser to try and
        # read the scan response data.
        # 0: Passive scanning is used. No scan request is made.
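        # Worked example for the values used below (0xC8/0xC8, passive):
        #   interval = window = 200 * 625us = 125 ms, so window/interval gives a
        #   100% duty cycle, and active=0 means no scan requests are sent.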
        #self.ble.send_command(self.serial, self.ble.ble_cmd_gap_set_scan_parameters(0x4B,0x32,1))
        self.ble.send_command(
            self.serial,
            self.ble.ble_cmd_gap_set_scan_parameters(0xC8, 0xC8, 0))
        self.ble.check_activity(self.serial, 1)

        # start scanning now
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_discover(1))
        self.ble.check_activity(self.serial, 1)

    # handler to notify of an API parser timeout condition
    def my_timeout(self, sender, args):
        self.logger.error(
            "BGAPI timed out. Make sure the BLE device is in a known/idle state."
        )
        # might want to try the following lines to reset, though it probably
        # wouldn't work at this point if it's already timed out:
        self.ble.send_command(self.serial, self.ble.ble_cmd_system_reset(0))
        self.ble.check_activity(self.serial, 1)
        self.ble.send_command(self.serial, self.ble.ble_cmd_gap_discover(1))
        self.ble.check_activity(self.serial, 1)

    def on_busy(self, sender, args):
        self.logger.warn("BGAPI device is busy.")

    def scan(self):
        # check for all incoming data (no timeout, non-blocking)
        self.ble.check_activity(self.serial)
class BtleCollectionPoint(Thread):
    def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue,
                 loggingQueue):
        """ Initialize new CamCollectionPoint instance.
        Setup queues, variables, configs, constants and loggers.
        """
        super(BtleCollectionPoint, self).__init__()
        # Queues
        self.outQueue = pOutBoundQueue  #messages from this thread to the main process
        self.inQueue = pInBoundQueue
        self.loggingQueue = loggingQueue
        self.queueBLE = mp.Queue()
        # Configs
        self.moduleConfig = configLoader.load(
            self.loggingQueue)  #Get the config for this module
        self.config = baseConfig

        # Logger
        self.logger = ThreadsafeLogger(loggingQueue, __name__)
        # Variables
        self.registeredClientRegistry = RegisteredClientRegistry(
            self.moduleConfig, self.loggingQueue)
        self.eventManager = EventManager(self.moduleConfig, pOutBoundQueue,
                                         self.registeredClientRegistry,
                                         self.loggingQueue)
        self.alive = True
        self.btleThread = None
        self.BLEThread = None
        self.repeatTimerSweepClients = None

    # main start method
    def run(self):
        # Pause startup to wait for other system services after a restart
        self.logger.info(
            "Pausing execution 15 seconds waiting for other system services to start"
        )
        time.sleep(15)
        self.logger.info(
            "Done with our nap.  Time to start looking for clients")

        self.logger.info('Starting BTLE collection point thread')
        self.btleThread = BlueGigaBtleCollectionPointThread(
            self.queueBLE, self.moduleConfig, self.loggingQueue)
        self.BLEThread = Thread(target=self.btleThread.bleDetect,
                                args=(__name__, 10))
        self.BLEThread.daemon = True
        self.BLEThread.start()
        self.logger.info('BTLE detection thread started')

        #Setup repeat task to run the sweep every X interval
        self.repeatTimerSweepClients = RepeatedTimer(
            (self.moduleConfig['AbandonedClientCleanupIntervalInMilliseconds']
             / 1000), self.registeredClientRegistry.sweepOldClients)

        # Process queue from main thread for shutdown messages
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()
        self.logger.info('Inbound queue monitor started')

        # Read results from the BTLE thread's queue
        while self.alive:
            if not self.queueBLE.empty():
                self.logger.debug('Received detected clients from BTLE thread')
                result = self.queueBLE.get(block=False)
                self.__handleBtleClientEvents(result)
            else:
                time.sleep(.25)

    def processQueue(self):
        self.logger.info(
            "Starting to watch collection point inbound message queue")
        while self.alive:
            if not self.inQueue.empty():
                self.logger.info("Queue size is %s" % self.inQueue.qsize())
                try:
                    message = self.inQueue.get(block=False, timeout=1)
                    if message is not None:
                        if message == "SHUTDOWN":
                            self.logger.info("SHUTDOWN command handled on %s" %
                                             __name__)
                            self.shutdown()
                        else:
                            self.sendOutMessage(message)
                except Exception as e:
                    self.logger.error("Unable to read queue, error: %s " % e)
                    self.shutdown()
                self.logger.info("Queue size is %s after" %
                                 self.inQueue.qsize())
            else:
                time.sleep(.25)

    #handle btle reads
    def __handleBtleClientEvents(self, detectedClients):
        self.logger.debug("doing handleBtleClientEvents: %s" % detectedClients)
        for client in detectedClients:
            self.logger.debug("--- Found client ---")
            self.logger.debug(vars(client))
            self.logger.debug("--- Found client end ---")
            self.eventManager.registerDetectedClient(client)

    def shutdown(self):
        self.logger.info("Shutting down")
        # self.threadProcessQueue.join()
        self.repeatTimerSweepClients.stop()
        self.btleThread.stop()
        self.alive = False
        time.sleep(1)
        self.exit = True
class WebsocketClientModule(Thread):
    def __init__(self, baseConfig, pInBoundEventQueue, pOutBoundEventQueue,
                 loggingQueue):

        super(WebsocketClientModule, self).__init__()
        self.alive = True
        self.config = baseConfig
        self.inQueue = pInBoundEventQueue  # inQueue are messages from the main process to websocket clients
        self.outQueue = pOutBoundEventQueue  # outQueue are messages from clients to main process
        self.websocketClient = None
        self.loggingQueue = loggingQueue
        self.threadProcessQueue = None

        # Constants
        self._port = self.config['WebsocketPort']
        self._host = self.config['WebsocketHost']

        # logging setup
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

    def run(self):
        """ Main thread entry point.

        Sets up websocket server and event callbacks.
        Starts thread to monitor inbound message queue.
        """

        self.logger.info("Starting websocket %s" % __name__)
        self.connect()

    def listen(self):
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()

    def connect(self):
        #websocket.enableTrace(True)
        ws = websocket.WebSocketApp("ws://%s:%s" % (self._host, self._port),
                                    on_message=self.onMessage,
                                    on_error=self.onError,
                                    on_close=self.onClose)
        ws.on_open = self.onOpen
        ws.run_forever()

    def onError(self, ws, message):
        self.logger.error("Error from websocket client: %s" % message)

    def onClose(self, ws):
        if self.alive:
            self.logger.warn("Closed")
            self.alive = False
            # TODO: reconnect timer
        else:
            self.logger.info("Closed")

    def onMessage(self, ws, message):
        self.logger.info("Message from websocket server: %s" % message)

    def onOpen(self, ws):
        self.alive = True
        self.websocketClient = ws
        self.listen()

    def shutdown(self):
        """ Handle shutdown message. 
        Close and shutdown websocket server.
        Join queue processing thread.
        """

        self.logger.info("Shutting down websocket server %s" %
                         (multiprocessing.current_process().name))

        try:
            self.logger.info("Closing websocket")
            self.websocketClient.close()
        except Exception as e:
            self.logger.error("Websocket close error : %s " % e)

        self.alive = False

        self.threadProcessQueue.join()

        time.sleep(1)
        self.exit = True

    def sendOutMessage(self, message):
        """ Send message to server """

        self.websocketClient.send(json.dumps(message.__dict__))

    def processQueue(self):
        """ Monitor queue of messages from main process to this thread. """

        while self.alive:
            if not self.inQueue.empty():
                try:
                    message = self.inQueue.get(block=False, timeout=1)
                    if message is not None:
                        if message == "SHUTDOWN":
                            self.logger.debug("SHUTDOWN handled")
                            self.shutdown()
                        else:
                            self.sendOutMessage(message)
                except Exception as e:
                    self.logger.error("Websocket unable to read queue : %s " %
                                      e)
            else:
                time.sleep(.25)
class MQTTClientModule(Thread):
    """ Threaded MQTT client for processing and publishing outbound messages"""
    def __init__(self, baseConfig, pInBoundEventQueue, pOutBoundEventQueue,
                 loggingQueue):

        super(MQTTClientModule, self).__init__()
        self.config = baseConfig
        self.alive = True
        self.inQueue = pInBoundEventQueue

        # Constants
        self._keepAlive = self.config['MqttKeepAlive']
        self._feedName = self.config['MqttFeedName']
        self._username = self.config['MqttUsername']
        self._key = self.config['MqttKey']
        self._host = self.config['MqttHost']
        self._port = self.config['MqttPort']
        self._publishJson = self.config['MqttPublishJson']
        self._publishFaceValues = self.config['MqttPublishFaceValues']

        # MQTT setup
        self._client = mqtt.Client()
        self._client.username_pw_set(self._username, self._key)
        self._client.on_connect = self.onConnect
        self._client.on_disconnect = self.onDisconnect
        self._client.on_message = self.onMessage
        self.mqttConnected = False

        # Logging setup
        self.logger = ThreadsafeLogger(loggingQueue, "MQTT")

    def onConnect(self, client, userdata, flags, rc):
        self.logger.debug('MQTT onConnect called')
        # Result code 0 is success
        if rc == 0:
            self.mqttConnected = True

            # Subscribe to feed here
        else:
            self.logger.error('MQTT failed to connect: %s' % rc)
            raise RuntimeError('MQTT failed to connect: %s' % rc)

    def onDisconnect(self, client, userdata, rc):
        self.logger.debug('MQTT onDisconnect called')
        self.mqttConnected = False
        if rc != 0:
            self.logger.debug('MQTT disconnected unexpectedly: %s' % rc)
            self.handleReconnect(rc)

    def onMessage(self, client, userdata, msg):
        self.logger.debug('MQTT onMessage called for client: %s' % client)

    def connect(self):
        """ Connect to MQTT broker
        Skip calling connect if already connected.
        """
        if self.mqttConnected:
            return

        self._client.connect(self._host,
                             port=self._port,
                             keepalive=self._keepAlive)

    def disconnect(self):
        """ Check if connected"""
        if self.mqttConnected:
            self._client.disconnect()

    def subscribe(self, feed=False):
        """Subscribe to feed, defaults to feed specified in config"""
        if not feed: feed = self._feedName
        self._client.subscribe('{0}/feeds/{1}'.format(self._username, feed))

    def publish(self, value, feed=False):
        """Publish a value to a feed"""
        if not feed: feed = self._feedName
        self._client.publish('{0}/feeds/{1}'.format(self._username, feed),
                             payload=value)
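    # Usage sketch with a hypothetical feed name, following the
    # '{username}/feeds/{feed}' topic layout used above:
    #   self.publish(0.42, feed='temperature')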

    def publishFaceValues(self, message):
        """ Publish face detection values to individual MQTT feeds
        Parses _extendedData.predictions.faceAttributes property
        Works with Azure face API responses and 
        """
        try:
            for face in message._extendedData['predictions']:
                faceAttrs = face['faceAttributes']
                for key in faceAttrs:
                    if type(faceAttrs[key]) is dict:
                        val = self.flattenDict(faceAttrs[key])
                        self.logger.debug('val: %s' % val)
                    else:
                        val = faceAttrs[key]
                    self.publish(val, key)
        except Exception as e:
            self.logger.error('Error publishing values: %s' % e)

    def flattenDict(self, aDict):
        """ Get average of simple dictionary of numerical values """
        val = 0  # default so val is always defined, even if the try fails
        try:
            val = float(sum(aDict[key] for key in aDict)) / len(aDict)
        except Exception as e:
            self.logger.error('Error flattening dict, returning 0: %s' % e)
        return val
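    # e.g. flattenDict({'anger': 0.1, 'happiness': 0.9}) -> 0.5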

    def publishJsonMessage(self, message):
        msg_str = self.stringifyMessage(message)
        self.publish(msg_str)

    def stringifyMessage(self, message):
        """ Dump into JSON string """
        return json.dumps(message.__dict__).encode('utf8')

    def processQueue(self):
        self.logger.info('Processing queue')

        while self.alive:
            # Pump the loop
            self._client.loop(timeout=1)
            if not self.inQueue.empty():
                try:
                    message = self.inQueue.get(block=False, timeout=1)
                    if message is not None and self.mqttConnected:
                        if message == "SHUTDOWN":
                            self.logger.debug("SHUTDOWN command handled")
                            self.shutdown()
                        else:
                            # Send message as string or split into channels
                            if self._publishJson:
                                self.publishJsonMessage(message)
                            elif self._publishFaceValues:
                                self.publishFaceValues(message)
                            else:
                                self.publishValues(message)

                except Exception as e:
                    self.logger.error("MQTT unable to read queue : %s " % e)
            else:
                time.sleep(.25)

    def shutdown(self):
        self.logger.info("Shutting down MQTT %s" % (mp.current_process().name))
        self.alive = False
        time.sleep(1)
        self.exit = True

    def run(self):
        """ Thread start method"""
        self.logger.info("Running MQTT")

        self.connect()
        self.alive = True

        # Start queue loop
        self.processQueue()
class TVCollectionPoint(Thread):
    def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue,
                 loggingQueue):
        """ Initialize new TVCollectionPoint instance.
        Setup queues, variables, configs, constants and loggers.
        """

        super(TVCollectionPoint, self).__init__()

        if not self.check_opencv_version("3.", cv2):
            print(
                "OpenCV version {0} is not supported. Use 3.x for best results."
                .format(self.get_opencv_version()))

        # Queues
        self.outQueue = pOutBoundQueue  #messages from this thread to the main process
        self.inQueue = pInBoundQueue
        self.loggingQueue = loggingQueue

        # Variables
        self.video = None
        self.alive = True
        self.ix = -1
        self.iy = -1
        self.fx = -1
        self.fy = -1
        self.clicking = False
        self.boundSet = False

        self.x1, self.x2, self.y1, self.y2 = 0, 0, 0, 0

        # Configs
        #self.moduleConfig = camConfigLoader.load(self.loggingQueue) #Get the config for this module
        self.config = baseConfig

        # Constants
        self._captureWidth = 1600
        self._captureHeight = 900
        self._numLEDs = 60
        self._collectionPointId = "tvcam1"
        self._collectionPointType = "ambiLED"
        self._showVideoStream = True
        self._delimiter = ';'
        self._colorMode = 'edgeDominant'
        # self._colorMode = 'edgeMean'
        self._perimeterDepth = 20
        self._topSegments = 3
        self._sideSegments = 2

        # Logger
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

    def run(self):
        """ Main thread method, run when the thread's start() function is called.
        Controls flow of detected faces and the MultiTracker. 
        Sends color data in string format, like "#fffff;#f1f1f1;..."
        """

        # Monitor inbound queue on own thread
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()

        self.initializeCamera()

        # Setup timer for FPS calculations
        start = time.time()
        frameCounter = 1
        fps = 0

        # Start timer for collection events
        self.collectionStart = time.time()

        ok, frame = self.video.read()
        if not ok:
            self.logger.error('Cannot read video file')
            self.shutdown()
        else:
            framecopy = frame.copy()
            cont = True
            while cont or not self.boundSet:
                cv2.imshow('Set ROI', framecopy)
                cv2.setMouseCallback('Set ROI', self.getROI, frame)
                k = cv2.waitKey(0)
                if k == 32 and self.boundSet:
                    # on space, user wants to finalize bounds, only allow them to exit if bounds set
                    cont = False
                # elif k != 27:
                # any other key clears rectangles
                # framecopy = frame.copy()
                #ok, frame = self.video.read()
                # cv2.imshow('Set ROI', framecopy)
                # cv2.setMouseCallback('Set ROI', self.getROI, framecopy)
        cv2.destroyWindow('Set ROI')

        self.initKMeans()

        # Set up for all modes
        top_length_pixels = self.fx - self.ix
        side_length_pixels = self.fy - self.iy
        perimeter_length_pixels = top_length_pixels * 2 + side_length_pixels * 2
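        # Worked example with hypothetical numbers: a 300x200 px ROI and 60 LEDs
        # give perimeter = 2*300 + 2*200 = 1000 px, so each top/bottom edge maps
        # to 60 * 300/1000 = 18 LEDs and each side edge to 60 * 200/1000 = 12.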

        # mode specific setup
        if self._colorMode == 'dominant':
            pass
        if self._colorMode == 'edgeDominant' or self._colorMode == 'edgeMean':
            perimeter_depth = 0
            if self._perimeterDepth < side_length_pixels / 2 and self._perimeterDepth < top_length_pixels / 2:
                perimeter_depth = self._perimeterDepth
            else:
                perimeter_depth = min(side_length_pixels / 2,
                                      top_length_pixels / 2)

        while self.alive:
            ok, ogframe = self.video.read()
            if not ok:
                self.logger.error('Error while reading frame')
                break
            frame = ogframe.copy()

            # Dominant color
            if self._colorMode == 'dominant':
                data = self.getDominantColor(
                    cv2.resize(frame[:, :, :], (0, 0), fx=0.4, fy=0.4),
                    self.ix, self.fx, self.iy, self.fy)
                #self.putCPMessage(data, 'light-dominant')
                #print('data: ',data)

            elif self._colorMode == 'edgeMean':
                data = self.getEdgeMeanColors(frame, top_length_pixels,
                                              side_length_pixels,
                                              perimeter_length_pixels,
                                              perimeter_depth)
                self.logger.debug('data: %s' % data)

            elif self._colorMode == 'edgeDominant':
                # this is the most promising
                colorData = self.getEdgeDominantColors(
                    frame, top_length_pixels, side_length_pixels,
                    perimeter_length_pixels, perimeter_depth)

                # assuming LEDs are evenly distributed, find number for each edge of ROI
                top_num_leds = self._numLEDs * (top_length_pixels /
                                                perimeter_length_pixels)
                side_num_leds = self._numLEDs * (side_length_pixels /
                                                 perimeter_length_pixels)
                data = self.getColorString(colorData, top_num_leds,
                                           side_num_leds)
                self.putCPMessage(data, 'light-edges')
                # print('data: ', data)

            if self._showVideoStream:
                cv2.rectangle(frame, (self.ix, self.iy), (self.fx, self.fy),
                              (255, 0, 0), 1)
                cv2.imshow("output", frame)
                cv2.waitKey(1)

    def getMeanColor(self, frame):
        color = [frame[:, :, i].mean() for i in range(frame.shape[-1])]
        return color

    def initKMeans(self):
        # kmeans vars
        self.n_colors = 5
        self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                         200, .1)
        self.flags = cv2.KMEANS_RANDOM_CENTERS

    def getColorString(self, colorData, top_num_leds, side_num_leds):
        toReturn = ''
        for key in colorData:
            if key == 'top' or key == 'bottom':
                for i in range(len(colorData[key])):
                    toReturn += (colorData[key][i] + self._delimiter) * int(
                        top_num_leds / self._topSegments)
            if key == 'right' or key == 'left':
                for i in range(len(colorData[key])):
                    toReturn += (colorData[key][i] + self._delimiter) * int(
                        side_num_leds / self._sideSegments)
        return toReturn

    def getDominantSegmentColor(self, segment):
        average_color = [
            segment[:, :, i].mean() for i in range(segment.shape[-1])
        ]
        arr = np.float32(segment)
        pixels = arr.reshape((-1, 3))

        # kmeans clustering
        _, labels, centroids = cv2.kmeans(pixels, self.n_colors, None,
                                          self.criteria, 10, self.flags)

        palette = np.uint8(centroids)
        quantized = palette[labels.flatten()]
        quantized = quantized.reshape(segment.shape)
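        # Note: itemfreq comes from scipy.stats and was removed in SciPy 1.3.
        # On newer SciPy, an equivalent (an assumption, not the author's code) is:
        #   values, counts = np.unique(labels, return_counts=True)
        #   dominant_color = palette[values[np.argmax(counts)]]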

        dominant_color = palette[np.argmax(itemfreq(labels)[:, -1])]

        return dominant_color

    def getEdgeMeanColors(self, frame, top_length_pixels, side_length_pixels,
                          perimeter_length_pixels, perimeter_depth):
        # assuming LEDs are evenly distributed, find number for each edge of ROI
        top_num_leds = self._numLEDs * (top_length_pixels /
                                        perimeter_length_pixels)
        side_num_leds = self._numLEDs * (side_length_pixels /
                                         perimeter_length_pixels)
        top_segment_length = top_length_pixels / self._topSegments
        side_segment_length = side_length_pixels / self._sideSegments
        # Result dict, same layout as in getEdgeDominantColors
        data = {}
        data['top'] = [None] * self._topSegments
        data['right'] = [None] * self._sideSegments
        data['bottom'] = [None] * self._topSegments
        data['left'] = [None] * self._sideSegments

        for i in range(0, self._topSegments):
            ix = int(self.ix + i * top_segment_length)
            fx = int(self.ix + (i + 1) * top_segment_length)
            iy = int(self.iy)
            fy = int(self.iy + perimeter_depth)
            c = self.getMeanColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['top'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy - (10 + perimeter_depth)),
                              (fx, fy - perimeter_depth),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.fx - perimeter_depth)
            fx = int(self.fx)
            iy = int(self.iy + i * side_segment_length)
            fy = int(self.iy + (i + 1) * side_segment_length)
            c = self.getMeanColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['right'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix + perimeter_depth, iy),
                              (fx + (10 + perimeter_depth), fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._topSegments):
            ix = int(self.fx - (i + 1) * top_segment_length)
            fx = int(self.fx - i * top_segment_length)
            iy = int(self.fy - perimeter_depth)
            fy = int(self.fy)
            c = self.getMeanColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['bottom'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy + perimeter_depth),
                              (fx, fy + (10 + perimeter_depth)),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.ix)
            fx = int(self.ix + perimeter_depth)
            iy = int(self.fy - (i + 1) * side_segment_length)
            fy = int(self.fy - i * side_segment_length)
            c = self.getMeanColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['left'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix - (10 + perimeter_depth), iy),
                              (fx - perimeter_depth, fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)
        return data

    def getEdgeDominantColors(self, frame, top_length_pixels,
                              side_length_pixels, perimeter_length_pixels,
                              perimeter_depth):
        top_segment_length = top_length_pixels / self._topSegments
        side_segment_length = side_length_pixels / self._sideSegments
        data = {}
        data['top'] = [None] * self._topSegments
        data['right'] = [None] * self._sideSegments
        data['bottom'] = [None] * self._topSegments
        data['left'] = [None] * self._sideSegments
        for i in range(0, self._topSegments):
            ix = int(self.ix + i * top_segment_length)
            fx = int(self.ix + (i + 1) * top_segment_length)
            iy = int(self.iy)
            fy = int(self.iy + perimeter_depth)
            c = self.getDominantSegmentColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['top'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy - (10 + perimeter_depth)),
                              (fx, fy - perimeter_depth),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.fx - perimeter_depth)
            fx = int(self.fx)
            iy = int(self.iy + i * side_segment_length)
            fy = int(self.iy + (i + 1) * side_segment_length)
            c = self.getDominantSegmentColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['right'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix + perimeter_depth, iy),
                              (fx + (10 + perimeter_depth), fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._topSegments):
            ix = int(self.fx - (i + 1) * top_segment_length)
            fx = int(self.fx - i * top_segment_length)
            iy = int(self.fy - perimeter_depth)
            fy = int(self.fy)
            c = self.getDominantSegmentColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['bottom'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 0, 255), 1)
                cv2.rectangle(frame, (ix, iy + perimeter_depth),
                              (fx, fy + (10 + perimeter_depth)),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        for i in range(0, self._sideSegments):
            ix = int(self.ix)
            fx = int(self.ix + perimeter_depth)
            iy = int(self.fy - (i + 1) * side_segment_length)
            fy = int(self.fy - i * side_segment_length)
            c = self.getDominantSegmentColor(
                cv2.resize(frame[iy:fy, ix:fx, :], (0, 0), fx=0.2, fy=0.2))
            data['left'][i] = self.getRGBHexString(c)
            if self._showVideoStream:
                cv2.rectangle(frame, (ix, iy), (fx, fy), (0, 255, 0), 1)
                cv2.rectangle(frame, (ix - (10 + perimeter_depth), iy),
                              (fx - perimeter_depth, fy),
                              (int(c[0]), int(c[1]), int(c[2])), 10)

        return data

    def getRGBHexString(self, bgr):
        # Zero-pad each channel and cast to int so mean() floats don't break
        # formatting; BGR (255, 0, 0) -> "0000ff"
        return "%02x%02x%02x" % (int(bgr[2]), int(bgr[1]), int(bgr[0]))

    def getDominantColor(self, img, ix, fx, iy, fy):
        ix = int(ix)
        fx = int(fx)
        iy = int(iy)
        fy = int(fy)
        average_color = [
            img[iy:fy, ix:fx, i].mean() for i in range(img.shape[-1])
        ]
        arr = np.float32(img)
        pixels = arr.reshape((-1, 3))

        # kmeans clustering
        _, labels, centroids = cv2.kmeans(pixels, self.n_colors, None,
                                          self.criteria, 10, self.flags)

        palette = np.uint8(centroids)
        quantized = palette[labels.flatten()]
        quantized = quantized.reshape(img.shape)

        dominant_color = palette[np.argmax(itemfreq(labels)[:, -1])]

        return dominant_color

    def initializeCamera(self):
        # open first webcam available
        self.video = cv2.VideoCapture(0)
        if not self.video.isOpened():
            self.video.open(0)  # open() requires a device index

        #set the resolution from config
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, self._captureWidth)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, self._captureHeight)

    def getROI(self, event, x, y, flags, frame):
        framecopy = frame.copy()
        if event == cv2.EVENT_LBUTTONDOWN:
            self.clicking = True
            self.ix, self.iy = x, y

        elif event == cv2.EVENT_MOUSEMOVE:
            if self.clicking:
                cv2.rectangle(framecopy, (self.ix, self.iy), (x, y),
                              (0, 255, 0), -1)
                cv2.imshow('Set ROI', framecopy)

        elif event == cv2.EVENT_LBUTTONUP:
            self.clicking = False
            cv2.rectangle(framecopy, (self.ix, self.iy), (x, y), (0, 255, 0),
                          -1)
            cv2.imshow('Set ROI', framecopy)
            self.fx, self.fy = x, y
            self.boundSet = True

    def processQueue(self):
        self.logger.info(
            "Starting to watch collection point inbound message queue")
        while self.alive:
            if not self.inQueue.empty():
                self.logger.info("Queue size is %s" % self.inQueue.qsize())
                try:
                    message = self.inQueue.get(block=False, timeout=1)
                    if message is not None:
                        if message == "SHUTDOWN":
                            self.logger.info("SHUTDOWN command handled on %s" %
                                             __name__)
                            self.shutdown()
                        else:
                            self.handleMessage(message)
                except Exception as e:
                    self.logger.error("Unable to read queue, error: %s " % e)
                    self.shutdown()
                self.logger.info("Queue size is %s after" %
                                 self.inQueue.qsize())
            else:
                time.sleep(.25)

    def handleMessage(self, message):
        self.logger.info("handleMessage not implemented!")

    def putCPMessage(self, data, type):
        if type == "off":
            # Send off message
            self.logger.info('Sending off message')
            msg = CollectionPointEvent(self._collectionPointId,
                                       self._collectionPointType, 'off', None)
            self.outQueue.put(msg)

        elif type == "light-edges":
            # Reset the collection start timer
            self.collectionStart = time.time()

            self.logger.info('Sending light message')
            msg = CollectionPointEvent(self._collectionPointId,
                                       self._collectionPointType,
                                       'light-edges', data)
            self.outQueue.put(msg)

        elif type == "light-dominant":
            # Reset the collection start timer
            self.collectionStart = time.time()

            self.logger.info('Sending light message')
            msg = CollectionPointEvent(self._collectionPointId,
                                       self._collectionPointType,
                                       'light-dominant', data)
            self.outQueue.put(msg)

    def shutdown(self):
        self.alive = False
        self.logger.info("Shutting down")
        # self.putCPMessage(None, 'off')
        cv2.destroyAllWindows()
        time.sleep(1)
        self.exit = True

    def get_opencv_version(self):
        import cv2 as lib
        return lib.__version__

    def check_opencv_version(self, major, lib=None):
        # if the supplied library is None, import OpenCV
        if lib is None:
            import cv2 as lib

        # return whether or not the current OpenCV version matches the
        # major version number
        return lib.__version__.startswith(major)
class CollectionPoint(Thread):
    """ Sample class to show basic structure of collecting data and passing it to communication channels """

    def __init__(self, baseConfig, pOutBoundEventQueue, pInBoundEventQueue, loggingQueue):
        # Standard initialization that most collection points would do
        super(CollectionPoint, self).__init__()
        self.alive = True
        self.config = baseConfig
        self.outBoundEventQueue = pOutBoundEventQueue
        self.inBoundEventQueue = pInBoundEventQueue
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

        # Initialize collection point specific variables
        self.video = None

        # Set constants from config
        self._collectionPointId = self.config['CollectionPointId']
        self._collectionPointType = self.config['CollectionPointType']
        self._testMode = self.config['TestMode']


        if not self.check_opencv_version("3.",cv2):
            self.logger.critical("open CV is the wrong version {0}.  We require version 3.x".format(self.get_opencv_version()))

    def run(self):
        """ Sample run function for a collection point class.
        Starting point for when the thread is start()'d from main.py
        Extend this to create your own, or understand how to perform specific actions.
        """

        # Start a thread to monitor the inbound queue
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()

        # Load the OpenCV classifier to detect faces
        faceCascade = cv2.CascadeClassifier('./classifiers/haarcascades/haarcascade_frontalface_default.xml')

        tracker = cv2.Tracker_create("KCF")

        # Get first camera connected
        video = cv2.VideoCapture(0)
        if not video.isOpened():
            video.open(0)  # open() requires a device index
        
        # Set resolution of capture
        video.set(cv2.CAP_PROP_FRAME_WIDTH, 1080)
        video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        ok, frame = video.read()
        if not ok:
            self.logger.error('Cannot read video file')
            self.shutdown()

        while self.alive:
            # Read a new frame
            ok, frame = video.read()
            if not ok:
                self.logger.error('Cannot read video file')
                break

            # Convert to grayscale
            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Copy the frame to allow manipulation
            outputImage = frame.copy()

            # Detect faces
            faces = faceCascade.detectMultiScale(
                grayFrame,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(5, 5)
            )
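            # scaleFactor=1.1 shrinks the search scale by 10% per pyramid level;
            # minNeighbors=5 keeps only candidates confirmed by at least 5
            # overlapping detections, reducing false positives.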

            self.logger.info("Found " + str(len(faces)) + " faces")

            # Draw a rectangle around each face
            for (x, y, w, h) in faces:
                cv2.rectangle(outputImage, (x, y), (x+w, y+h), (0, 255, 0), 2)

            if self._testMode:
                if len(faces) > 0:
                    msg = CollectionPointEvent(self._collectionPointId,self._collectionPointType,('Found {0} faces'.format(len(faces))))
                    self.outBoundEventQueue.put(msg)

            # Display the image
            cv2.imshow("Faces found", outputImage)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27: #esc key
                self.shutdown()
                break

        video.release()

    def shutdown(self):
        self.logger.info("Shutting down collection point")
        cv2.destroyAllWindows()
        # Stop the queue loop before joining so join() can return
        self.alive = False
        self.threadProcessQueue.join()
        time.sleep(1)
        self.exit = True

    def get_opencv_version(self):
        import cv2 as lib
        return lib.__version__

    def check_opencv_version(self,major, lib=None):
        # if the supplied library is None, import OpenCV
        if lib is None:
            import cv2 as lib

        # return whether or not the current OpenCV version matches the
        # major version number
        return lib.__version__.startswith(major)
class IdsWrapper(object):
    def __init__(self, loggingQueue, moduleConfig):
        """ Create a new IdsWrapper instance. 
        IdsWrapper uses the Windows IDS DLL, wraps C calls in Python.
        Tested using the IDS XS 2.0 USB camera
        """

        self.config = moduleConfig
        self.isOpen = False
        self.width = 0
        self.height = 0
        self.bitspixels = 0
        self.py_width = 0
        self.py_height = 0
        self.py_bitspixel = 0
        self.cam = None
        self.pcImgMem = c_char_p()  # create placeholder for image memory
        self.pid = c_int()

        # Setup logger
        self.logger = ThreadsafeLogger(loggingQueue, __name__)
        
        # Load the correct dll
        try:
            if architecture()[0] == '64bit':
                self.logger.info('Using IDS camera with 64-bit architecture')
                self.uEyeDll = cdll.LoadLibrary("C:/Windows/System32/uEye_api_64.dll")
            else:
                self.logger.info('Using IDS camera with 32-bit architecture')
                self.uEyeDll = cdll.LoadLibrary("C:/Windows/System32/uEye_api.dll")
        except Exception as e:
            self.logger.error('Failed to load IDS DLL: %s. Are you sure you are using an IDS camera?' % e)

    def isOpened(self):
        """ Return camera open status"""
        return self.isOpen
    
    def set(self, key, value):
        """ Set the py_width, py_height or py_bitspixel properties """
        if key == 'py_width':
            self.py_width = value
        elif key == 'py_height':
            self.py_height = value
        else:
            self.py_bitspixel = value
    
    def allocateImageMemory(self):
        """ Wrapped call to allocate image memory """
        ret = self.uEyeDll.is_AllocImageMem(self.cam, self.width, self.height, self.bitspixel, byref(self.pcImgMem), byref(self.pid))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully allocated image memory")
        else:
            self.logger.error('Memory allocation failed, no camera with value ' + str(self.cam.value) + ' | Error code: ' + str(ret))
            return

    def setImageMemory(self):
        """ Wrapped call to set image memory """
        ret = self.uEyeDll.is_SetImageMem(self.cam, self.pcImgMem, self.pid)
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set image memory")
        else:
            self.logger.error("Failed to set image memory; error code: " + str(ret))
            return

    def beginCapture(self):
        """ Wrapped call to begin capture """
        ret = self.uEyeDll.is_CaptureVideo(self.cam, c_long(IS_DONT_WAIT))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully began video capture")
        else:
            self.logger.error("Failed to begin video capture; error code: " + str(ret))
            return

    def initImageData(self):
        """ Initialize the ImageData numpy array """
        self.ImageData = np.ones((self.py_height, self.py_width), dtype=np.uint8)

    def setCTypes(self):
        """ Set C Types for width, height, and bitspixel properties"""
        self.width = c_int(self.py_width)
        self.height = c_int(self.py_height) 
        self.bitspixel = c_int(self.py_bitspixel)

    def start(self):
        """ Start capturing frames on another thread as a daemon """
        self.updateThread = Thread(target=self.update)
        self.updateThread.setDaemon(True)
        self.updateThread.start()
        return self

    def initializeCamera(self):
        """ Wrapped call to initialize camera """
        ret = self.uEyeDll.is_InitCamera(byref(self.cam), self.hWnd)
        if ret == IS_SUCCESS:
            self.logger.info("Successfully initialized camera")
        else:
            self.logger.error("Failed to initialize camera; error code: " + str(ret))
            return

    def enableAutoExit(self):
        """ Wrapped call to allow allocated memory to be dropped on exit. """
        ret = self.uEyeDll.is_EnableAutoExit(self.cam, c_uint(1))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully enabled auto exit")
        else:
            self.logger.error("Failed to enable auto exit; error code: " + str(ret))
            return

    def setDisplayMode(self):
        """ Wrapped call to set display mode to DIB """
        ret = self.uEyeDll.is_SetDisplayMode(self.cam, c_int(IS_SET_DM_DIB))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set camera to DIB mode")
        else:
            self.logger.error("Failed to set camera mode; error code: " + str(ret))
            return

    def setColorMode(self):
        """ Wrapped call to set camera color capture mode """
        ret = self.uEyeDll.is_SetColorMode(self.cam, c_int(IS_CM_SENSOR_RAW8))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set color mode")
        else:
            self.logger.error("Failed to set color mode; error code: " + str(ret))
            return
    
    def setCompressionFactor(self):
        """ Wrapped call to set image compression factor.
        Required for long USB lengths when bandwidth is constrained, lowers quality.
        """
        ret = self.uEyeDll.is_DeviceFeature(self.cam, IS_DEVICE_FEATURE_CMD_SET_JPEG_COMPRESSION, byref(c_int(self.config['CompressionFactor'])), c_uint(INT_BYTE_SIZE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set compression factor to: " + str(self.config['CompressionFactor']))
        else:
            self.logger.error("Failed to set compression factor; error code: " + str(ret))
            return

    def setPixelClock(self):
        """ Wrapped call to set pixel clock.
        Required for long USB lengths when bandwidth is constrained
        Lowers frame rate and increases motion blur. 
        """
        ret = self.uEyeDll.is_PixelClock(self.cam, IS_PIXELCLOCK_CMD_SET, byref(c_uint(self.config['PixelClock'])), c_uint(INT_BYTE_SIZE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set pixel clock to: " + str(self.config['PixelClock']))
        else:
            self.logger.error("Failed to set pixel clock; error code: " + str(ret))
            return

    def setTrigger(self):
        """ Wrapped call to set trigger type to software trigger. """
        ret = self.uEyeDll.is_SetExternalTrigger(self.cam, c_uint(IS_SET_TRIGGER_SOFTWARE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set software trigger")
        else:
            self.logger.error("Failed to set software trigger; error code: " + str(ret))
            return

    def setImageProfile(self):
        """ Wrapped call to set image format.
        Sets resolution of the capture to UXGA. More modes available in idsConsts.py.
        """
        ret = self.uEyeDll.is_ImageFormat(self.cam, c_uint(IMGFRMT_CMD_SET_FORMAT), byref(c_int(UXGA)), c_uint(INT_BYTE_SIZE))
        if ret == IS_SUCCESS:
            self.logger.info("Successfully set camera image profile")
        else:
            self.logger.error("Failed to set camera image profile; error code: " + str(ret))
            return

    def open(self):
        """ Open connection to IDS camera, set various modes. """
        self.cam = c_uint32(0)
        self.hWnd = c_voidp()
        
        self.initializeCamera()
        self.enableAutoExit()
        self.setDisplayMode()
        self.setColorMode()
        self.setCompressionFactor()
        self.setPixelClock()
        self.setTrigger()
        self.setImageProfile()

        # Declare video open
        self.isOpen = True

        self.logger.info('Successfully opened camera')
    
    def update(self):
        """ Loop to update frames and copy to ImageData variable. """
        while True:
            if not self.isOpen:
                return
            self.uEyeDll.is_CopyImageMem(self.cam, self.pcImgMem, self.pid, self.ImageData.ctypes.data_as(c_char_p))
    
    def read(self):
        """ Read frame currently available in ImageData variable. """
        try:
            return True, self.ImageData
        except Exception as e:
            self.logger.error('Error getting image data: %r' % e)
            return False, None


    def exit(self):
        """ Close camera down, release memory. """
        self.uEyeDll.is_ExitCamera(self.cam)
        self.isOpen = False
        self.logger.info('Closing wrapper and camera')
        return
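
# Usage sketch for IdsWrapper (hypothetical config values; assumes the uEye DLL
# and the IDS constants imported by this module are available). The call order
# below is inferred from the wrapped calls' dependencies:
#   wrapper = IdsWrapper(loggingQueue, moduleConfig)
#   wrapper.set('py_width', 1600)
#   wrapper.set('py_height', 1200)
#   wrapper.set('py_bitspixel', 8)
#   wrapper.setCTypes()
#   wrapper.open()
#   wrapper.allocateImageMemory()
#   wrapper.setImageMemory()
#   wrapper.beginCapture()
#   wrapper.initImageData()
#   wrapper.start()            # begins copying frames on a daemon thread
#   ok, frame = wrapper.read()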
            
class CamCollectionPoint(Thread):
    def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue,
                 loggingQueue):
        """ Initialize new CamCollectionPoint instance.
        Setup queues, variables, configs, predictionEngines, constants and loggers.
        """

        super(CamCollectionPoint, self).__init__()

        if not self.check_opencv_version("3.", cv2):
            print(
                "OpenCV version {0} is not supported. Use 3.x for best results."
                .format(self.get_opencv_version()))

        # Queues
        self.outQueue = pOutBoundQueue  #messages from this thread to the main process
        self.inQueue = pInBoundQueue
        self.loggingQueue = loggingQueue

        # Variables
        self.video = None
        self.needsReset = False
        self.needsResetMux = False
        self.alive = True

        # Configs
        self.moduleConfig = camConfigLoader.load(
            self.loggingQueue)  #Get the config for this module
        self.config = baseConfig

        # Prediction engine
        self.imagePredictionEngine = AzureImagePrediction(
            moduleConfig=self.moduleConfig, loggingQueue=loggingQueue)

        # Constants
        self._useIdsCamera = self.moduleConfig['UseIdsCamera']
        self._minFaceWidth = self.moduleConfig['MinFaceWidth']
        self._minFaceHeight = self.moduleConfig['MinFaceHeight']
        self._minNearestNeighbors = self.moduleConfig['MinNearestNeighbors']
        self._maximumPeople = self.moduleConfig['MaximumPeople']
        self._facePixelBuffer = self.moduleConfig['FacePixelBuffer']
        self._collectionThreshold = self.moduleConfig['CollectionThreshold']
        self._showVideoStream = self.moduleConfig['ShowVideoStream']
        self._sendBlobs = self.moduleConfig['SendBlobs']
        self._blobWidth = self.moduleConfig['BlobWidth']
        self._blobHeight = self.moduleConfig['BlobHeight']
        self._captureWidth = self.moduleConfig['CaptureWidth']
        self._captureHeight = self.moduleConfig['CaptureHeight']
        self._bitsPerPixel = self.moduleConfig['BitsPerPixel']
        self._resetEventTimer = self.moduleConfig['ResetEventTimer']

        self._collectionPointType = self.config['CollectionPointType']
        self._collectionPointId = self.config['CollectionPointId']

        # Logger
        self.logger = ThreadsafeLogger(loggingQueue, __name__)

    def run(self):
        """ Main thread method, run when the thread's start() function is called.
        Controls flow of detected faces and the MultiTracker. 
        Determines when to send 'reset' events to clients and when to send 'found' events. 
        This function contains various comments along the way to help understand the flow.
        You can use this flow, extend it, or build your own.
        """

        # Monitor inbound queue on own thread
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()

        self.initializeCamera()

        # Load the OpenCV Haar classifier to detect faces
        curdir = os.path.dirname(__file__)
        cascadePath = os.path.join(curdir, 'classifiers', 'haarcascades',
                                   'haarcascade_frontalface_default.xml')
        faceCascade = cv2.CascadeClassifier(cascadePath)

        self.mmTracker = MultiTracker("KCF", self.moduleConfig,
                                      self.loggingQueue)

        # Setup timer for FPS calculations
        start = time.time()
        frameCounter = 1
        fps = 0

        # Start timer for collection events
        self.collectionStart = time.time()

        ok, frame = self.video.read()
        if not ok:
            self.logger.error('Cannot read video file')
            self.shutdown()

        while self.alive:
            ok, frame = self.video.read()
            if not ok:
                self.logger.error('Error while reading frame')
                break

            # Image alts
            if self._useIdsCamera:
                grayFrame = frame.copy()
                outputImage = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
            else:
                outputImage = frame.copy()
                grayFrame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

            # Detect faces
            faces = faceCascade.detectMultiScale(
                grayFrame,
                scaleFactor=1.1,
                minNeighbors=self._minNearestNeighbors,
                minSize=(self._minFaceWidth, self._minFaceHeight))

            # If no faces in frame, clear tracker and start reset timer
            if len(faces) == 0 or self.mmTracker.length() > self._maximumPeople:
                self.mmTracker.clear()
                self.startReset()

            # If there are trackers, update
            if self.mmTracker.length() > 0:
                ok, bboxes, failed = self.mmTracker.update(outputImage)
                if failed:
                    self.logger.error('Update trackers failed on: %s' %
                                      ', '.join(str(s) for s in failed))

            for (x, y, w, h) in faces:
                # If faces are detected, engagement exists, do not reset
                self.needsReset = False

                # Optionally add buffer to face, can improve tracking/classification accuracy
                if self._facePixelBuffer > 0:
                    (x, y, w, h) = self.applyFaceBuffer(
                        x, y, w, h, self._facePixelBuffer, outputImage.shape)

                # Get region of interest
                roi_gray = grayFrame[y:y + h, x:x + w]
                roi_color = outputImage[y:y + h, x:x + w]
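                # (roi_gray / roi_color are not used later in this loop; they
                # are convenient crops if you extend the flow with per-face
                # classification)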

                # If the tracker is valid and doesn't already exist, add it
                if self.validTracker(x, y, w, h):
                    self.logger.info('Adding tracker')
                    ok = self.mmTracker.add(
                        bbox={'x': x, 'y': y, 'w': w, 'h': h},
                        frame=outputImage)

                # Draw box around face
                if self._showVideoStream:
                    cv2.rectangle(outputImage, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)

            # If engagement is ongoing, or enough time has passed since the
            # last collection, check whether the focal face has changed
            if not self.needsReset or (time.time() - self.collectionStart >
                                       self._collectionThreshold):
                check, face = self.mmTracker.checkFocus()
                if check:
                    predictions = self.getPredictions(grayFrame, face)
                    if predictions:
                        self.putCPMessage(
                            data={
                                'detectedTime': datetime.now().isoformat('T'),
                                'predictions': predictions
                            },
                            type="update")

            # Update the FPS estimate; restart the measurement window every
            # 10 seconds so the reading stays current
            frameCounter += 1
            elapsed = time.time() - start
            fps = frameCounter / max(elapsed, 0.0001)
            if elapsed > 10:
                start = time.time()
                frameCounter = 1

            if self._showVideoStream:
                cv2.putText(outputImage, "%.1f FPS" % fps, (20, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1,
                            cv2.LINE_AA)
                cv2.imshow("Faces found", outputImage)
                # waitKey gives HighGUI time to actually render the frame
                cv2.waitKey(1)

            # Every 6th frame, send a downscaled copy of the frame to clients
            if self._sendBlobs and frameCounter % 6 == 0:
                self.putCPMessage(
                    data={
                        'imageArr': cv2.resize(outputImage,
                                               (self._blobWidth, self._blobHeight)),
                        'time': datetime.now().isoformat('T')
                    },
                    type="blob")

    def getPredictions(self, grayFrame, face):
        """ Send face to predictionEngine as JPEG.
        Return predictions array or false if no face is found. 
        """

        faceArr = grayFrame[int(face[1]):int(face[1] + face[3]),
                            int(face[0]):int(face[0] + face[2])]
        img = Image.fromarray(faceArr)
        buff = io.BytesIO()
        img.save(buff, format="JPEG")

        try:
            predictions = self.imagePredictionEngine.getPrediction(
                buff.getvalue())
        except Exception as e:
            self.logger.error('Prediction failed: %s' % e)
            predictions = False

        return predictions

    def validTracker(self, x, y, w, h):
        """ Check if the coordinates are a newly detected face or already present in MultiTracker.
        Only accepts new tracker candidates every _collectionThreshold seconds.
        Return true if the object in those coordinates should be tracked.
        """
        if not self.needsReset or (time.time() - self.collectionStart >
                                   self._collectionThreshold):
            if (self.mmTracker.length() == 0 or not self.mmTracker.contains(
                    bbox={'x': x, 'y': y, 'w': w, 'h': h})):
                return True
        return False

    def startReset(self):
        """Start a timer from reset event.
        If timer completes and the reset event should still be sent, send it.
        """
        if self.needsResetMux:
            self.needsReset = True
            self.needsResetMux = False
            self.resetStart = time.time()

        if self.needsReset:
            # Send the reset 10 seconds after the last face was detected
            if (time.time() - self.resetStart) > 10:
                self.putCPMessage(data=None, type="reset")
                self.needsReset = False

    def applyFaceBuffer(self, x, y, w, h, b, shape):
        """ Expand the face bounding box by b pixels on each side, clamped to
        the frame edges given by shape (height, width[, channels]).
        """
        x = max(x - b, 0)
        y = max(y - b, 0)
        w = min(w + 2 * b, shape[1] - x)
        h = min(h + 2 * b, shape[0] - y)
        return (x, y, w, h)
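    # Example: with b=10 on a 480x640 frame (shape (480, 640, 3)), a face at
    # (5, 5, 50, 50) expands to (0, 0, 70, 70): the top-left corner is
    # clamped at the frame edge and width/height grow by up to 2*b.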

    def initializeCamera(self):
        # Using IDS camera
        if self._useIdsCamera:
            self.logger.info("Using IDS Camera")
            self.wrapper = IdsWrapper(self.loggingQueue, self.moduleConfig)
            if not self.wrapper.isOpened():
                self.wrapper.open()
            self.wrapper.set('py_width', self._captureWidth)
            self.wrapper.set('py_height', self._captureHeight)
            self.wrapper.set('py_bitspixel', self._bitsPerPixel)

            # Convert values to ctypes, prep memory locations
            self.wrapper.setCTypes()

            self.wrapper.allocateImageMemory()
            self.wrapper.setImageMemory()
            self.wrapper.beginCapture()
            self.wrapper.initImageData()

            # Start video update thread
            self.video = self.wrapper.start()

        # Not using IDS camera
        else:
            # Open the first available webcam
            self.video = cv2.VideoCapture(0)
            if not self.video.isOpened():
                self.video.open(0)

            # Set the capture resolution from config (drivers may silently
            # ignore unsupported resolutions)
            self.video.set(cv2.CAP_PROP_FRAME_WIDTH, self._captureWidth)
            self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, self._captureHeight)

    def processQueue(self):
        self.logger.info(
            "Starting to watch collection point inbound message queue")
        while self.alive:
            if not self.inQueue.empty():
                self.logger.info("Queue size is %s" % self.inQueue.qsize())
                try:
                    message = self.inQueue.get(block=False)
                    if message is not None:
                        if message == "SHUTDOWN":
                            self.logger.info("SHUTDOWN command handled on %s" %
                                             __name__)
                            self.shutdown()
                        else:
                            self.sendOutMessage(message)
                except Exception as e:
                    self.logger.error("Unable to read queue, error: %s " % e)
                    self.shutdown()
                self.logger.info("Queue size is %s after" %
                                 self.inQueue.qsize())
            else:
                time.sleep(.25)

    def putCPMessage(self, data, type):
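        # Three message types are produced here:
        #   "reset"  - no faces for a while; clients should clear their state
        #   "update" - the focal face changed; carries prediction results
        #   "blob"   - a downscaled frame as a base64-encoded JPEG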
        if type == "reset":
            # Send reset message
            self.logger.info('Sending reset message')
            msg = CollectionPointEvent(self._collectionPointId,
                                       self._collectionPointType, 'Reset mBox',
                                       None)
            self.outQueue.put(msg)

        elif type == "update":
            # Reset collection start and now needs needs reset
            collectionStart = time.time()
            self.needsResetMux = True

            self.logger.info('Sending found message')
            msg = CollectionPointEvent(self._collectionPointId,
                                       self._collectionPointType, 'Found face',
                                       data['predictions'])
            self.outQueue.put(msg)

        elif type == "blob":
            # Get numpy array as bytes
            img = Image.fromarray(data['imageArr'])
            buff = io.BytesIO()
            img.save(buff, format="JPEG")
            s = base64.b64encode(buff.getvalue()).decode("utf-8")

            eventExtraData = {}
            eventExtraData['imageData'] = s
            eventExtraData['dataType'] = 'image/jpeg'

            # Send blob message
            # self.logger.info('Sending blob message')
            msg = CollectionPointEvent(self._collectionPointId,
                                       self._collectionPointType, 'blob',
                                       eventExtraData, True)
            self.outQueue.put(msg)

    def shutdown(self):
        self.alive = False
        self.logger.info("Shutting down")
        # self.outQueue.put("SHUTDOWN")
        if self._useIdsCamera and self.wrapper.isOpened():
            self.wrapper.exit()
        cv2.destroyAllWindows()
        # self.threadProcessQueue.join()
        time.sleep(1)
        self.exit = True

    # Custom methods for demo
    def get_opencv_version(self):
        import cv2 as lib
        return lib.__version__

    def check_opencv_version(self, major, lib=None):
        # if the supplied library is None, import OpenCV
        if lib is None:
            import cv2 as lib

        # return whether the current OpenCV version string starts with the
        # supplied major version prefix (a string such as "3." or "4.")
        return lib.__version__.startswith(major)
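
    # A minimal usage sketch (hypothetical code paths) for branching on the
    # installed OpenCV major version where APIs differ between releases:
    #
    #   if self.check_opencv_version("3."):
    #       ...  # OpenCV 3.x code path
    #   elif self.check_opencv_version("4."):
    #       ...  # OpenCV 4.x code path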