Example #1
class DashboardService(object):
    def __init__(self, service_name, comm_config):
        self.service_name = service_name
        self.comm_config = comm_config
        self.comm_config['subscribe'][
            'berrynet/engine/tensorflow/result'] = self.update
        self.comm_config['subscribe'][
            'berrynet/engine/mvclassification/result'] = self.update
        self.comm = Communicator(self.comm_config, debug=True)
        self.basedir = '/usr/local/berrynet/dashboard/www/freeboard'

    def update(self, pl):
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(payload_json['bytes'])
        inference_result = [
            '{0}: {1}<br>'.format(anno['label'], anno['confidence'])
            for anno in payload_json['annotations']
        ]
        logger.debug('inference results: {}'.format(inference_result))

        with open(pjoin(self.basedir, 'snapshot.jpg'), 'wb') as f:
            f.write(jpg_bytes)
        self.comm.send('berrynet/dashboard/snapshot', 'snapshot.jpg')
        self.comm.send('berrynet/dashboard/inferenceResult',
                       json.dumps(inference_result))

    def run(self, args):
        """Infinite loop serving inference requests"""
        self.comm.run()
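For context, a minimal sketch of wiring DashboardService to a broker is shown below. The comm_config layout follows the camera clients later in this listing; the broker address, port, and service name are illustrative assumptions, and Communicator is assumed to be imported as in the original module.

# Sketch only: broker values and service name are assumptions.
comm_config = {
    'subscribe': {},                # __init__ adds the two result topics
    'broker': {
        'address': 'localhost',     # assumed local MQTT broker
        'port': 1883                # assumed default MQTT port
    }
}
service = DashboardService('dashboard', comm_config)
service.run(None)                   # blocks, updating the freeboard dashboard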
Example #2
class DydaConfigUpdateClient(object):
    def __init__(self, comm_config, debug=False):
        self.comm_config = comm_config
        for topic, functor in self.comm_config['subscribe'].items():
            self.comm_config['subscribe'][topic] = self.handleResult
        self.comm = Communicator(self.comm_config, debug=True)

    def sendConfig(self, jsonPayload):
        self.comm.send(self.comm_config['publish'], jsonPayload)

    def handleResult(self, pl):
        try:
            payload_json = payload.deserialize_payload(pl.decode('utf-8'))
            print(payload_json)
            self.comm.stop_nb()
            sys.exit(0)
        except Exception as e:
            logger.info(e)

    def run(self, args):
        """Infinite loop serving inference requests"""
        with open(args['payload']) as f:
            payload = f.read()
            self.sendConfig(payload)
        self.comm.run()
Example #3
class DydaConfigUpdateService(object):
    def __init__(self, comm_config, debug=False):
        self.comm_config = comm_config
        for topic, functor in self.comm_config['subscribe'].items():
            self.comm_config['subscribe'][topic] = self.handleConfig
        self.comm = Communicator(self.comm_config, debug=True)

    def sendConfig(self, jsonPayload):
        self.comm.send(self.comm_config['publish'], jsonPayload)
        
    def handleConfig(self, pl):
        payload_json = ""
        try:
            payload_json = payload.deserialize_payload(pl.decode('utf-8'))
            self.comm.send(self.comm_config['publish'], payload.serialize_payload(payload_json))
        except Exception as e:
            logger.info(e)
            return  # don't overwrite the config file with an empty payload

        # output config file
        with open(self.comm_config['configfile'], 'w') as configfile:
            configfile.write(payload.serialize_payload(payload_json))

        # restart service
        subprocess.run(["supervisorctl", "restart", "bnpipeline-bndyda"])
            
    def run(self, args):
        """Infinite loop serving inference requests"""
        self.comm.run()
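For context, a minimal sketch of a comm_config for the config-update pair (examples #2 and #3) follows. The topic names, config file path, and broker values are placeholders rather than verified BerryNet defaults; the service republishes whatever JSON it receives and writes it to configfile before restarting the pipeline.

# Sketch only: topics, paths, and broker values are assumptions.
service_config = {
    'subscribe': {'berrynet/config/update': None},   # callback is replaced in __init__
    'publish': 'berrynet/config/updated',            # hypothetical result topic
    'configfile': '/tmp/pipeline_config.json',       # where handleConfig writes the config
    'broker': {'address': 'localhost', 'port': 1883}
}
DydaConfigUpdateService(service_config).run(None)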
Example #4
def main():
    args = parse_args()

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    if args['mode'] == 'stream':
        counter = 0
        capture = cv2.VideoCapture(0)
        while True:
            status, im = capture.read()
            if not status:
                logger.warning('ERROR: Failure happened when reading frame')
                continue

            t = datetime.now()
            retval, jpg_bytes = cv2.imencode('.jpg', im)
            mqtt_payload = payload.serialize_jpg(jpg_bytes)
            comm.send('berrynet/data/rgbimage', mqtt_payload)
            logger.debug('send: {} ms'.format(duration(t)))

            time.sleep(1.0 / args['fps'])
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        mqtt_payload = payload.serialize_jpg(jpg_bytes)
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))
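For reference, a minimal subscriber sketch for the frames published above follows. The 'bytes' field matches the handlers in examples #1 and #7; the output path and broker values are assumptions, and Communicator and payload are assumed to be imported as in the examples.

# Sketch only: output path and broker values are assumptions.
def on_rgbimage(pl):
    payload_json = payload.deserialize_payload(pl.decode('utf-8'))
    jpg_bytes = payload.destringify_jpg(payload_json['bytes'])
    with open('/tmp/latest-rgbimage.jpg', 'wb') as f:
        f.write(jpg_bytes)

receiver_config = {
    'subscribe': {'berrynet/data/rgbimage': on_rgbimage},
    'broker': {'address': 'localhost', 'port': 1883}
}
Communicator(receiver_config, debug=True).run()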
Example #5
class DydaConfigUpdateService(object):
    def __init__(self, comm_config, debug=False):
        self.comm_config = comm_config
        for topic, functor in self.comm_config['subscribe'].items():
            self.comm_config['subscribe'][topic] = self.handleConfig
        self.comm = Communicator(self.comm_config, debug=True)
        idlistConfig = configparser.ConfigParser()
        idlistConfig.read(self.comm_config['idlist'])
        self.idlist = idlistConfig["ID"]

    def sendConfig(self, jsonPayload):
        self.comm.send(self.comm_config['publish'], jsonPayload)

    def handleConfig(self, pl):
        payload_json = ""
        try:
            config_id = pl.decode('utf-8')  # avoid shadowing the id() builtin
            if config_id in self.idlist:
                configFilename = self.idlist[config_id]
                with open(configFilename) as f:
                    payload_json = payload.deserialize_payload(f.read())
                self.sendConfig(payload.serialize_payload(payload_json))
            else:
                logger.warning('ID %s is not in idlist' % config_id)
                return
        except Exception as e:
            logger.info(e)
            return  # don't overwrite the config file with an empty payload

        # output config file
        with open(self.comm_config['configfile'], 'w') as configfile:
            configfile.write(payload.serialize_payload(payload_json))

        # restart service
        subprocess.run(["supervisorctl", "restart", "bnpipeline-bndyda"])

    def run(self, args):
        """Infinite loop serving inference requests"""
        self.comm.run()
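The idlist read above is a standard configparser INI file whose [ID] section maps an ID string to a pipeline config path. A hypothetical file and lookup are sketched below; the IDs and paths are assumptions. Publishing one of these ID strings as a raw payload to the subscribed topic makes handleConfig() load the mapped file and republish its content.

# Hypothetical /etc/berrynet/idlist.ini:
#
#   [ID]
#   cam01 = /etc/berrynet/pipeline_cam01.json
#   cam02 = /etc/berrynet/pipeline_cam02.json
import configparser

idlist = configparser.ConfigParser()
idlist.read('/etc/berrynet/idlist.ini')
print(idlist['ID']['cam01'])        # -> /etc/berrynet/pipeline_cam01.json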
Example #6
class SnapshotService(object):
    def __init__(self, comm_config):
        self.comm_config = comm_config
        for topic, functor in self.comm_config['subscribe'].items():
            self.comm_config['subscribe'][topic] = eval(functor)
        self.comm_config['subscribe'][
            'berrynet/trigger/controller/snapshot'] = self.snapshot
        self.comm = Communicator(self.comm_config, debug=True)

    def snapshot(self, pl):
        '''Send camera snapshot.

        The functionality is the same as using camera client in file mode.

        The difference is that snapshot client retrieves image from camera
        instead of given filepath.
        '''
        duration = lambda t: (datetime.now() - t).microseconds / 1000

        # WORKAROUND: Prevent VideoCapture from buffering frames.
        #     VideoCapture will buffer frames automatically, and we need
        #     to find a way to disable it.
        self.capture = cv2.VideoCapture(0)
        status, im = self.capture.read()
        if not status:
            logger.warning('ERROR: Failure happened when reading frame')
            self.capture.release()
            return

        t = datetime.now()
        retval, jpg_bytes = cv2.imencode('.jpg', im)
        mqtt_payload = payload.serialize_jpg(jpg_bytes)
        self.comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('send: {} ms'.format(duration(t)))
        self.capture.release()

    def run(self, args):
        """Infinite loop serving inference requests"""
        self.comm.run()
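A minimal sketch of triggering SnapshotService from another process follows, mirroring DataCollectorService.send_snapshot_trigger() in the next example. The broker values are assumptions; Communicator is assumed to be imported as in the examples.

import json
from datetime import datetime

# Sketch only: broker values are assumptions.
trigger_config = {
    'subscribe': {},
    'broker': {'address': 'localhost', 'port': 1883}
}
comm = Communicator(trigger_config, debug=True)
comm.send('berrynet/trigger/controller/snapshot',
          json.dumps({'timestamp': datetime.now().isoformat()}))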
Example #7
class DataCollectorService(object):
    def __init__(self, comm_config, data_dirpath):
        self.comm_config = comm_config
        for topic, functor in self.comm_config['subscribe'].items():
            self.comm_config['subscribe'][topic] = eval(functor)
        #self.comm_config['subscribe']['berrynet/data/rgbimage'] = self.update
        self.comm_config['subscribe']['berrynet/engine/pipeline/result'] = self.save_pipeline_result
        self.comm = Communicator(self.comm_config, debug=True)
        self.data_dirpath = data_dirpath

    def update(self, pl):
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))

        # update UI with the latest inference result
        self.ui.update(payload_json, 'bytes')

        if self.data_dirpath:
            if not os.path.exists(self.data_dirpath):
                try:
                    os.mkdir(self.data_dirpath)
                except Exception as e:
                    logger.warning('Failed to create {}'.format(self.data_dirpath))
                    raise

            jpg_bytes = payload.destringify_jpg(payload_json['bytes'])
            payload_json.pop('bytes')
            logger.debug('inference text result: {}'.format(payload_json))

            timestamp = datetime.now().isoformat()
            with open(pjoin(self.data_dirpath, timestamp + '.jpg'), 'wb') as f:
                f.write(jpg_bytes)
            with open(pjoin(self.data_dirpath, timestamp + '.json'), 'w') as f:
                f.write(json.dumps(payload_json, indent=4))

    def save_pipeline_result(self, pl):
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))

        # update UI with the latest inference result
        self.ui.update(payload_json, 'image_blob')

        if self.data_dirpath:
            if not os.path.exists(self.data_dirpath):
                try:
                    os.mkdir(self.data_dirpath)
                except Exception as e:
                    logger.warning('Failed to create {}'.format(self.data_dirpath))
                    raise

            jpg_bytes = payload.destringify_jpg(payload_json['image_blob'])
            payload_json.pop('image_blob')
            logger.debug('inference text result: {}'.format(payload_json))

            timestamp = datetime.now().isoformat()
            with open(pjoin(self.data_dirpath, timestamp + '.jpg'), 'wb') as f:
                f.write(jpg_bytes)
            with open(pjoin(self.data_dirpath, timestamp + '.json'), 'w') as f:
                f.write(json.dumps(payload_json, indent=4))

    def send_snapshot_trigger(self):
        trigger_payload = {}  # avoid shadowing the payload module
        trigger_payload['timestamp'] = datetime.now().isoformat()
        mqtt_payload = json.dumps(trigger_payload)
        self.comm.send('berrynet/trigger/controller/snapshot', mqtt_payload)

    def run(self, args):
        """Infinite loop serving inference requests"""
        self.comm.run()
Example #8
def main():
    args = parse_args()
    if args['debug']:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    if args['mode'] == 'stream':
        counter = 0
        # Check input stream source
        if args['stream_src'].isdigit():
            # source is a physically connected camera
            stream_source = '/dev/video{}'.format(int(args['stream_src']))
            capture = cv2.VideoCapture(int(args['stream_src']))
        else:
            # source is an IP camera
            stream_source = args['stream_src']
            capture = cv2.VideoCapture(args['stream_src'])
        cam_fps = capture.get(cv2.CAP_PROP_FPS)
        if cam_fps > 30 or cam_fps < 1:
            logger.warning(
                'Camera FPS is {} (>30 or <1). Set it to 30.'.format(cam_fps))
            cam_fps = 30
        out_fps = args['fps']
        interval = int(cam_fps / out_fps)

        # warmup
        #t_warmup_start = time.time()
        #t_warmup_now = time.time()
        #warmup_counter = 0
        #while t_warmup_now - t_warmup_start < 1:
        #    capture.read()
        #    warmup_counter += 1
        #    t_warmup_now = time.time()

        logger.debug('===== VideoCapture Information =====')
        logger.debug('Stream Source: {}'.format(stream_source))
        logger.debug('Camera FPS: {}'.format(cam_fps))
        logger.debug('Output FPS: {}'.format(out_fps))
        logger.debug('Interval: {}'.format(interval))
        #logger.debug('Warmup Counter: {}'.format(warmup_counter))
        logger.debug('====================================')

        while True:
            status, im = capture.read()
            if not status:
                logger.warning('ERROR: Failure happened when reading frame')
                continue

            # NOTE: Hard-coding rotation for AIKEA onboard camera.
            #       We will add parameter support in the future.
            im = tinycv.rotate_ccw_opencv(im)

            counter += 1
            if counter == interval:
                logger.debug('Drop frames: {}'.format(counter - 1))
                counter = 0

                # Open a window and display the ready-to-send frame.
                # This is useful for development and debugging.
                if args['display']:
                    cv2.imshow('Frame', im)
                    cv2.waitKey(1)

                t = datetime.now()
                #logger.debug('write frame to /tmp/output.jpg')
                #cv2.imwrite('/tmp/output.jpg', im)
                retval, jpg_bytes = cv2.imencode('.jpg', im)
                obj = {}
                obj['timestamp'] = datetime.now().isoformat()
                obj['bytes'] = payload.stringify_jpg(jpg_bytes)
                obj['meta'] = {
                    'roi': [{
                        'top': 50,
                        'left': 10,
                        'bottom': 600,
                        'right': 600,
                        'overlap_threshold': 0.5
                    }]
                }
                logger.debug('timestamp: {}'.format(obj['timestamp']))
                logger.debug('bytes len: {}'.format(len(obj['bytes'])))
                logger.debug('meta: {}'.format(obj['meta']))
                mqtt_payload = payload.serialize_payload([obj])
                comm.send('berrynet/data/rgbimage', mqtt_payload)
                logger.debug('send: {} ms'.format(duration(t)))
            else:
                pass
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        obj = {}
        obj['timestamp'] = datetime.now().isoformat()
        obj['bytes'] = payload.stringify_jpg(jpg_bytes)
        obj['meta'] = {
            'roi': [{
                'top': 50,
                'left': 10,
                'bottom': 600,
                'right': 600,
                'overlap_threshold': 0.5
            }]
        }
        mqtt_payload = payload.serialize_payload([obj])
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))
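A worked example of the frame-skipping arithmetic used in stream mode above (the FPS values are assumed): with a 30 FPS camera and an output rate of 10 FPS, interval is 3, so every third frame is encoded and published and the other two are dropped.

# Sketch only: the FPS values are assumptions.
cam_fps = 30.0
out_fps = 10
interval = int(cam_fps / out_fps)   # 3 -> publish every 3rd frame, drop 2
assert interval == 3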
Example #9
def main():
    args = parse_args()
    if args['debug']:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    metadata = json.loads(args.get('meta', '{}'))

    if args['mode'] == 'stream':
        counter = 0
        fail_counter = 0

        # Check input stream source
        if args['stream_src'].isdigit():
            # source is a physically connected camera
            stream_source = int(args['stream_src'])
        else:
            # source is an IP camera
            stream_source = args['stream_src']
        capture = cv2.VideoCapture(stream_source)
        cam_fps = capture.get(cv2.CAP_PROP_FPS)
        if cam_fps > 30 or cam_fps < 1:
            logger.warning('Camera FPS is {} (>30 or <1). Set it to 30.'.format(cam_fps))
            cam_fps = 30
        out_fps = args['fps']
        interval = int(cam_fps / out_fps)

        # warmup
        #t_warmup_start = time.time()
        #t_warmup_now = time.time()
        #warmup_counter = 0
        #while t_warmup_now - t_warmup_start < 1:
        #    capture.read()
        #    warmup_counter += 1
        #    t_warmup_now = time.time()

        logger.debug('===== VideoCapture Information =====')
        if isinstance(stream_source, int):
            stream_source_uri = '/dev/video{}'.format(stream_source)
        else:
            stream_source_uri = stream_source
        logger.debug('Stream Source: {}'.format(stream_source_uri))
        logger.debug('Camera FPS: {}'.format(cam_fps))
        logger.debug('Output FPS: {}'.format(out_fps))
        logger.debug('Interval: {}'.format(interval))
        logger.debug('Send MQTT Topic: {}'.format(args['topic']))
        #logger.debug('Warmup Counter: {}'.format(warmup_counter))
        logger.debug('====================================')

        while True:
            status, im = capture.read()

            # To verify whether the input source is alive, check the return
            # value of capture.read(). Catching exceptions from the capture
            # instance or checking capture.isOpened() does not work.
            #
            # Two reasons:
            # 1. The capture instance gives no notification when a dead
            #    stream comes back alive (or when it dies in the first place).
            #
            # 2. capture.isOpened() keeps returning True after a stream has
            #    died, so it cannot be used to determine whether a stream
            #    is alive or not.
            if (status is True):
                counter += 1
                if counter == interval:
                    logger.debug('Drop frames: {}'.format(counter-1))
                    counter = 0

                    # Open a window and display the ready-to-send frame.
                    # This is useful for development and debugging.
                    if args['display']:
                        cv2.imshow('Frame', im)
                        cv2.waitKey(1)

                    t = datetime.now()
                    retval, jpg_bytes = cv2.imencode('.jpg', im)
                    mqtt_payload = payload.serialize_jpg(jpg_bytes, args['hash'], metadata)
                    comm.send(args['topic'], mqtt_payload)
                    logger.debug('send: {} ms'.format(duration(t)))
                else:
                    pass
            else:
                fail_counter += 1
                logger.critical('ERROR: Failure #{} happened when reading frame'.format(fail_counter))

                # Re-create capture.
                capture.release()
                logger.critical('Re-create a capture and reconnect to {} after 5s'.format(stream_source))
                time.sleep(5)
                capture = cv2.VideoCapture(stream_source)
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        mqtt_payload = payload.serialize_jpg(jpg_bytes, args['hash'], metadata)
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send(args['topic'], mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))
Example #10
def main():
    args = parse_args()
    if args['debug']:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    if args['mode'] == 'stream':
        counter = 0
        # Check input stream source
        if args['stream_src'].isdigit():
            # source is a physically connected camera
            stream_source = '/dev/video{}'.format(int(args['stream_src']))
            capture = cv2.VideoCapture(int(args['stream_src']))
        else:
            # source is an IP camera
            stream_source = args['stream_src']
            capture = cv2.VideoCapture(args['stream_src'])
        cam_fps = capture.get(cv2.CAP_PROP_FPS)
        if cam_fps > 30 or cam_fps < 1:
            logger.warning(
                'Camera FPS is {} (>30 or <1). Set it to 30.'.format(cam_fps))
            cam_fps = 30
        out_fps = args['fps']
        interval = int(cam_fps / out_fps)

        # warmup
        #t_warmup_start = time.time()
        #t_warmup_now = time.time()
        #warmup_counter = 0
        #while t_warmup_now - t_warmup_start < 1:
        #    capture.read()
        #    warmup_counter += 1
        #    t_warmup_now = time.time()

        logger.debug('===== VideoCapture Information =====')
        logger.debug('Stream Source: {}'.format(stream_source))
        logger.debug('Camera FPS: {}'.format(cam_fps))
        logger.debug('Output FPS: {}'.format(out_fps))
        logger.debug('Interval: {}'.format(interval))
        #logger.debug('Warmup Counter: {}'.format(warmup_counter))
        logger.debug('====================================')

        while True:
            status, im = capture.read()
            if not status:
                logger.warning('ERROR: Failure happened when reading frame')
                continue

            counter += 1
            if counter == interval:
                logger.debug('Drop frames: {}'.format(counter - 1))
                counter = 0

                # Open a window and display the ready-to-send frame.
                # This is useful for development and debugging.
                if args['display']:
                    cv2.imshow('Frame', im)
                    cv2.waitKey(1)

                t = datetime.now()
                retval, jpg_bytes = cv2.imencode('.jpg', im)
                mqtt_payload = payload.serialize_jpg(jpg_bytes)
                comm.send('berrynet/data/rgbimage', mqtt_payload)
                logger.debug('send: {} ms'.format(duration(t)))
            else:
                pass
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        mqtt_payload = payload.serialize_jpg(jpg_bytes)
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))