Example #1
    def update(self, pl):
        if not os.path.exists(self.data_dirpath):
            try:
                os.mkdir(self.data_dirpath)
            except Exception as e:
                logger.warn('Failed to create {}'.format(self.data_dirpath))
                raise e

        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        if 'bytes' in payload_json.keys():
            img_k = 'bytes'
        elif 'image_blob' in payload_json.keys():
            img_k = 'image_blob'
        else:
            raise Exception('No image data in MQTT payload')
        jpg_bytes = payload.destringify_jpg(payload_json[img_k])
        payload_json.pop(img_k)
        logger.debug('inference text result: {}'.format(payload_json))

        img = payload.jpg2rgb(jpg_bytes)
        try:
            res = payload_json['annotations']
        except KeyError:
            res = [{
                'label': 'hello',
                'confidence': 0.42,
                'left': random.randint(50, 60),
                'top': random.randint(50, 60),
                'right': random.randint(300, 400),
                'bottom': random.randint(300, 400)
            }]
        self.frame = overlay_on_image(img, res)
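overlay_on_image is a BerryNet helper whose body is not shown in this example. A minimal sketch of what such a function could look like with OpenCV, assuming only the annotation fields used above (label, confidence, left, top, right, bottom), is:

import cv2


def overlay_on_image_sketch(img, annotations):
    """Hypothetical stand-in for overlay_on_image: draw each bounding box
    and its label/confidence onto a copy of the decoded RGB frame."""
    frame = img.copy()
    for a in annotations:
        left, top = int(a['left']), int(a['top'])
        right, bottom = int(a['right']), int(a['bottom'])
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.putText(frame,
                    '{} {:.2f}'.format(a['label'], a['confidence']),
                    (left, max(top - 5, 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)
    return frame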
Example #2
def main():
    args = parse_args()

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    if args['mode'] == 'stream':
        counter = 0
        capture = cv2.VideoCapture(0)
        while True:
            status, im = capture.read()
            if (status is False):
                logger.warn('ERROR: Failure happened when reading frame')

            t = datetime.now()
            retval, jpg_bytes = cv2.imencode('.jpg', im)
            mqtt_payload = payload.serialize_jpg(jpg_bytes)
            comm.send('berrynet/data/rgbimage', mqtt_payload)
            logger.debug('send: {} ms'.format(duration(t)))

            time.sleep(1.0 / args['fps'])
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        mqtt_payload = payload.serialize_jpg(jpg_bytes)
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))
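The payload published above can be consumed by any MQTT subscriber. A minimal sketch using paho-mqtt and the same payload helpers shown in Example #1 (broker address and import path are assumptions) might look like:

import paho.mqtt.subscribe as subscribe

from berrynet.comm import payload  # import path assumed; use the payload module from the examples above

# Block until a single message arrives on the data topic.
msg = subscribe.simple('berrynet/data/rgbimage', hostname='localhost')
payload_json = payload.deserialize_payload(msg.payload.decode('utf-8'))
jpg_bytes = payload.destringify_jpg(payload_json['bytes'])
rgb_frame = payload.jpg2rgb(jpg_bytes)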
Example #3
    def send_result(self,
                    generalized_result,
                    topic='berrynet/engine/pipeline/result'):
        # NOTE: The pipeline output contains numpy floats, so we use
        # tools.dump_json instead of payload.serialize_payload.
        for r in generalized_result:
            try:
                c_id = r.get('channel', None) or r['meta']['channel_id']
                topic += '/{}'.format(c_id)
            except KeyError:
                logger.warn(
                    'No channel id found, set topic to {}'.format(topic))

            logger.debug('output topic: {}'.format(topic))
            publish.single(topic,
                           payload=tools.dump_json(generalized_result),
                           hostname=self.output_broker_ip)
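The NOTE in this example exists because json.dumps rejects numpy scalar types. The real tools.dump_json is not shown here; a rough sketch of a numpy-tolerant dump, for illustration only, could be:

import json

import numpy as np


def dump_json_sketch(obj):
    """Serialize obj to JSON, coercing numpy scalars/arrays to native types."""
    def default(o):
        if isinstance(o, np.integer):
            return int(o)
        if isinstance(o, np.floating):
            return float(o)
        if isinstance(o, np.ndarray):
            return o.tolist()
        raise TypeError('{} is not JSON serializable'.format(type(o)))
    return json.dumps(obj, default=default)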
Example #4
    def update(self, pl):
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        if 'bytes' in payload_json.keys():
            img_k = 'bytes'
        elif 'image_blob' in payload_json.keys():
            img_k = 'image_blob'
        else:
            raise Exception('No image data in MQTT payload')
        jpg_bytes = payload.destringify_jpg(payload_json[img_k])
        payload_json.pop(img_k)
        logger.debug('inference text result: {}'.format(payload_json))

        img = payload.jpg2rgb(jpg_bytes)

        if self.no_decoration:
            self.frame = img
        else:
            try:
                res = payload_json['annotations']
            except KeyError:
                res = [{
                    'label': 'hello',
                    'confidence': 0.42,
                    'left': random.randint(50, 60),
                    'top': random.randint(50, 60),
                    'right': random.randint(300, 400),
                    'bottom': random.randint(300, 400)
                }]
            self.frame = overlay_on_image(img, res)

        # Save frames for analysis or debugging
        if self.debug and self.save_frame:
            if not os.path.exists(self.data_dirpath):
                try:
                    os.mkdir(self.data_dirpath)
                except Exception as e:
                    logger.warn('Failed to create {}'.format(
                        self.data_dirpath))
                    raise e

            timestamp = datetime.now().isoformat()
            with open(pjoin(self.data_dirpath, timestamp + '.jpg'), 'wb') as f:
                f.write(jpg_bytes)
            with open(pjoin(self.data_dirpath, timestamp + '.json'), 'w') as f:
                f.write(json.dumps(payload_json, indent=4))
Example #5
    def update(self, pl):
        if not os.path.exists(self.data_dirpath):
            try:
                os.mkdir(self.data_dirpath)
            except Exception as e:
                logger.warn('Failed to create {}'.format(self.data_dirpath))
                raise e

        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(payload_json['bytes'])
        payload_json.pop('bytes')
        logger.debug('inference text result: {}'.format(payload_json))

        timestamp = datetime.now().isoformat()
        with open(pjoin(self.data_dirpath, timestamp + '.jpg'), 'wb') as f:
            f.write(jpg_bytes)
        with open(pjoin(self.data_dirpath, timestamp + '.json'), 'w') as f:
            f.write(json.dumps(payload_json, indent=4))
Example #6
    def update(self, pl):
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        if self.pipeline_compatible:
            b64img_key = 'image_blob'
        else:
            b64img_key = 'bytes'
        jpg_bytes = payload.destringify_jpg(payload_json[b64img_key])
        payload_json.pop(b64img_key)
        logger.debug('inference text result: {}'.format(payload_json))

        match_target_label = self.find_target_label(self.target_label,
                                                    payload_json)

        logger.debug('Find target label {0}: {1}'.format(
            self.target_label, match_target_label))

        if match_target_label:
            timestamp = datetime.now().isoformat()
            notification_image = pjoin('/tmp', timestamp + '.jpg')
            notification_text = pjoin('/tmp', timestamp + '.json')
            with open(notification_image, 'wb') as f:
                f.write(jpg_bytes)
            with open(notification_text, 'w') as f:
                f.write(json.dumps(payload_json, indent=4))

            try:
                send_email_text(self.email['sender_address'],
                                self.email['sender_password'],
                                self.email['receiver_address'],
                                body=('Target label {} is found. '
                                      'Please check the attachments.'
                                      ''.format(self.target_label)),
                                subject='BerryNet mail client notification',
                                attachments=set(
                                    [notification_image, notification_text]))
            except Exception as e:
                logger.warn(e)

            os.remove(notification_image)
            os.remove(notification_text)
        else:
            # target label is not in generalized result, do nothing
            pass
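send_email_text is a BerryNet helper that is not defined in this example. A rough standard-library sketch of an equivalent sender (the SMTP host and port are assumptions, not values from BerryNet) might be:

import mimetypes
import os
import smtplib

from email.message import EmailMessage


def send_email_text_sketch(sender_address, sender_password, receiver_address,
                           body='', subject='', attachments=()):
    """Hypothetical stand-in for send_email_text using smtplib."""
    msg = EmailMessage()
    msg['From'] = sender_address
    msg['To'] = receiver_address
    msg['Subject'] = subject
    msg.set_content(body)
    for filepath in attachments:
        ctype, _ = mimetypes.guess_type(filepath)
        maintype, subtype = (ctype or 'application/octet-stream').split('/', 1)
        with open(filepath, 'rb') as f:
            msg.add_attachment(f.read(), maintype=maintype, subtype=subtype,
                               filename=os.path.basename(filepath))
    # SMTP server is an assumption; replace with your provider's host/port.
    with smtplib.SMTP_SSL('smtp.example.com', 465) as server:
        server.login(sender_address, sender_password)
        server.send_message(msg)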
Example #7
    def update(self, pl):
        if not os.path.exists(self.data_dirpath):
            try:
                os.mkdir(self.data_dirpath)
            except Exception as e:
                logger.warn('Failed to create {}'.format(self.data_dirpath))
                raise e

        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        #payload_json = payload_json[0]
        if 'bytes' in payload_json.keys():
            img_k = 'bytes'
        else:
            raise Exception('No image data in MQTT payload')
        jpg_bytes = payload.destringify_jpg(payload_json[img_k])
        payload_json.pop(img_k)
        #logger.debug('inference text result: {}'.format(payload_json))

        img = payload.jpg2rgb(jpg_bytes)
        self.frame = img
Example #8
    def snapshot(self, pl):
        '''Send camera snapshot.

        The functionality is the same as using the camera client in file mode.

        The difference is that the snapshot client retrieves the image from
        the camera instead of from a given filepath.
        '''
        duration = lambda t: (datetime.now() - t).microseconds / 1000

        # WORKAROUND: Prevent VideoCapture from buffering frames.
        #     VideoCapture will buffer frames automatically, and we need
        #     to find a way to disable it.
        self.capture = cv2.VideoCapture(0)
        status, im = self.capture.read()
        if (status is False):
            logger.warn('ERROR: Failure happened when reading frame')

        t = datetime.now()
        retval, jpg_bytes = cv2.imencode('.jpg', im)
        mqtt_payload = payload.serialize_jpg(jpg_bytes)
        self.comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('send: {} ms'.format(duration(t)))
        self.capture.release()
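Regarding the WORKAROUND comment above: newer OpenCV releases expose cv2.CAP_PROP_BUFFERSIZE, which some capture backends honor to shrink the internal frame queue. Support is backend-dependent, so re-opening the capture as done above remains the safe fallback:

capture = cv2.VideoCapture(0)
# Ask the backend to keep at most one buffered frame; not every backend
# supports this property, so check the boolean return value.
buffering_limited = capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)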
Example #9
def main():
    args = parse_args()
    if args['debug']:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    if args['mode'] == 'stream':
        counter = 0
        # Check input stream source
        if args['stream_src'].isdigit():
            # source is a physically connected camera
            stream_source = '/dev/video{}'.format(int(args['stream_src']))
            capture = cv2.VideoCapture(int(args['stream_src']))
        else:
            # source is an IP camera
            stream_source = args['stream_src']
            capture = cv2.VideoCapture(args['stream_src'])
        cam_fps = capture.get(cv2.CAP_PROP_FPS)
        if cam_fps > 30 or cam_fps < 1:
            logger.warn(
                'Camera FPS is {} (>30 or <1). Set it to 30.'.format(cam_fps))
            cam_fps = 30
        out_fps = args['fps']
        interval = int(cam_fps / out_fps)

        # warmup
        #t_warmup_start = time.time()
        #t_warmup_now = time.time()
        #warmup_counter = 0
        #while t_warmup_now - t_warmup_start < 1:
        #    capture.read()
        #    warmup_counter += 1
        #    t_warmup_now = time.time()

        logger.debug('===== VideoCapture Information =====')
        logger.debug('Stream Source: {}'.format(stream_source))
        logger.debug('Camera FPS: {}'.format(cam_fps))
        logger.debug('Output FPS: {}'.format(out_fps))
        logger.debug('Interval: {}'.format(interval))
        #logger.debug('Warmup Counter: {}'.format(warmup_counter))
        logger.debug('====================================')

        while True:
            status, im = capture.read()
            if (status is False):
                logger.warn('ERROR: Failure happened when reading frame')

            # NOTE: Hard-coding rotation for AIKEA onboard camera.
            #       We will add parameter support in the future.
            im = tinycv.rotate_ccw_opencv(im)

            counter += 1
            if counter == interval:
                logger.debug('Drop frames: {}'.format(counter - 1))
                counter = 0

                # Open a window and display the ready-to-send frame.
                # This is useful for development and debugging.
                if args['display']:
                    cv2.imshow('Frame', im)
                    cv2.waitKey(1)

                t = datetime.now()
                #logger.debug('write frame to /tmp/output.jpg')
                #cv2.imwrite('/tmp/output.jpg', im)
                retval, jpg_bytes = cv2.imencode('.jpg', im)
                obj = {}
                obj['timestamp'] = datetime.now().isoformat()
                obj['bytes'] = payload.stringify_jpg(jpg_bytes)
                obj['meta'] = {
                    'roi': [{
                        'top': 50,
                        #'left': 341,
                        #'bottom': 500,
                        #'right': 682,
                        #'left': 640,
                        #'bottom': 980,
                        #'right': 1280,
                        'left': 10,
                        'bottom': 600,
                        'right': 600,
                        'overlap_threshold': 0.5
                    }]
                }
                logger.debug('timestamp: {}'.format(obj['timestamp']))
                logger.debug('bytes len: {}'.format(len(obj['bytes'])))
                logger.debug('meta: {}'.format(obj['meta']))
                mqtt_payload = payload.serialize_payload([obj])
                comm.send('berrynet/data/rgbimage', mqtt_payload)
                logger.debug('send: {} ms'.format(duration(t)))
            else:
                pass
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        obj = {}
        obj['timestamp'] = datetime.now().isoformat()
        obj['bytes'] = payload.stringify_jpg(jpg_bytes)
        obj['meta'] = {
            'roi': [{
                'top': 50,
                'left': 10,
                'bottom': 600,
                'right': 600,
                'overlap_threshold': 0.5
            }]
        }
        mqtt_payload = payload.serialize_payload([obj])
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))
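The roi entry with overlap_threshold in the metadata is interpreted by a downstream component that is not part of this example. One common reading, offered purely as an assumption, is to keep a detection when the fraction of its area falling inside the ROI reaches the threshold:

def roi_overlap_ratio_sketch(roi, box):
    """Fraction of box area inside roi; both dicts use the top/left/bottom/right
    keys seen in the payload above."""
    ix = max(0, min(roi['right'], box['right']) - max(roi['left'], box['left']))
    iy = max(0, min(roi['bottom'], box['bottom']) - max(roi['top'], box['top']))
    box_area = (box['right'] - box['left']) * (box['bottom'] - box['top'])
    return (ix * iy) / box_area if box_area > 0 else 0.0


roi = {'top': 50, 'left': 10, 'bottom': 600, 'right': 600, 'overlap_threshold': 0.5}
detection = {'top': 100, 'left': 200, 'bottom': 400, 'right': 500}
keep = roi_overlap_ratio_sketch(roi, detection) >= roi['overlap_threshold']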
Example #10
def main():
    args = parse_args()
    if args['debug']:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    metadata = json.loads(args.get('meta', '{}'))

    if args['mode'] == 'stream':
        counter = 0
        fail_counter = 0

        # Check input stream source
        if args['stream_src'].isdigit():
            # source is a physically connected camera
            stream_source = int(args['stream_src'])
        else:
            # source is an IP camera
            stream_source = args['stream_src']
        capture = cv2.VideoCapture(stream_source)
        cam_fps = capture.get(cv2.CAP_PROP_FPS)
        if cam_fps > 30 or cam_fps < 1:
            logger.warn('Camera FPS is {} (>30 or <1). Set it to 30.'.format(cam_fps))
            cam_fps = 30
        out_fps = args['fps']
        interval = int(cam_fps / out_fps)

        # warmup
        #t_warmup_start = time.time()
        #t_warmup_now = time.time()
        #warmup_counter = 0
        #while t_warmup_now - t_warmup_start < 1:
        #    capture.read()
        #    warmup_counter += 1
        #    t_warmup_now = time.time()

        logger.debug('===== VideoCapture Information =====')
        if isinstance(stream_source, int):
            stream_source_uri = '/dev/video{}'.format(stream_source)
        else:
            stream_source_uri = stream_source
        logger.debug('Stream Source: {}'.format(stream_source_uri))
        logger.debug('Camera FPS: {}'.format(cam_fps))
        logger.debug('Output FPS: {}'.format(out_fps))
        logger.debug('Interval: {}'.format(interval))
        logger.debug('Send MQTT Topic: {}'.format(args['topic']))
        #logger.debug('Warmup Counter: {}'.format(warmup_counter))
        logger.debug('====================================')

        while True:
            status, im = capture.read()

            # To verify whether the input source is alive, check the return
            # value of capture.read(). It does not work to catch exceptions
            # raised by the capture instance, nor to check the return value
            # of capture.isOpened().
            #
            # Two reasons:
            # 1. If a dead stream comes back to life, the capture instance
            #    gives no notification that the input source had been dead.
            #
            # 2. capture.isOpened() keeps returning True after a stream
            #    dies, so its return value cannot be used to determine
            #    whether a stream is alive or not.
            if (status is True):
                counter += 1
                if counter == interval:
                    logger.debug('Drop frames: {}'.format(counter-1))
                    counter = 0

                    # Open a window and display the ready-to-send frame.
                    # This is useful for development and debugging.
                    if args['display']:
                        cv2.imshow('Frame', im)
                        cv2.waitKey(1)

                    t = datetime.now()
                    retval, jpg_bytes = cv2.imencode('.jpg', im)
                    mqtt_payload = payload.serialize_jpg(jpg_bytes, args['hash'], metadata)
                    comm.send(args['topic'], mqtt_payload)
                    logger.debug('send: {} ms'.format(duration(t)))
                else:
                    pass
            else:
                fail_counter += 1
                logger.critical('ERROR: Failure #{} happened when reading frame'.format(fail_counter))

                # Re-create capture.
                capture.release()
                logger.critical('Re-create a capture and reconnect to {} after 5s'.format(stream_source))
                time.sleep(5)
                capture = cv2.VideoCapture(stream_source)
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        mqtt_payload = payload.serialize_jpg(jpg_bytes, args['hash'], metadata)
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send(args['topic'], mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))
Example #11
def main():
    args = parse_args()
    if args['debug']:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    comm_config = {
        'subscribe': {},
        'broker': {
            'address': args['broker_ip'],
            'port': args['broker_port']
        }
    }
    comm = Communicator(comm_config, debug=True)

    duration = lambda t: (datetime.now() - t).microseconds / 1000

    if args['mode'] == 'stream':
        counter = 0
        # Check input stream source
        if args['stream_src'].isdigit():
            # source is a physically connected camera
            stream_source = '/dev/video{}'.format(int(args['stream_src']))
            capture = cv2.VideoCapture(int(args['stream_src']))
        else:
            # source is an IP camera
            stream_source = args['stream_src']
            capture = cv2.VideoCapture(args['stream_src'])
        cam_fps = capture.get(cv2.CAP_PROP_FPS)
        if cam_fps > 30 or cam_fps < 1:
            logger.warn(
                'Camera FPS is {} (>30 or <1). Set it to 30.'.format(cam_fps))
            cam_fps = 30
        out_fps = args['fps']
        interval = int(cam_fps / out_fps)

        # warmup
        #t_warmup_start = time.time()
        #t_warmup_now = time.time()
        #warmup_counter = 0
        #while t_warmup_now - t_warmup_start < 1:
        #    capture.read()
        #    warmup_counter += 1
        #    t_warmup_now = time.time()

        logger.debug('===== VideoCapture Information =====')
        logger.debug('Stream Source: {}'.format(stream_source))
        logger.debug('Camera FPS: {}'.format(cam_fps))
        logger.debug('Output FPS: {}'.format(out_fps))
        logger.debug('Interval: {}'.format(interval))
        #logger.debug('Warmup Counter: {}'.format(warmup_counter))
        logger.debug('====================================')

        while True:
            status, im = capture.read()
            if (status is False):
                logger.warn('ERROR: Failure happened when reading frame')

            counter += 1
            if counter == interval:
                logger.debug('Drop frames: {}'.format(counter - 1))
                counter = 0

                # Open a window and display the ready-to-send frame.
                # This is useful for development and debugging.
                if args['display']:
                    cv2.imshow('Frame', im)
                    cv2.waitKey(1)

                t = datetime.now()
                retval, jpg_bytes = cv2.imencode('.jpg', im)
                mqtt_payload = payload.serialize_jpg(jpg_bytes)
                comm.send('berrynet/data/rgbimage', mqtt_payload)
                logger.debug('send: {} ms'.format(duration(t)))
            else:
                pass
    elif args['mode'] == 'file':
        # Prepare MQTT payload
        im = cv2.imread(args['filepath'])
        retval, jpg_bytes = cv2.imencode('.jpg', im)

        t = datetime.now()
        mqtt_payload = payload.serialize_jpg(jpg_bytes)
        logger.debug('payload: {} ms'.format(duration(t)))
        logger.debug('payload size: {}'.format(len(mqtt_payload)))

        # Client publishes payload
        t = datetime.now()
        comm.send('berrynet/data/rgbimage', mqtt_payload)
        logger.debug('mqtt.publish: {} ms'.format(duration(t)))
        logger.debug('publish at {}'.format(datetime.now().isoformat()))
    else:
        logger.error('User assigned unknown mode {}'.format(args['mode']))
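One caveat shared by the stream mode in Examples #9 through #11: interval = int(cam_fps / out_fps) becomes 0 when the requested output FPS exceeds the camera FPS, and the counter == interval test then never fires. A small guard, offered as a suggestion rather than as part of the original code, avoids that:

# Clamp to at least 1 so every captured frame is sent when out_fps >= cam_fps.
interval = max(1, int(cam_fps / out_fps))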