예제 #1
0
    def single_shot(self, pl):
        """Capture a single image from the camera client and send it to
        the Telegram chat that requested it.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with a stringified JPEG under the 'bytes' key.
        """
        if self.shot:
            try:
                payload_json = payload.deserialize_payload(pl.decode('utf-8'))
                # WORKAROUND: Support customized camera client.
                #
                # Original camera client sends an `obj` in payload,
                # Customized camera client sends an `[obj]` in payload.
                #
                # We are unifying the rules. Before that, checking the type
                # as workaround.
                if isinstance(payload_json, list):
                    # Typo fixed: message used to read 'WORDAROUND'.
                    logger.debug('WORKAROUND: receive and unpack [obj]')
                    payload_json = payload_json[0]
                jpg_bytes = payload.destringify_jpg(payload_json["bytes"])
                jpg_file_descriptor = io.BytesIO(jpg_bytes)

                logger.info('Send single shot')
                self.updater.bot.send_photo(chat_id=self.single_shot_chat_id,
                                            photo=jpg_file_descriptor)
            except Exception as e:
                logger.info(e)

            # A single shot is a one-off request: disarm after handling.
            self.shot = False
        else:
            logger.debug('Single shot is disabled, do nothing.')
예제 #2
0
    def inference(self, pl):
        """Run object detection on a JPEG payload and forward the result.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with a stringified JPEG under the 'bytes' key.
        """
        def duration(t):
            # timedelta.microseconds only holds the sub-second component,
            # so timings over one second used to wrap around; total_seconds
            # reports the full elapsed time, converted here to ms.
            return (datetime.now() - t).total_seconds() * 1000

        t = datetime.now()
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_array = payload.jpg2bgr(jpg_bytes)
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        image_data = self.engine.process_input(bgr_array)
        output = self.engine.inference(image_data)
        model_outputs = self.engine.process_output(output)
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Detection takes {} ms'.format(duration(t)))

        logger.debug('draw = {}'.format(self.draw))
        # NOTE(review): the bounding-box drawing branch was commented out
        # upstream, so both draw states published the identical undecorated
        # result; the dead branch has been collapsed.
        self.result_hook(self.generalize_result(jpg_json, model_outputs))
예제 #3
0
    def inference(self, pl):
        """Run classification on a JPEG payload and forward the result.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with a stringified JPEG under the 'bytes' key.
        """
        def duration(t):
            # timedelta.microseconds only holds the sub-second component,
            # so timings over one second used to wrap around; total_seconds
            # reports the full elapsed time, converted here to ms.
            return (datetime.now() - t).total_seconds() * 1000

        t = datetime.now()
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        rgb_array = payload.jpg2rgb(jpg_bytes)
        logger.debug('jpg2rgb: {} ms'.format(duration(t)))

        t = datetime.now()
        image_data = self.engine.process_input(rgb_array)
        output = self.engine.inference(image_data)
        model_outputs = self.engine.process_output(output)
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Classification takes {} ms'.format(duration(t)))

        self.result_hook(self.generalize_result(jpg_json, model_outputs))
예제 #4
0
    def update(self, pl):
        """Notify registered Telegram chats with the inference snapshot.

        A photo is sent either when no target label is configured and the
        result has any annotation, or when the configured target label
        matches the result.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with a stringified JPEG under the 'bytes' key.
        """
        try:
            payload_json = payload.deserialize_payload(pl.decode('utf-8'))
            jpg_bytes = payload.destringify_jpg(payload_json["bytes"])

            # Bot is not ready yet: nothing can be sent to anyone.
            if self.updater is None:
                return

            for u in self.cameraHandlers:
                # BUGFIX: send_photo consumes the file object, so a single
                # BytesIO shared across the loop sent an empty image to
                # every chat after the first. Create a fresh stream per
                # recipient instead.
                jpg_file_descriptor = io.BytesIO(jpg_bytes)

                if self.target_label == '':
                    if len(payload_json['annotations']) > 0:
                        logger.debug("Send photo to %s" % u)
                        self.updater.bot.send_photo(chat_id=u,
                                                    photo=jpg_file_descriptor)
                    else:
                        logger.debug("Does not detect any object, no action")
                elif self.match_target_label(self.target_label, payload_json):
                    logger.info("Send notification photo with result to %s" %
                                u)
                    self.updater.bot.send_photo(chat_id=u,
                                                photo=jpg_file_descriptor)
        except Exception as e:
            logger.info(e)
예제 #5
0
    def update(self, data, imgkey='bytes'):
        '''Refresh the GUI with a new inference result.

        Updates the text area (colored blue when the result text contains
        'safely', red otherwise) and the image area, then resizes the
        window to fit both.

        Args:
            data: Inference result loaded from JSON object
            imgkey: Key in ``data`` holding the stringified JPEG image.
        '''
        # Retrieve result image
        jpg_bytes = payload.destringify_jpg(data[imgkey])
        img = payload.jpg2rgb(jpg_bytes)

        # Retrieve result text, and update text area
        # (drop the image blob so process_output only sees text fields).
        data.pop(imgkey)
        self.result.delete('0.0', tk.END)
        result_text = self.process_output(data)
        # Tag the inserted text so its color can reflect the result status.
        self.result.tag_add('counter', '1.0', '1.1')
        if 'safely' in result_text:
            self.result.tag_config('counter', foreground='blue')
        else:
            self.result.tag_config('counter', foreground='red')
        self.result.insert(tk.INSERT, result_text, 'counter')

        # update image area
        # NOTE(review): PIL's resize() expects (width, height); passing
        # (canvas_h, canvas_w) looks swapped — confirm the attribute
        # semantics before changing anything here.
        resized_img = Image.fromarray(img).resize((self.canvas_h, self.canvas_w))
        self.photo = ImageTk.PhotoImage(image=resized_img)
        win_w = self.photo.width() + self.result.winfo_width()
        win_h = self.photo.height() + self.snapshot_button.winfo_height()
        self.window.geometry('{}x{}'.format(win_w, win_h))
        self.canvas.itemconfig(self.image_id, image=self.photo)
예제 #6
0
    def update(self, pl):
        """Decode an inference payload and refresh the display frame with
        annotation overlays.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with the image under 'bytes' or 'image_blob'.

        Raises:
            Exception: if the payload carries no image data, or the data
                directory cannot be created.
        """
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair
        # and can also create missing parent directories.
        try:
            os.makedirs(self.data_dirpath, exist_ok=True)
        except Exception:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning('Failed to create {}'.format(self.data_dirpath))
            raise

        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        # Accept both key names until the producers agree on one schema.
        if 'bytes' in payload_json:
            img_k = 'bytes'
        elif 'image_blob' in payload_json:
            img_k = 'image_blob'
        else:
            raise Exception('No image data in MQTT payload')
        jpg_bytes = payload.destringify_jpg(payload_json[img_k])
        payload_json.pop(img_k)
        logger.debug('inference text result: {}'.format(payload_json))

        img = payload.jpg2rgb(jpg_bytes)
        try:
            res = payload_json['annotations']
        except KeyError:
            # Fallback dummy annotation so the overlay path can be
            # exercised even when the result carries no detections.
            res = [{
                'label': 'hello',
                'confidence': 0.42,
                'left': random.randint(50, 60),
                'top': random.randint(50, 60),
                'right': random.randint(300, 400),
                'bottom': random.randint(300, 400)
            }]
        self.frame = overlay_on_image(img, res)
예제 #7
0
    def inference(self, pl):
        """Run detection on a JPEG payload; optionally draw bounding boxes.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with a stringified JPEG under the 'bytes' key.
        """
        def elapsed_ms(since):
            # BUGFIX: time.time() deltas are in seconds, but the log
            # messages claim milliseconds; convert so they agree.
            return (time.time() - since) * 1000

        t0 = time.time()
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])
        logger.debug('destringify_jpg: {} ms'.format(elapsed_ms(t0)))

        t1 = time.time()
        bgr_array = payload.jpg2bgr(jpg_bytes)
        logger.debug('jpg2bgr: {} ms'.format(elapsed_ms(t1)))

        t2 = time.time()
        image_data = self.engine.process_input(bgr_array)
        output = self.engine.inference(image_data)
        model_outputs = self.engine.process_output(output)
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Detection takes {} ms'.format(elapsed_ms(t2)))

        classes = self.engine.classes
        labels = self.engine.labels

        logger.debug('draw = {}'.format(self.draw))
        if self.draw is False:
            self.result_hook(self.generalize_result(jpg_json, model_outputs))
        else:
            self.result_hook(
                draw_bb(bgr_array,
                        self.generalize_result(jpg_json, model_outputs),
                        generate_class_color(class_num=classes), labels))
예제 #8
0
 def result_hook(self, generalized_result):
     """Persist a generalized result as a JPEG plus a JSON sidecar.

     The image payload is popped out of the result so the remaining
     (human-readable) fields can be logged and dumped as JSON.
     """
     result = generalized_result
     jpg_bytes = payload.destringify_jpg(result.pop('bytes'))
     logger.debug('generalized result (readable only): {}'.format(result))
     timestamp = result['timestamp']
     with open('/tmp/mockup/{}.jpg'.format(timestamp), 'wb') as img_file:
         img_file.write(jpg_bytes)
     with open('/tmp/mockup/{}.json'.format(timestamp), 'w') as json_file:
         json.dump(result, json_file, indent=4)
예제 #9
0
    def inference(self, pl):
        """Decode a JPEG payload, run the engine, and publish the result."""
        request = payload.deserialize_payload(pl.decode('utf-8'))
        raw_jpg = payload.destringify_jpg(request['bytes'])
        frame = payload.jpg2bgr(raw_jpg)

        engine_input = self.engine.process_input(frame)
        raw_output = self.engine.inference(engine_input)
        results = self.engine.process_output(raw_output)

        self.result_hook(self.generalize_result(request, results))
예제 #10
0
    def update(self, pl):
        """Publish the latest snapshot and inference text to the dashboard."""
        result = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(result['bytes'])

        # One HTML-ish line per annotation for the dashboard text panel.
        inference_result = []
        for anno in result['annotations']:
            inference_result.append(
                '{0}: {1}<br>'.format(anno['label'], anno['confidence']))
        logger.debug('inference results: {}'.format(inference_result))

        snapshot_path = pjoin(self.basedir, 'snapshot.jpg')
        with open(snapshot_path, 'wb') as snapshot_file:
            snapshot_file.write(jpg_bytes)
        self.comm.send('berrynet/dashboard/snapshot', 'snapshot.jpg')
        self.comm.send('berrynet/dashboard/inferenceResult',
                       json.dumps(inference_result))
예제 #11
0
    def update(self, pl):
        """Decode an inference payload, refresh the display frame, and
        optionally archive frame + text for debugging.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with the image under 'bytes' or 'image_blob'.

        Raises:
            Exception: if the payload carries no image data, or the data
                directory cannot be created.
        """
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        # Accept both key names until the producers agree on one schema.
        if 'bytes' in payload_json:
            img_k = 'bytes'
        elif 'image_blob' in payload_json:
            img_k = 'image_blob'
        else:
            raise Exception('No image data in MQTT payload')
        jpg_bytes = payload.destringify_jpg(payload_json[img_k])
        payload_json.pop(img_k)
        logger.debug('inference text result: {}'.format(payload_json))

        img = payload.jpg2rgb(jpg_bytes)

        if self.no_decoration:
            self.frame = img
        else:
            try:
                res = payload_json['annotations']
            except KeyError:
                # Fallback dummy annotation so the overlay path can be
                # exercised even when the result carries no detections.
                res = [{
                    'label': 'hello',
                    'confidence': 0.42,
                    'left': random.randint(50, 60),
                    'top': random.randint(50, 60),
                    'right': random.randint(300, 400),
                    'bottom': random.randint(300, 400)
                }]
            self.frame = overlay_on_image(img, res)

        # Save frames for analysis or debugging
        if self.debug and self.save_frame:
            # makedirs(exist_ok=True) replaces the racy exists()+mkdir()
            # pair and can also create missing parent directories.
            try:
                os.makedirs(self.data_dirpath, exist_ok=True)
            except Exception:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning('Failed to create {}'.format(self.data_dirpath))
                raise

            timestamp = datetime.now().isoformat()
            with open(pjoin(self.data_dirpath, timestamp + '.jpg'), 'wb') as f:
                f.write(jpg_bytes)
            with open(pjoin(self.data_dirpath, timestamp + '.json'), 'w') as f:
                f.write(json.dumps(payload_json, indent=4))
예제 #12
0
    def update(self, pl):
        """Archive an inference result as a timestamped JPEG + JSON pair.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with a stringified JPEG under the 'bytes' key.

        Raises:
            Exception: if the data directory cannot be created.
        """
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair
        # and can also create missing parent directories.
        try:
            os.makedirs(self.data_dirpath, exist_ok=True)
        except Exception:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning('Failed to create {}'.format(self.data_dirpath))
            raise

        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(payload_json['bytes'])
        payload_json.pop('bytes')
        logger.debug('inference text result: {}'.format(payload_json))

        timestamp = datetime.now().isoformat()
        with open(pjoin(self.data_dirpath, timestamp + '.jpg'), 'wb') as f:
            f.write(jpg_bytes)
        with open(pjoin(self.data_dirpath, timestamp + '.json'), 'w') as f:
            f.write(json.dumps(payload_json, indent=4))
예제 #13
0
    def update(self, data, imgkey='bytes'):
        '''Refresh the GUI image and text areas from an inference result.

        Args:
            data: Inference result loaded from JSON object
            imgkey: Key in ``data`` holding the stringified JPEG image.
        '''
        # Refresh the image area first.
        jpg_bytes = payload.destringify_jpg(data[imgkey])
        rgb_array = payload.jpg2rgb(jpg_bytes)
        self.photo = ImageTk.PhotoImage(image=Image.fromarray(rgb_array))

        win_width = self.photo.width()
        win_height = self.photo.height() + self.result.winfo_height()
        self.window.geometry('{}x{}'.format(win_width, win_height))
        self.canvas.itemconfig(self.image_id, image=self.photo)

        # Then refresh the text area (without the raw image blob).
        data.pop(imgkey)
        self.result.config(text=json.dumps(data, indent=4))
예제 #14
0
    def update(self, pl):
        """Send an email notification when the target label is detected.

        The JPEG and the JSON result are written to temporary files,
        attached to the mail, and removed afterwards.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object;
                the image key depends on pipeline compatibility mode.
        """
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        if self.pipeline_compatible:
            b64img_key = 'image_blob'
        else:
            b64img_key = 'bytes'
        jpg_bytes = payload.destringify_jpg(payload_json[b64img_key])
        payload_json.pop(b64img_key)
        logger.debug('inference text result: {}'.format(payload_json))

        match_target_label = self.find_target_label(self.target_label,
                                                    payload_json)

        logger.debug('Find target label {0}: {1}'.format(
            self.target_label, match_target_label))

        if not match_target_label:
            # Target label is not in the generalized result, do nothing.
            return

        timestamp = datetime.now().isoformat()
        notification_image = pjoin('/tmp', timestamp + '.jpg')
        notification_text = pjoin('/tmp', timestamp + '.json')
        with open(notification_image, 'wb') as f:
            f.write(jpg_bytes)
        with open(notification_text, 'w') as f:
            f.write(json.dumps(payload_json, indent=4))

        try:
            send_email_text(self.email['sender_address'],
                            self.email['sender_password'],
                            self.email['receiver_address'],
                            body=('Target label {} is found. '
                                  'Please check the attachments.'
                                  ''.format(self.target_label)),
                            subject='BerryNet mail client notification',
                            attachments=set(
                                [notification_image, notification_text]))
        except Exception as e:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning(e)
        finally:
            # Always clean up the temporary attachment files.
            os.remove(notification_image)
            os.remove(notification_text)
예제 #15
0
    def update(self, pl):
        """Decode an inference payload and store the undecorated frame.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON object
                with the image under the 'bytes' key.

        Raises:
            Exception: if the payload carries no image data, or the data
                directory cannot be created.
        """
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair
        # and can also create missing parent directories.
        # NOTE(review): this handler never writes into data_dirpath itself;
        # presumably the directory is prepared for collaborators — confirm.
        try:
            os.makedirs(self.data_dirpath, exist_ok=True)
        except Exception:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning('Failed to create {}'.format(self.data_dirpath))
            raise

        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        if 'bytes' in payload_json:
            img_k = 'bytes'
        else:
            raise Exception('No image data in MQTT payload')
        jpg_bytes = payload.destringify_jpg(payload_json[img_k])
        payload_json.pop(img_k)

        self.frame = payload.jpg2rgb(jpg_bytes)
예제 #16
0
    def inference(self, pl):
        """Run detection on a JPEG payload; optionally draw bounding boxes
        on the frame before publishing the result.
        """
        request = payload.deserialize_payload(pl.decode('utf-8'))
        raw_jpg = payload.destringify_jpg(request['bytes'])
        frame = payload.jpg2bgr(raw_jpg)

        engine_input = self.engine.process_input(frame)
        raw_output = self.engine.inference(engine_input)
        results = self.engine.process_output(raw_output)

        classes = self.engine.classes
        labels = self.engine.labels

        generalized = self.generalize_result(request, results)
        if self.draw is False:
            self.result_hook(generalized)
        else:
            self.result_hook(
                draw_bb(frame,
                        generalized,
                        generate_class_color(class_num=classes),
                        labels))
예제 #17
0
File: bnpipeline.py  Project: DT42/BerryNet
    def dl_inference(self, pl):
        """Run the Galaxy pipeline engine over one or more JPEG payloads
        and publish a generalized result.

        Args:
            pl: Raw MQTT payload (bytes) holding either a single JSON
                object or a list of them; each object carries a
                stringified JPEG under 'bytes' and optional 'meta'.
        """
        def empty_inference_result(count):
            # One empty annotation list per input channel.
            return [{'channel': i, 'annotations': []} for i in range(count)]

        t = datetime.now()
        # base_name is always None here; it is only forwarded to the
        # engine, which presumably has a default behavior for it.
        base_name = None
        logger.debug('counter #{}'.format(self.counter))
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        # Unify the type of input payload to a list, so that
        # bnpipeline can process the input in the same way.
        #
        # If the payload is
        #     - a list of items: keep the list
        #     - a single item: convert to a list with an item
        mqtt_payload = payload.deserialize_payload(pl.decode('utf-8'))
        if isinstance(mqtt_payload, list):
            jpg_json = mqtt_payload
        else:
            jpg_json = [mqtt_payload]
            logger.info('Convert input type from {0} to {1}'.format(
                type(mqtt_payload), type(jpg_json)))

        jpg_bytes_list = [
            payload.destringify_jpg(img['bytes']) for img in jpg_json
        ]
        metas = [img.get('meta', {}) for img in jpg_json]
        # NOTE(review): duration() is not defined in this method —
        # presumably a module-level timing helper; confirm it exists.
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_arrays = [
            payload.jpg2bgr(jpg_bytes) for jpg_bytes in jpg_bytes_list
        ]
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        # FIXME: Galaxy pipeline may or may not use a list as input, so we
        # check the length here and then choose whether to send a list or not.
        # We may drop it when Galaxy Pipline unite their input.
        if len(bgr_arrays) > 1:
            image_data = self.engine.process_input(bgr_arrays)
        else:
            image_data = self.engine.process_input(bgr_arrays[0])
        # FIXME: Galaxy pipeline doesn't support multiple metadata for multiple
        # images at the moment (which will be needed), so we provide the first
        # metadata here. This commit should be revert when Galaxy pipeline
        # support it: https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/120
        meta_data = metas[0]

        try:
            logger.debug(meta_data)
            output = self.engine.inference(image_data,
                                           meta=meta_data,
                                           base_name=base_name)
            model_outputs = self.engine.process_output(output)
        except IndexError as e:
            # FIXME: workaround for pipeline
            # Pipeline throw IndexError when there's no results, see:
            # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/86
            # So we catch the exception, and produce a dummy result
            # to hook. This workaround should be removed after the issue
            # has been fixed.
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'pipeline raised IndexError'))

        if model_outputs is None:
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'severe error happened in pipeline'))

        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Classification takes {} ms'.format(duration(t)))

        # self.engine.cache_data('model_output', model_outputs)
        # self.engine.cache_data('model_output_filepath', output_name)
        # self.engine.save_cache()

        self.send_result(self.generalize_result(jpg_json, model_outputs))

        self.counter += 1
예제 #18
0
    def dl_inference(self, pl):
        """Run the Galaxy pipeline engine over a list of JPEG payloads and
        publish a generalized result to the configured MQTT topic.

        Args:
            pl: Raw MQTT payload (bytes) holding a serialized JSON list;
                each item carries a stringified JPEG under 'bytes' and
                optional 'meta'.
        """
        def empty_inference_result(count):
            # One empty annotation list per input channel.
            return [{'channel': i, 'annotations': []} for i in range(count)]

        t = datetime.now()
        # base_name is always None here; it is only forwarded to the
        # engine, which presumably has a default behavior for it.
        base_name = None
        logger.debug('counter #{}'.format(self.counter))
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes_list = [
            payload.destringify_jpg(img['bytes']) for img in jpg_json
        ]
        metas = [img.get('meta', {}) for img in jpg_json]
        # NOTE(review): duration() is not defined in this method —
        # presumably a module-level timing helper; confirm it exists.
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_arrays = [
            payload.jpg2bgr(jpg_bytes) for jpg_bytes in jpg_bytes_list
        ]
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        image_data = self.engine.process_input(bgr_arrays)
        # FIXME: Galaxy pipeline doesn't support multiple metadata for multiple
        # images at the moment (which will be needed), so we provide the first
        # metadata here. This commit should be revert when Galaxy pipeline
        # support it: https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/120
        meta_data = metas[0]
        try:
            logger.debug(meta_data)
            output = self.engine.inference(image_data,
                                           meta=meta_data,
                                           base_name=base_name)
            model_outputs = self.engine.process_output(output)
        except IndexError as e:
            # FIXME: workaround for pipeline
            # Pipeline throw IndexError when there's no results, see:
            # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/86
            # So we catch the exception, and produce a dummy result
            # to hook. This workaround should be removed after the issue
            # has been fixed.
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'pipeline raised IndexError'))

        if model_outputs is None:
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'severe error happened in pipeline'))

        # Normalize a single-dict output to a one-element list so
        # downstream consumers always receive a list.
        if isinstance(model_outputs, dict):
            model_outputs = [model_outputs]
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Classification takes {} ms'.format(duration(t)))

        # self.engine.cache_data('model_output', model_outputs)
        # self.engine.cache_data('model_output_filepath', output_name)
        # self.engine.save_cache()

        self.send_result(self.generalize_result(jpg_json, model_outputs),
                         self.output_mqtt_topic)

        self.counter += 1