Example #1
    def inference(self, pl):
        t0 = time.time()
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])
        # time.time() differences are in seconds; convert to milliseconds.
        logger.debug('destringify_jpg: {} ms'.format((time.time() - t0) * 1000))

        t1 = time.time()
        bgr_array = payload.jpg2bgr(jpg_bytes)
        logger.debug('jpg2bgr: {} ms'.format((time.time() - t1) * 1000))

        t2 = time.time()
        image_data = self.engine.process_input(bgr_array)
        output = self.engine.inference(image_data)
        model_outputs = self.engine.process_output(output)
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Detection takes {} ms'.format((time.time() - t2) * 1000))

        classes = self.engine.classes
        labels = self.engine.labels

        logger.debug('draw = {}'.format(self.draw))
        if self.draw is False:
            self.result_hook(self.generalize_result(jpg_json, model_outputs))
        else:
            self.result_hook(
                draw_bb(bgr_array,
                        self.generalize_result(jpg_json, model_outputs),
                        generate_class_color(class_num=classes), labels))
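For reference, the payload consumed by these handlers is a JSON object whose
'bytes' field holds the stringified JPEG; Examples #5 and #6 below also read an
optional 'meta' field. A minimal sketch of the expected shape (field names come
from the examples; the values here are placeholders):

    jpg_json = {
        'bytes': '<stringified JPEG data>',  # consumed by payload.destringify_jpg()
        'meta': {},                          # optional; used in Examples #5 and #6
    }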
Example #2
    def inference(self, pl):
        # Elapsed milliseconds since t. timedelta.microseconds holds only the
        # sub-second component, so total_seconds() is used to avoid truncation.
        duration = lambda t: (datetime.now() - t).total_seconds() * 1000

        t = datetime.now()
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_array = payload.jpg2bgr(jpg_bytes)
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        image_data = self.engine.process_input(bgr_array)
        output = self.engine.inference(image_data)
        model_outputs = self.engine.process_output(output)
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Detection takes {} ms'.format(duration(t)))

        #classes = self.engine.classes
        #labels = self.engine.labels

        logger.debug('draw = {}'.format(self.draw))
        if self.draw is False:
            self.result_hook(self.generalize_result(jpg_json, model_outputs))
        else:
            #self.result_hook(
            #    draw_bb(bgr_array,
            #            self.generalize_result(jpg_json, model_outputs),
            #            generate_class_color(class_num=classes),
            #            labels))
            self.result_hook(self.generalize_result(jpg_json, model_outputs))
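The original lambda above computed (datetime.now() - t).microseconds / 1000,
which silently truncates any interval of one second or more, because
timedelta.microseconds holds only the sub-second component. A standalone check
shows the difference:

    from datetime import timedelta

    d = timedelta(seconds=2, microseconds=5000)
    print(d.microseconds / 1000)     # 5.0    -- drops the two whole seconds
    print(d.total_seconds() * 1000)  # 2005.0 -- the actual elapsed milliseconds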
Example #3
    def inference(self, pl):
        # Decode the MQTT payload into JSON and recover the raw JPEG bytes.
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])

        # Decode the JPEG into a BGR pixel array for the engine.
        bgr_array = payload.jpg2bgr(jpg_bytes)

        # Preprocess the frame, run the model, and post-process its output.
        image_data = self.engine.process_input(bgr_array)
        output = self.engine.inference(image_data)
        model_outputs = self.engine.process_output(output)

        # Publish the generalized result through the registered hook.
        self.result_hook(self.generalize_result(jpg_json, model_outputs))
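A minimal caller sketch for this handler. It assumes the payload module also
exposes the inverse helpers stringify_jpg() and serialize_payload(); neither
appears in these examples, so treat both names as assumptions:

    # Sketch only: stringify_jpg() and serialize_payload() are assumed
    # counterparts of the destringify/deserialize calls used above.
    with open('input.jpg', 'rb') as f:
        jpg_bytes = f.read()

    jpg_json = {'bytes': payload.stringify_jpg(jpg_bytes)}
    pl = payload.serialize_payload(jpg_json).encode('utf-8')

    service.inference(pl)  # 'service' is an instance of the class above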
Example #4
    def inference(self, pl):
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(jpg_json['bytes'])

        bgr_array = payload.jpg2bgr(jpg_bytes)

        image_data = self.engine.process_input(bgr_array)
        output = self.engine.inference(image_data)
        model_outputs = self.engine.process_output(output)

        classes = self.engine.classes
        labels = self.engine.labels

        # Publish the raw result, or draw bounding boxes first when enabled.
        if self.draw is False:
            self.result_hook(self.generalize_result(jpg_json, model_outputs))
        else:
            self.result_hook(
                draw_bb(bgr_array,
                        self.generalize_result(jpg_json, model_outputs),
                        generate_class_color(class_num=classes), labels))
Example #5
    def dl_inference(self, pl):
        def empty_inference_result(count):
            return [{'channel': i, 'annotations': []} for i in range(count)]

        t = datetime.now()
        base_name = None
        logger.debug('counter #{}'.format(self.counter))
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        # Unify the type of the input payload to a list so that bnpipeline
        # can process the input uniformly:
        #
        #     - a list of items: keep the list as-is
        #     - a single item: wrap it in a single-item list
        mqtt_payload = payload.deserialize_payload(pl.decode('utf-8'))
        if isinstance(mqtt_payload, list):
            jpg_json = mqtt_payload
        else:
            jpg_json = [mqtt_payload]
            logger.info('Converted input type from {0} to {1}'.format(
                type(mqtt_payload), type(jpg_json)))

        jpg_bytes_list = [
            payload.destringify_jpg(img['bytes']) for img in jpg_json
        ]
        metas = [img.get('meta', {}) for img in jpg_json]
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_arrays = [
            payload.jpg2bgr(jpg_bytes) for jpg_bytes in jpg_bytes_list
        ]
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        # FIXME: The Galaxy pipeline may or may not take a list as input, so
        # we check the length here and choose whether to send a list. This
        # check can be dropped once the Galaxy pipeline unifies its input.
        if len(bgr_arrays) > 1:
            image_data = self.engine.process_input(bgr_arrays)
        else:
            image_data = self.engine.process_input(bgr_arrays[0])
        # FIXME: The Galaxy pipeline does not yet support per-image metadata
        # for multiple images (which will be needed), so we pass only the
        # first metadata entry here. This commit should be reverted once the
        # Galaxy pipeline supports it:
        # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/120
        meta_data = metas[0]

        try:
            logger.debug(meta_data)
            output = self.engine.inference(image_data,
                                           meta=meta_data,
                                           base_name=base_name)
            model_outputs = self.engine.process_output(output)
        except IndexError:
            # FIXME: workaround for the pipeline.
            # The pipeline throws IndexError when there are no results, see:
            # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/86
            # So we catch the exception and produce a dummy result for the
            # hook. This workaround should be removed after the issue has
            # been fixed.
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'the pipeline raised IndexError'))

        if model_outputs is None:
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'a severe error happened in the pipeline'))

        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Classification takes {} ms'.format(duration(t)))

        # self.engine.cache_data('model_output', model_outputs)
        # self.engine.cache_data('model_output_filepath', output_name)
        # self.engine.save_cache()

        self.send_result(self.generalize_result(jpg_json, model_outputs))

        self.counter += 1
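This example (and Example #6 below) calls duration() without defining it
locally, so it is presumably a module-level helper along the lines of the
lambda in Example #2. A minimal sketch of such a helper (an assumption, not
code from the source):

    from datetime import datetime

    def duration(t):
        """Return the milliseconds elapsed since datetime t."""
        return (datetime.now() - t).total_seconds() * 1000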
Example #6
    def dl_inference(self, pl):
        def empty_inference_result(count):
            return [{'channel': i, 'annotations': []} for i in range(count)]

        t = datetime.now()
        base_name = None
        logger.debug('counter #{}'.format(self.counter))
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes_list = [
            payload.destringify_jpg(img['bytes']) for img in jpg_json
        ]
        metas = [img.get('meta', {}) for img in jpg_json]
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_arrays = [
            payload.jpg2bgr(jpg_bytes) for jpg_bytes in jpg_bytes_list
        ]
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        image_data = self.engine.process_input(bgr_arrays)
        # FIXME: The Galaxy pipeline does not yet support per-image metadata
        # for multiple images (which will be needed), so we pass only the
        # first metadata entry here. This commit should be reverted once the
        # Galaxy pipeline supports it:
        # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/120
        meta_data = metas[0]
        try:
            logger.debug(meta_data)
            output = self.engine.inference(image_data,
                                           meta=meta_data,
                                           base_name=base_name)
            model_outputs = self.engine.process_output(output)
        except IndexError:
            # FIXME: workaround for the pipeline.
            # The pipeline throws IndexError when there are no results, see:
            # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/86
            # So we catch the exception and produce a dummy result for the
            # hook. This workaround should be removed after the issue has
            # been fixed.
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'the pipeline raised IndexError'))

        if model_outputs is None:
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'a severe error happened in the pipeline'))

        if isinstance(model_outputs, dict):
            model_outputs = [model_outputs]
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Classification takes {} ms'.format(duration(t)))

        # self.engine.cache_data('model_output', model_outputs)
        # self.engine.cache_data('model_output_filepath', output_name)
        # self.engine.save_cache()

        self.send_result(self.generalize_result(jpg_json, model_outputs),
                         self.output_mqtt_topic)

        self.counter += 1