Example #1
    def generalize_result(self, eng_inputs, eng_outputs):
        # The pipeline returns None if an error happened
        if eng_outputs is None:
            eng_outputs = [{}]

        if len(eng_inputs) != len(eng_outputs):
            logger.warning('Input length != output length: {} != {}'.format(
                len(eng_inputs), len(eng_outputs)))
            # We guarantee that the length of inputs is always 1 (at least for
            # now), so it is safer to index into eng_inputs than into eng_outputs.
            c_id = int(eng_inputs[0]['meta']['channel_id'])
            eng_outputs = [eng_outputs[c_id]]

        # FIXME: Workaround for spec incompatibility
        # The DLBox spec uses 'image_blob', but BerryNet uses 'bytes', so we
        # have to convert here.
        for eng_in, eng_out in zip(eng_inputs, eng_outputs):
            if isinstance(eng_out, np.ndarray):
                r, result_img = cv2.imencode('.jpg', eng_out)
                eng_in['bytes'] = payload.stringify_jpg(result_img)
            else:
                try:
                    eng_in.update(eng_out)
                except KeyError as e:
                    logger.exception('{} ({}): {}'.format(
                        e.__class__, e.__doc__, e))
            eng_in['image_blob'] = eng_in.pop('bytes')
        return eng_inputs
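
A minimal sketch of the spec conversion above, using a toy input/output pair (the dict values are illustrative, not real payloads):

    eng_in = {'bytes': '<stringified jpg>', 'meta': {'channel_id': '0'}}
    eng_out = {'annotations': [{'label': 'cat', 'confidence': 0.9}]}
    eng_in.update(eng_out)                      # merge inference result into input
    eng_in['image_blob'] = eng_in.pop('bytes')  # BerryNet 'bytes' -> DLBox 'image_blob'
    # eng_in == {'meta': {'channel_id': '0'},
    #            'annotations': [{'label': 'cat', 'confidence': 0.9}],
    #            'image_blob': '<stringified jpg>'}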
Example #2
    def process_input(self, tensor):
        """Resize tensor (if needed) and change layout from HWC to CHW.

        Args:
            tensor: Input BGR tensor (OpenCV convention)

        Returns:
            Resized and transposed tensor
        """
        if tensor.shape[:-1] != (self.h, self.w):
            logger.warning("Input tensor is resized from {} to {}".format(
                tensor.shape[:-1], (self.h, self.w)))
            tensor = cv2.resize(tensor, (self.w, self.h))
        tensor = tensor.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        return tensor
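
A minimal usage sketch of the transform above, assuming a hypothetical 416x416 network input size; only cv2 and numpy are needed:

    import cv2
    import numpy as np

    h, w = 416, 416                                # hypothetical network input size
    bgr = np.zeros((480, 640, 3), dtype=np.uint8)  # BGR frame, HWC layout
    if bgr.shape[:-1] != (h, w):
        bgr = cv2.resize(bgr, (w, h))              # cv2.resize takes (width, height)
    chw = bgr.transpose((2, 0, 1))                 # HWC -> CHW
    assert chw.shape == (3, h, w)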
Example #3
    def get_dl_component_config(self, pipeline_config):
        """Get pipeline def list containing only DL components

        Args:
            pipeline_config: pipeline config JSON object

        Returns:
            List of DL components definitions
        """
        dl_comp_config = []
        try:
            pipeline_def = pipeline_config['pipeline_def']
        except KeyError:
            logger.warning('Invalid pipeline config')
            pipeline_def = []
        for comp_config in pipeline_def:
            if ('classifier' in comp_config['name']
                    or 'detector' in comp_config['name']):
                dl_comp_config.append(comp_config)
        return dl_comp_config
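
A hypothetical pipeline config illustrating what the filter above keeps: only components whose name contains 'classifier' or 'detector':

    pipeline_config = {
        'pipeline_def': [
            {'name': 'frame_reader'},
            {'name': 'mobilenet_classifier'},
            {'name': 'yolo_detector'},
            {'name': 'result_writer'},
        ]
    }
    # get_dl_component_config(pipeline_config) returns only the
    # 'mobilenet_classifier' and 'yolo_detector' entries.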
Example #4
    def handleConfig(self, pl):
        payload_json = ""
        try:
            config_id = pl.decode('utf-8')  # 'id' would shadow the builtin
            if config_id in self.idlist:
                configFilename = self.idlist[config_id]
                with open(configFilename) as f:
                    payload_json = payload.deserialize_payload(f.read())
                self.sendConfig(payload.serialize_payload(payload_json))
            else:
                logger.warning("ID %s is not in idlist" % (id))
                return
        except Exception as e:
            logger.exception(e)
            return  # do not overwrite the config file after a failure

        # output config file
        with open(self.comm_config['configfile'], 'w') as configfile:
            configfile.write(payload.serialize_payload(payload_json))

        # restart service
        subprocess.run(["supervisorctl", "restart", "bnpipeline-bndyda"])
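
A minimal sketch of the id-to-config lookup above, with a hypothetical idlist mapping; the with-statement guarantees the file handle is closed even if reading fails:

    idlist = {'cam01': '/etc/berrynet/cam01.json'}  # hypothetical id -> path mapping

    config_id = b'cam01'.decode('utf-8')            # MQTT payloads arrive as bytes
    if config_id in idlist:
        with open(idlist[config_id]) as f:
            raw_config = f.read()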
Example #5
    def dl_inference(self, pl):
        def empty_inference_result(count):
            return [{'channel': i, 'annotations': []} for i in range(count)]

        t = datetime.now()
        base_name = None
        logger.debug('counter #{}'.format(self.counter))
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        # Unify the type of input payload to a list, so that
        # bnpipeline can process the input in the same way.
        #
        # If the payload is
        #     - a list of items: keep the list
        #     - a single item: convert it to a single-element list
        mqtt_payload = payload.deserialize_payload(pl.decode('utf-8'))
        if isinstance(mqtt_payload, list):
            jpg_json = mqtt_payload
        else:
            jpg_json = [mqtt_payload]
            logger.info('Convert input type from {0} to {1}'.format(
                type(mqtt_payload), type(jpg_json)))

        jpg_bytes_list = [
            payload.destringify_jpg(img['bytes']) for img in jpg_json
        ]
        metas = [img.get('meta', {}) for img in jpg_json]
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_arrays = [
            payload.jpg2bgr(jpg_bytes) for jpg_bytes in jpg_bytes_list
        ]
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        # FIXME: Galaxy pipeline may or may not use a list as input, so we
        # check the length here and choose whether to send a list or not.
        # We may drop this when Galaxy Pipeline unifies its input format.
        if len(bgr_arrays) > 1:
            image_data = self.engine.process_input(bgr_arrays)
        else:
            image_data = self.engine.process_input(bgr_arrays[0])
        # FIXME: Galaxy pipeline doesn't support multiple metadata for multiple
        # images at the moment (which will be needed), so we provide the first
        # metadata here. This commit should be reverted when Galaxy pipeline
        # supports it: https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/120
        meta_data = metas[0]

        try:
            logger.debug(meta_data)
            output = self.engine.inference(image_data,
                                           meta=meta_data,
                                           base_name=base_name)
            model_outputs = self.engine.process_output(output)
        except IndexError:
            # FIXME: workaround for pipeline
            # The pipeline throws IndexError when there are no results, see:
            # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/86
            # So we catch the exception and produce a dummy result for the
            # hook. This workaround should be removed after the issue has
            # been fixed.
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'the pipeline raised IndexError'))

        if model_outputs is None:
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'a severe error happened in the pipeline'))

        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Classification takes {} ms'.format(duration(t)))

        # self.engine.cache_data('model_output', model_outputs)
        # self.engine.cache_data('model_output_filepath', output_name)
        # self.engine.save_cache()

        self.send_result(self.generalize_result(jpg_json, model_outputs))

        self.counter += 1
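
A minimal sketch of the payload normalization at the top of dl_inference: a single JSON object is wrapped into a one-element list, while a list passes through unchanged (values illustrative):

    mqtt_payload = {'bytes': '<stringified jpg>', 'meta': {}}
    if isinstance(mqtt_payload, list):
        jpg_json = mqtt_payload
    else:
        jpg_json = [mqtt_payload]
    assert jpg_json == [mqtt_payload]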
Example #6
    def dl_inference(self, pl):
        def empty_inference_result(count):
            return [{'channel': i, 'annotations': []} for i in range(count)]

        t = datetime.now()
        base_name = None
        logger.debug('counter #{}'.format(self.counter))
        logger.debug('payload size: {}'.format(len(pl)))
        logger.debug('payload type: {}'.format(type(pl)))
        jpg_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes_list = [
            payload.destringify_jpg(img['bytes']) for img in jpg_json
        ]
        metas = [img.get('meta', {}) for img in jpg_json]
        logger.debug('destringify_jpg: {} ms'.format(duration(t)))

        t = datetime.now()
        bgr_arrays = [
            payload.jpg2bgr(jpg_bytes) for jpg_bytes in jpg_bytes_list
        ]
        logger.debug('jpg2bgr: {} ms'.format(duration(t)))

        t = datetime.now()
        image_data = self.engine.process_input(bgr_arrays)
        # FIXME: Galaxy pipeline doesn't support multiple metadata for multiple
        # images at the moment (which will be needed), so we provide the first
        # metadata here. This commit should be reverted when Galaxy pipeline
        # supports it: https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/120
        meta_data = metas[0]
        try:
            logger.debug(meta_data)
            output = self.engine.inference(image_data,
                                           meta=meta_data,
                                           base_name=base_name)
            model_outputs = self.engine.process_output(output)
        except IndexError:
            # FIXME: workaround for pipeline
            # The pipeline throws IndexError when there are no results, see:
            # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/86
            # So we catch the exception and produce a dummy result for the
            # hook. This workaround should be removed after the issue has
            # been fixed.
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'the pipeline raised IndexError'))

        if model_outputs is None:
            model_outputs = empty_inference_result(len(jpg_json))
            logger.warning(('inference results are empty because '
                            'a severe error happened in the pipeline'))

        if isinstance(model_outputs, dict):
            model_outputs = [model_outputs]
        logger.debug('Result: {}'.format(model_outputs))
        logger.debug('Classification takes {} ms'.format(duration(t)))

        # self.engine.cache_data('model_output', model_outputs)
        # self.engine.cache_data('model_output_filepath', output_name)
        # self.engine.save_cache()

        self.send_result(self.generalize_result(jpg_json, model_outputs),
                         self.output_mqtt_topic)

        self.counter += 1
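
A minimal sketch of the output normalization specific to this variant: a single dict returned by process_output is wrapped into a list, so generalize_result always receives a list (values illustrative):

    model_outputs = {'channel': 0, 'annotations': []}
    if isinstance(model_outputs, dict):
        model_outputs = [model_outputs]
    assert model_outputs == [{'channel': 0, 'annotations': []}]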