Example No. 1
    def preprocess(self, request: Request) -> Request:
        import tensorflow as tf  # lazy import: TensorFlow is only required when padding runs

        padded_query_id_list = tf.keras.preprocessing.sequence.pad_sequences(
            request.query, **self.padding_parameter
        )

        request.update_query(padded_query_id_list)

        return request
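
For reference, this is what the padding call does when padding_parameter is empty (as in the test metadata further below): pad_sequences left-pads each id list with zeros to the length of the longest one. A minimal sketch with made-up ids:

import tensorflow as tf

query = [[1, 2, 3], [4, 5]]
padded = tf.keras.preprocessing.sequence.pad_sequences(query)
print(padded)  # [[1 2 3] [0 4 5]] -- left-padded with zeros by default
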
Example No. 2
    def preprocess(self, request: Request) -> Request:
        vocabulary_lookup_table = self.lookup_table_registry["vocabulary"]

        query_id_list = []
        for query_item in request.query:
            query_item_id = [
                vocabulary_lookup_table.lookup(i) for i in query_item
            ]
            query_id_list.append(query_item_id)

        request.update_query(query_id_list)

        return request
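
This snippet only assumes the lookup table exposes a lookup(token) -> id method. A hypothetical dict-backed stand-in (DictLookupTable is not part of the library):

class DictLookupTable:
    """Hypothetical stand-in for the registry's lookup table."""

    def __init__(self, mapping, oov_id=0):
        self.mapping = mapping
        self.oov_id = oov_id  # id returned for out-of-vocabulary tokens

    def lookup(self, token):
        return self.mapping.get(token, self.oov_id)


table = DictLookupTable({"a": 1, "b": 2, "c": 3})
print([table.lookup(ch) for ch in "abc"])  # [1, 2, 3]
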
Example No. 3
def test_serving(datadir):
    metadata = {
        "instance": {
            "LookupProcessor_0": {
                "class":
                "deliverable_model.builtin.processor.lookup_processor.LookupProcessor",
                "parameter": {
                    "lookup_table": ["vocabulary", "tag"],
                    "padding_parameter": {},
                },
            }
        },
        "pipeline": {
            "pre": ["LookupProcessor_0"],
            "post": ["LookupProcessor_0"]
        },
    }
    processor_obj = Processor.load(datadir, metadata)

    processor_obj.instance_processor()

    request = Request(["abc", "cba"])
    return_request = processor_obj.call_preprocessor(request)

    assert np.all(return_request.query == [[1, 2, 3], [3, 2, 1]])

    response = Response([[1, 2, 3], [3, 2, 1]])
    return_response = processor_obj.call_postprocessor(response)

    assert np.all(return_response.data == [["tag-a", "tag-b", "tag-c"],
                                           ["tag-c", "tag-b", "tag-a"]])
Example No. 4
    def _inference(self, model, input_data: list):
        '''
        Run inference on the input data with the model, i.e. use the model to
        tag entities in the input, and write the results to the configured
        output path for manual review.

        :param model: the model to run inference with
        :param input_data: corpus data
        :return:
        '''
        output = []
        batch_size = 1
        batches = MtModelInference_Deliverable.generate_batch_input(
            input_data, batch_size)
        for batch in batches:
            request = Request(batch)
            response = model.inference(request)
            tmp_result = response['data'][0].sequence
            tmp_result.label = response['cls'][0][0]
            output.append(tmp_result)

        predict_result = Corpus(output)
        predict_result.write_to_file(
            os.path.join(self.config['output_filepath'],
                         'inference_out.conllx'))

        print('*** inference has finished; check the result at the path below:')
        print('==>{}'.format(self.config['output_filepath']))
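
generate_batch_input is not shown in this snippet; a plausible sketch, assuming it simply slices the corpus into fixed-size chunks:

def generate_batch_input(input_data, batch_size):
    # yield consecutive slices of at most batch_size items
    for start in range(0, len(input_data), batch_size):
        yield input_data[start:start + batch_size]
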
Example No. 5
def test_serving(datadir):
    class FakedProcessor:
        def preprocess(self, request):
            # Do nothing
            return request

        def postprocess(self, response):
            # Do nothing
            return response

    class FakedModel:
        def inference(self, request):
            return Response([["tag-{}".format(i) for i in j]
                             for j in request.query])

    mock_load_processor = mock.patch.object(Processor,
                                            "_instance_single_processor",
                                            return_value=FakedProcessor())

    mock_load_model = mock.patch.object(Model,
                                        "load",
                                        return_value=FakedModel())

    with mock_load_processor, mock_load_model:
        deliverable_model = DeliverableModel.load(datadir)

        request = Request(["abc", "cba"])

        response = deliverable_model.inference(request)

        assert np.all(response.data == [["tag-a", "tag-b", "tag-c"],
                                        ["tag-c", "tag-b", "tag-a"]])
Example No. 6
def test_serving(datadir):
    deliverable_model = DeliverableModel.load(datadir)

    request = Request(["abc", "cba"])

    response = deliverable_model.parse(request)

    assert np.all(response.data == [["tag-a", "tag-b", "tag-c"],
                                    ["tag-c", "tag-b", "tag-a"]])
Example No. 7
    def _parse(self, msg):
        if not isinstance(msg, list) or not isinstance(msg[0], list):
            msg = [[j for j in i] for i in msg]

        request_obj = Request(msg)

        response_obj = self.server.parse(request_obj)

        for predict_info in response_obj.data:
            yield predict_info
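
The guard at the top of _parse turns a list of strings into a list of character lists before building the Request; for example:

msg = ["abc", "cba"]
msg = [[j for j in i] for i in msg]
print(msg)  # [['a', 'b', 'c'], ['c', 'b', 'a']]
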
Example No. 8
    @classmethod
    def inference_process(cls, model, input_data, input_ids):
        output = []
        for data, doc_id in zip(input_data, input_ids):
            # run the model on a single-item batch
            request = Request([data])
            response = model.inference(request)
            tmp_result = response['data'][0].sequence
            if not isinstance(tmp_result, Document):
                tmp_result = Document(tmp_result.text, tmp_result.span_set)
            tmp_result.label = response['cls'][0][0]
            tmp_result.id = doc_id
            output.append(tmp_result)
        return Corpus(output)
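
inference_process only relies on Document carrying the text and span set plus mutable label and id attributes; a minimal stand-in consistent with that usage (the real class is provided elsewhere in the library):

class Document:
    def __init__(self, text, span_set=None):
        self.text = text
        self.span_set = span_set
        self.label = None  # filled in from the classification output
        self.id = None     # filled in from the input id
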
Example No. 9
    def inference(self, request: Request, batch_size=None) -> Response:
        if not batch_size:  # inference without mini batch
            return self._do_inference(request)

        # inference with batch
        batcher = BatchingIterator(batch_size)

        sub_response_list = []

        for sub_request_dict in batcher(request):
            sub_request = Request.from_dict(sub_request_dict)
            sub_response_list.append(self._do_inference(sub_request))

        response = merge_dict_list(*sub_response_list)

        return Response.from_dict(response)
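
BatchingIterator is used here as a callable that yields dicts describing consecutive sub-batches, which Request.from_dict then rehydrates. A minimal sketch of that contract, assuming the dict carries a query field (the real class ships with the library):

class BatchingIterator:
    def __init__(self, batch_size):
        self.batch_size = batch_size

    def __call__(self, request):
        # yield dicts describing consecutive sub-batches of the query
        query = request.query
        for start in range(0, len(query), self.batch_size):
            yield {"query": query[start:start + self.batch_size]}
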
Example No. 10
def test_serving(datadir, tmpdir):
    parameter = {
        "lookup_table": ["vocabulary", "tag"],
        "padding_parameter": {}
    }

    lookup_processor = LookupProcessor.load(parameter, datadir)

    request = Request(["abc", "cba"])
    return_request = lookup_processor.preprocess(request)

    assert np.all(return_request.query == [[1, 2, 3], [3, 2, 1]])

    response = Response([[1, 2, 3], [3, 2, 1]])
    return_response = lookup_processor.postprocess(response)

    assert np.all(return_response.data == [["tag-a", "tag-b", "tag-c"],
                                           ["tag-c", "tag-b", "tag-a"]])
Example No. 11
    def _inference(self, model, input_data: list):
        output = []
        batch_size = 1
        batches = MtModelInference_Deliverable.generate_batch_input(
            input_data, batch_size)
        for batch in tqdm.tqdm(batches):
            request = Request(batch)
            response = model.inference(request)
            tmp_result = response['data'][0].sequence
            tmp_result.label = response['cls'][0][0]
            output.append(tmp_result)

        predict_result = Corpus(output)
        predict_result.write_to_file(
            os.path.join(self.config['output_filepath'],
                         'inference_out.conllx'))

        print('*** inference has finished; check the result at the path below:')
        print('==>{}'.format(self.config['output_filepath']))

from typing import Any

import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc
from tensorflow_serving.apis.predict_pb2 import PredictRequest


class TFServingModelEndpoint:
    def __init__(self, target, model_name, signature_name):
        self.model_name = model_name
        self.signature_name = signature_name

        self.channel = grpc.insecure_channel(target)
        self.stub = prediction_service_pb2_grpc.PredictionServiceStub(
            self.channel)

    @classmethod
    def load(cls, target, model_name, signature_name="serving_default"):
        self = cls(target, model_name, signature_name)

        return self

    @classmethod
    def from_config(cls, metadata, config):
        return cls.load(**config)

    def inference(self, request: PredictRequest) -> Any:
        request.model_spec.name = self.model_name
        request.model_spec.signature_name = self.signature_name

        feature = self.stub.Predict(request, 5.0)  # second positional argument is the RPC timeout in seconds

        return feature


if __name__ == "__main__":
    tfse = TFServingModelEndpoint.load("127.0.0.1:5000", "ner")
    request = Request(["明天的天气", "播放歌曲"])
    tfse.inference(request)
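
Per its annotation, inference expects a tensorflow-serving PredictRequest rather than the library's own Request. Building one by hand looks roughly like this; the input tensor name "tokens" is an assumption and depends on the exported signature:

import tensorflow as tf
from tensorflow_serving.apis import predict_pb2

predict_request = predict_pb2.PredictRequest()
predict_request.inputs["tokens"].CopyFrom(
    tf.make_tensor_proto([[1, 2, 3]], dtype=tf.int32)
)
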
def make_request(**kwargs) -> Request:
    return Request(**kwargs)
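
Usage sketch, assuming Request accepts the same keyword fields that Request.from_dict consumes elsewhere in these examples (the query keyword is an assumption):

request = make_request(query=[["a", "b", "c"]])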