コード例 #1
0
    def test_run_inference_rest(self, resnet_multiple_batch_sizes,
                                start_server_multi_model):
        """Run REST predict requests against each resnet variant served by
        the multi-model server and verify the output tensor shapes."""
        _, ports = start_server_multi_model
        print("Downloaded model files:", resnet_multiple_batch_sizes)

        print("Starting inference using resnet model")
        url_template = 'http://localhost:{}/v1/models/{}:predict'

        # One entry per served model:
        # (model name, batch size, input tensor, output tensor,
        #  request format, expected output shape)
        scenarios = [
            ('resnet', 1, 'map/TensorArrayStack/TensorArrayGatherV3',
             'softmax_tensor', 'column_name', (1, 1001)),
            ('resnet_bs4', 4, 'map/TensorArrayStack/TensorArrayGatherV3',
             'softmax_tensor', 'row_noname', (4, 1001)),
            ('resnet_bs8', 8, 'map/TensorArrayStack/TensorArrayGatherV3',
             'softmax_tensor', 'row_noname', (8, 1001)),
            ('resnet_s3', 1, 'map/TensorArrayStack/TensorArrayGatherV3',
             'softmax_tensor', 'row_name', (1, 1001)),
            ('resnet_gs', 1, 'input', 'resnet_v1_50/predictions/Reshape_1',
             'column_noname', (1, 1000)),
        ]

        for model_name, batch, in_name, out_name, fmt, expected in scenarios:
            batch_img = np.ones((batch, 3, 224, 224))
            rest_url = url_template.format(ports["rest_port"], model_name)
            output = infer_rest(batch_img, input_tensor=in_name,
                                rest_url=rest_url,
                                output_tensors=[out_name],
                                request_format=fmt)
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == expected, ERROR_SHAPE
コード例 #2
0
    def test_run_inference_rest(self, download_two_model_versions,
                                start_server_multi_model):
        """Infer against an explicitly pinned model version and against the
        implicit latest version over REST, checking both output shapes."""
        _, ports = start_server_multi_model
        print("Downloaded model files:", download_two_model_versions)

        in_name = "data"
        out_name = "detection_out"

        # Version 1 is pinned in the URL path; a 1x3x300x300 input is sent.
        face_img = np.ones((1, 3, 300, 300))
        rest_url = 'http://localhost:{}/v1/models/{}' \
                   '/versions/1:predict'.format(ports["rest_port"],
                                                self.model_name)
        output = infer_rest(face_img, input_tensor=in_name,
                            rest_url=rest_url,
                            output_tensors=[out_name],
                            request_format='column_name')
        print("output shape", output[out_name].shape)
        assert output[out_name].shape == (1, 1, 200, 7), \
            '{} with version 1 has invalid output'.format(self.model_name)

        # No version in the URL, so the server chooses the latest version;
        # a 1x3x1024x1024 input is sent.
        pvb_img = np.ones((1, 3, 1024, 1024))
        rest_url = 'http://localhost:{}/v1/models/{}:predict'.format(
            ports["rest_port"], self.model_name)
        output = infer_rest(pvb_img, input_tensor=in_name,
                            rest_url=rest_url,
                            output_tensors=[out_name],
                            request_format='column_name')
        print("output shape", output[out_name].shape)
        assert output[out_name].shape == (1, 1, 200, 7), \
            '{} with version latest has invalid output'.format(self.model_name)
コード例 #3
0
    def test_run_inference_rest(self, download_two_model_versions,
                                input_data_downloader_v1_224,
                                start_server_multi_model):
        """
        <b>Description</b>
        Execute inference request using REST API interface with version
        specified and without version set on the client.
        When version is not set server should use the latest version model 2
        When version 1 is selected the model from folder 1 should be used
        and model 2 should be ignored

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - latest model version serves resnet_v2_50 model - [1,1001]
        output resnet_v2_50/predictions/Reshape_1
        - first model version serves resnet_v1_50 model - [1,1000]
        output resnet_v1_50/predictions/Reshape_1
        """

        print("Downloaded model files:", download_two_model_versions)

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        out_name_v1 = 'resnet_v1_50/predictions/Reshape_1'
        out_name_v2 = 'resnet_v2_50/predictions/Reshape_1'
        print("Starting inference using latest version - no version set")
        # No version in the URL: the server must pick the latest (version 2).
        rest_url = 'http://localhost:5561/v1/models/resnet:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='input',
                                rest_url=rest_url,
                                output_tensors=[out_name_v2],
                                request_format='column_name')
            print("output shape", output[out_name_v2].shape)
            # Fixed: this loop exercises the latest version, so the failure
            # message must say "latest version" (it previously said
            # "version 1", swapped with the loop below).
            assert output[out_name_v2].shape == (1, 1001), \
                'resnet model with latest version has invalid output'

        # both model versions use the same input data shape
        rest_url = 'http://localhost:5561/v1/models/resnet/versions/1:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='input',
                                rest_url=rest_url,
                                output_tensors=[out_name_v1],
                                request_format='column_name')
            print("output shape", output[out_name_v1].shape)
            # Fixed: this loop pins version 1 in the URL, so the failure
            # message must say "version 1".
            assert output[out_name_v1].shape == (1, 1000), \
                'resnet model with version 1 has invalid output'
コード例 #4
0
    def test_run_inference_rest(self, age_gender_model_downloader,
                                start_server_with_mapping, request_format):
        """
            <b>Description</b>
            Submit request to REST API interface serving a single resnet model

            <b>input data</b>
            - directory with the model in IR format
            - docker image with ie-serving-py service

            <b>fixtures used</b>
            - model downloader
            - service launching

            <b>Expected results</b>
            - response contains proper numpy shape

        """

        print("Downloaded model files:", age_gender_model_downloader)

        input_img = np.ones((1, 3, 62, 62))
        rest_url = 'http://localhost:5556/v1/models/age_gender:predict'
        output = infer_rest(input_img,
                            input_tensor='new_key',
                            rest_url=rest_url,
                            output_tensors=['age', 'gender'],
                            request_format=request_format)

        # Both output tensors are checked against their expected shapes.
        expected_shapes = {'age': (1, 1, 1, 1), 'gender': (1, 2, 1, 1)}
        for tensor_name in expected_shapes:
            print("output shape", output[tensor_name].shape)
        print(output)
        for tensor_name, shape in expected_shapes.items():
            assert output[tensor_name].shape == shape, ERROR_SHAPE
コード例 #5
0
    def test_run_inference_rest(self, resnet_multiple_batch_sizes,
                                start_server_single_model, request_format):
        """
        <b>Description</b>
        Submit request to REST API interface serving a single resnet model

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service

        <b>fixtures used</b>
        - model downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape

        """

        print("Downloaded model files:", resnet_multiple_batch_sizes)

        input_img = np.ones((1, 3, 224, 224))
        input_name = 'map/TensorArrayStack/TensorArrayGatherV3'
        output_name = 'softmax_tensor'
        rest_url = 'http://localhost:5555/v1/models/resnet:predict'

        result = infer_rest(input_img,
                            input_tensor=input_name,
                            rest_url=rest_url,
                            output_tensors=[output_name],
                            request_format=request_format)
        print("output shape", result[output_name].shape)
        assert result[output_name].shape == (1, 1001), ERROR_SHAPE
コード例 #6
0
    def test_run_inference_rest(self, resnet_v1_50_model_downloader,
                                input_data_downloader_v1_224,
                                start_server_single_model,
                                request_format):
        """
        <b>Description</b>
        Submit request to REST API interface serving a single resnet model

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape

        """

        print("Downloaded model files:", resnet_v1_50_model_downloader)

        input_imgs = np.array(input_data_downloader_v1_224)
        output_name = 'resnet_v1_50/predictions/Reshape_1'
        rest_url = 'http://localhost:5555/v1/models/resnet:predict'

        # Send one request per slice of the downloaded input data.
        for slice_idx in range(10):
            result = infer_rest(input_imgs, slice_number=slice_idx,
                                input_tensor='input', rest_url=rest_url,
                                output_tensors=[output_name],
                                request_format=request_format)
            print("output shape", result[output_name].shape)
            assert result[output_name].shape == (1, 1000), ERROR_SHAPE
コード例 #7
0
    def test_run_inference_bs4_rest(self, age_gender_model_downloader,
                                    start_server_batch_model_auto_bs4_2out,
                                    request_format):
        """
            <b>Description</b>
            Submit request to REST API interface serving
            a single age-gender model with 2 outputs.
            Parameter batch_size explicitly set to 4.

            <b>input data</b>
            - directory with the model in IR format
            - docker image with ie-serving-py service

            <b>fixtures used</b>
            - model downloader
            - service launching

            <b>Expected results</b>
            - response contains proper numpy shape

        """

        print("Downloaded model files:", age_gender_model_downloader)

        rest_url = 'http://localhost:5562/v1/models/age_gender:predict'
        batch_input = np.ones((4, 3, 62, 62))
        result = infer_rest(batch_input,
                            input_tensor='data',
                            rest_url=rest_url,
                            output_tensors=['age_conv3', 'prob'],
                            request_format=request_format)

        # Both outputs keep the request batch dimension of 4.
        assert result['age_conv3'].shape == (4, 1, 1, 1), ERROR_SHAPE
        assert result['prob'].shape == (4, 2, 1, 1), ERROR_SHAPE
コード例 #8
0
 def run_inference_rest(self, imgs, out_name, out_shape, is_correct,
                        request_format, rest_url):
     """Send a REST predict request and validate the response.

     Args:
         imgs: input batch forwarded to ``infer_rest``.
         out_name: name of the output tensor to inspect.
         out_shape: expected shape of ``output[out_name]`` on success.
         is_correct: when True, expect a valid response with the given
             shape; when False, expect an empty/falsy response.
         request_format: request serialization format for ``infer_rest``.
         rest_url: model predict endpoint URL.
     """
     # The request is identical in both cases — only the expectation on
     # the response differs, so issue the call once instead of
     # duplicating it in each branch (fixes the copy-pasted call).
     output = infer_rest(imgs,
                         input_tensor='data',
                         rest_url=rest_url,
                         output_tensors=[out_name],
                         request_format=request_format)
     if is_correct:
         print("output shape", output[out_name].shape)
         assert output[out_name].shape == out_shape, \
             ERROR_SHAPE
     else:
         assert not output
コード例 #9
0
    def test_run_inference_rest(self, download_two_models,
                                input_data_downloader_v1_224,
                                input_data_downloader_v3_331,
                                start_server_multi_model):
        """
        <b>Description</b>
        Execute inference request using REST API interface hosting multiple
        models

        <b>input data</b>
        - directory with 2 models in IR format
        - docker image
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape for both models set in config
        file: model resnet_v1_50, pnasnet_large
        - both served models handles appropriate input formats

        """

        print("Downloaded model files:", download_two_models)

        base_url = 'http://localhost:5561/v1/models/{}:predict'
        resnet_out = 'resnet_v1_50/predictions/Reshape_1'

        # Batched requests (batch of 2) against the resnet_V1_50 entry.
        resnet_batch = input_data_downloader_v1_224[:2, :, :, :]
        print("Starting inference using resnet model")
        for _ in range(10):
            result = infer_batch_rest(resnet_batch,
                                      input_tensor='input',
                                      rest_url=base_url.format('resnet_V1_50'),
                                      output_tensors=[resnet_out],
                                      request_format='column_name')
            print("output shape", result[resnet_out].shape)
            assert result[resnet_out].shape == (2, 1000), ERROR_SHAPE

        # Single-slice requests against the resnet_gs and resnet_s3 entries,
        # each with its own request serialization format.
        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        for model, fmt in (('resnet_gs', 'column_noname'),
                           ('resnet_s3', 'row_name')):
            for slice_idx in range(10):
                result = infer_rest(imgs_v1_224,
                                    slice_number=slice_idx,
                                    input_tensor='input',
                                    rest_url=base_url.format(model),
                                    output_tensors=[resnet_out],
                                    request_format=fmt)
                print("output shape", result[resnet_out].shape)
                assert result[resnet_out].shape == (1, 1000), ERROR_SHAPE

        # Batched requests (batch of 4) against the pnasnet_large entry.
        pnasnet_batch = input_data_downloader_v3_331[:4, :, :, :]
        print("Starting inference using pnasnet_large model")
        pnasnet_out = 'final_layer/predictions'
        for _ in range(10):
            result = infer_batch_rest(pnasnet_batch,
                                      input_tensor='input',
                                      rest_url=base_url.format('pnasnet_large'),
                                      output_tensors=[pnasnet_out],
                                      request_format='row_noname')
            print("output shape", result[pnasnet_out].shape)
            assert result[pnasnet_out].shape == (4, 1001), ERROR_SHAPE