Example 1
    def test_run_inference_rest(self, download_two_model_versions,
                                input_data_downloader_v1_224,
                                start_server_multi_model):
        """
        <b>Description</b>
        Execute inference requests using the REST API interface, both with
        a version specified and without a version set on the client.
        When no version is set, the server should use the latest model
        version (2). When version 1 is selected, the model from folder 1
        should be used and model 2 should be ignored.

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - latest model version serves resnet_v2_50 model - [1,1001]
        output resnet_v2_50/predictions/Reshape_1
        - first model version serves resnet_v1_50 model - [1,1000]
        output resnet_v1_50/predictions/Reshape_1
        """

        print("Downloaded model files:", download_two_model_versions)

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        out_name_v1 = 'resnet_v1_50/predictions/Reshape_1'
        out_name_v2 = 'resnet_v2_50/predictions/Reshape_1'
        print("Starting inference using latest version - no version set")
        rest_url = 'http://localhost:5561/v1/models/resnet:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='input',
                                rest_url=rest_url,
                                output_tensors=[out_name_v2],
                                request_format='column_name')
            print("output shape", output[out_name_v2].shape)
            assert output[out_name_v2].shape == (1, 1001), \
                'resnet model with latest version has invalid output'

        # both model versions use the same input data shape
        rest_url = 'http://localhost:5561/v1/models/resnet/versions/1:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='input',
                                rest_url=rest_url,
                                output_tensors=[out_name_v1],
                                request_format='column_name')
            print("output shape", output[out_name_v1].shape)
            assert output[out_name_v1].shape == (1, 1000), \
                'resnet model with version 1 has invalid output'
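
The two loops above differ only in the URL: in the TensorFlow-Serving-compatible scheme exposed by ie-serving-py, omitting the /versions/<n> segment selects the newest available version. Below is a standalone sketch of the same check using requests, assuming the multi-model server from the fixture is listening on port 5561; the zero-filled input is purely illustrative (its NCHW shape is an assumption), since only output shapes are asserted.

import json

import numpy as np
import requests

dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)  # illustrative input only

checks = [
    # No version segment: the server should answer with the latest
    # version (2), i.e. resnet_v2_50 with 1001 output classes.
    ('http://localhost:5561/v1/models/resnet:predict', (1, 1001)),
    # Explicit version segment: the server should use version 1,
    # i.e. resnet_v1_50 with 1000 output classes.
    ('http://localhost:5561/v1/models/resnet/versions/1:predict', (1, 1000)),
]

for url, expected_shape in checks:
    # Column format with a named input, as in the first loop above.
    payload = json.dumps({"inputs": {"input": dummy.tolist()}})
    outputs = requests.post(url, data=payload).json()["outputs"]
    assert np.array(outputs).shape == expected_shape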
Example 2
    def test_run_inference_rest(self, resnet_2_out_model_downloader,
                                input_data_downloader_v1_224,
                                create_channel_for_port_mapping_server,
                                start_server_with_mapping, request_format):
        """
            <b>Description</b>
            Submit request to the REST API interface serving a single
            resnet model with two outputs

            <b>input data</b>
            - directory with the model in IR format
            - docker image with ie-serving-py service
            - input data in numpy format

            <b>fixtures used</b>
            - model downloader
            - input data downloader
            - service launching

            <b>Expected results</b>
            - response contains proper numpy shape

        """

        print("Downloaded model files:", resnet_2_out_model_downloader)

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        rest_url = 'http://localhost:5556/v1/models/resnet_2_out:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='new_key',
                                rest_url=rest_url,
                                output_tensors=['mask', 'output'],
                                request_format=request_format)
            print("output shape", output['mask'].shape)
            print("output shape", output['output'].shape)
            assert output['mask'].shape == (1, 2048, 7, 7), ERROR_SHAPE
            assert output['output'].shape == (1, 2048, 7, 7), ERROR_SHAPE

Example 3

    def test_run_inference_rest(self, resnet_v1_50_model_downloader,
                                input_data_downloader_v1_224,
                                start_server_single_model, request_format):
        """
        <b>Description</b>
        Submit request to REST API interface serving a single resnet model

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape

        """

        print("Downloaded model files:", resnet_v1_50_model_downloader)

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        rest_url = 'http://localhost:5555/v1/models/resnet:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='input',
                                rest_url=rest_url,
                                output_tensors=[out_name],
                                request_format=request_format)
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

Example 4

    def test_run_inference_rest(self, download_two_models,
                                input_data_downloader_v1_224,
                                input_data_downloader_v3_331,
                                start_server_multi_model):
        """
        <b>Description</b>
        Execute inference request using REST API interface hosting multiple
        models

        <b>input data</b>
        - directory with 2 models in IR format
        - docker image
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape for both models set in the
        config file: resnet_v1_50 and pnasnet_large
        - both served models handle the appropriate input formats

        """

        print("Downloaded model files:", download_two_models)

        input_data = input_data_downloader_v1_224[:2, :, :, :]
        print("Starting inference using resnet model")
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        rest_url = 'http://localhost:5561/v1/models/resnet_V1_50:predict'
        for x in range(0, 10):
            output = infer_batch_rest(input_data,
                                      input_tensor='input',
                                      rest_url=rest_url,
                                      output_tensors=[out_name],
                                      request_format='column_name')
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (2, 1000), ERROR_SHAPE

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        rest_url = 'http://localhost:5561/v1/models/resnet_gs:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='input',
                                rest_url=rest_url,
                                output_tensors=[out_name],
                                request_format='column_noname')
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

        out_name = 'resnet_v1_50/predictions/Reshape_1'
        rest_url = 'http://localhost:5561/v1/models/resnet_s3:predict'
        for x in range(0, 10):
            output = infer_rest(imgs_v1_224,
                                slice_number=x,
                                input_tensor='input',
                                rest_url=rest_url,
                                output_tensors=[out_name],
                                request_format='row_name')
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

        input_data = input_data_downloader_v3_331[:4, :, :, :]
        print("Starting inference using pnasnet_large model")
        out_name = 'final_layer/predictions'
        rest_url = 'http://localhost:5561/v1/models/pnasnet_large:predict'
        for x in range(0, 10):
            output = infer_batch_rest(input_data,
                                      input_tensor='input',
                                      rest_url=rest_url,
                                      output_tensors=[out_name],
                                      request_format='row_noname')
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (4, 1001), ERROR_SHAPE
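
None of the examples define infer_rest or infer_batch_rest; they come from the surrounding test suite (as does the ERROR_SHAPE message). Assuming the helpers speak the TensorFlow-Serving-style REST dialect that ie-serving-py implements, the four request_format values plausibly map onto the two body layouts ("row" under "instances", "column" under "inputs"), each with or without named tensors. A minimal sketch of such a helper, under those assumptions (the real utility may differ):

import json

import numpy as np
import requests


def infer_rest(imgs, slice_number, input_tensor, rest_url,
               output_tensors, request_format):
    # Take one image but keep the batch dimension.
    data = imgs[slice_number:slice_number + 1].tolist()

    if request_format == 'row_name':
        # {"instances": [{"<input>": <image>}]}
        payload = {"instances": [{input_tensor: data[0]}]}
    elif request_format == 'row_noname':
        # {"instances": [<image>]}
        payload = {"instances": data}
    elif request_format == 'column_name':
        # {"inputs": {"<input>": <batched tensor>}}
        payload = {"inputs": {input_tensor: data}}
    else:  # 'column_noname'
        # {"inputs": <batched tensor>}
        payload = {"inputs": data}

    body = requests.post(rest_url, data=json.dumps(payload)).json()

    # Row formats respond under "predictions", column formats under "outputs".
    result = body["predictions"] if "predictions" in body else body["outputs"]
    if isinstance(result, dict):
        # Column format, named outputs: a dict of batched tensors.
        return {name: np.array(result[name]) for name in output_tensors}
    if result and isinstance(result[0], dict):
        # Row format, named outputs: one dict per instance; restack them.
        return {name: np.array([r[name] for r in result])
                for name in output_tensors}
    # A single anonymous output tensor.
    return {output_tensors[0]: np.array(result)}

infer_batch_rest would presumably be the same helper without the slicing step, sending all of imgs in one request, and the request_format argument in the parametrized tests above most likely comes from a pytest fixture or parametrize list over these four format names.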