import time

import numpy as np

# infer() and ERROR_SHAPE are helpers defined elsewhere in this test suite;
# a sketch of infer() follows the first example below.


    def test_run_inference(self, download_two_model_versions,
                           input_data_downloader_v1_224,
                           start_server_multi_model,
                           create_channel_for_port_multi_server):
        """
        <b>Description</b>
        Execute inference requests over the gRPC interface, both with a
        version specified and with no version set on the client.
        When no version is set, the server should use the latest version,
        model 2. When version 1 is selected, the model from folder 1 should
        be used and model 2 should be ignored.

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - latest model version serves resnet_v2_50 model - [1,1001]
        output resnet_v2_50/predictions/Reshape_1
        - first model version serves resnet_v1_50 model - [1,1000]
        output resnet_v1_50/predictions/Reshape_1
        """

        print("Downloaded model files:", download_two_model_versions)

        # Connect to grpc service
        stub = create_channel_for_port_multi_server

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        out_name_v1 = 'resnet_v1_50/predictions/Reshape_1'
        out_name_v2 = 'resnet_v2_50/predictions/Reshape_1'
        print("Starting inference using latest version - no version set")
        for x in range(0, 10):
            output = infer(imgs_v1_224, slice_number=x, input_tensor='input',
                           grpc_stub=stub, model_spec_name='resnet',
                           model_spec_version=None,
                           output_tensors=[out_name_v2])
            print("output shape", output[out_name_v2].shape)
            assert output[out_name_v2].shape == (1, 1001),\
                'resnet model with latest version has invalid output'

        # both model versions use the same input data shape
        for x in range(0, 10):
            output = infer(imgs_v1_224, slice_number=x, input_tensor='input',
                           grpc_stub=stub, model_spec_name='resnet',
                           model_spec_version=1,
                           output_tensors=[out_name_v1])
            print("output shape", output[out_name_v1].shape)
            assert output[out_name_v1].shape == (1, 1000),\
                'resnet model with version 1 has invalid output'
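

# A minimal sketch of the infer() helper used throughout these tests,
# assuming the standard tensorflow-serving-api package; the suite's real
# implementation may differ in details such as the request deadline.
from tensorflow import make_ndarray, make_tensor_proto
from tensorflow_serving.apis import predict_pb2


def infer(imgs, slice_number, input_tensor, grpc_stub, model_spec_name,
          model_spec_version, output_tensors):
    # Take one image slice while keeping the batch dimension.
    img = imgs[slice_number:slice_number + 1]
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_spec_name
    if model_spec_version is not None:
        request.model_spec.version.value = model_spec_version
    request.inputs[input_tensor].CopyFrom(
        make_tensor_proto(img, shape=img.shape))
    response = grpc_stub.Predict(request, 10.0)  # 10 s deadline
    return {name: make_ndarray(response.outputs[name])
            for name in output_tensors}
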
# Example #2
    def test_run_inference(self, input_data_downloader_v1_224,
                           start_server_single_model_from_gc,
                           create_channel_for_port_single_server):
        """
        <b>Description</b>
        Submit a request to the gRPC interface serving a single resnet model

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape

        """

        # Connect to grpc service
        stub = create_channel_for_port_single_server

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        for x in range(0, 10):
            output = infer(imgs_v1_224, slice_number=x,
                           input_tensor='input', grpc_stub=stub,
                           model_spec_name='resnet',
                           model_spec_version=None,
                           output_tensors=[out_name])
        print("output shape", output[out_name].shape)
        assert output[out_name].shape == (1, 1000), ERROR_SHAPE
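

# A hedged sketch of what a channel fixture such as
# create_channel_for_port_single_server might return: a PredictionService
# stub over an insecure gRPC channel. The address and port here are
# illustrative defaults, not values taken from the suite's configuration.
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc


def create_grpc_stub(address='localhost', port=9000):
    channel = grpc.insecure_channel('{}:{}'.format(address, port))
    return prediction_service_pb2_grpc.PredictionServiceStub(channel)
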
# Example #3
    def test_run_inference(self, resnet_2_out_model_downloader,
                           input_data_downloader_v1_224,
                           create_channel_for_port_mapping_server,
                           start_server_with_mapping):
        """
        <b>Description</b>
        Submit requests to the gRPC interface serving a two-output resnet
        model with custom tensor names defined in a mapping config

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape

        """

        print("Downloaded model files:", resnet_2_out_model_downloader)

        # Starting docker with ie-serving
        result = start_server_with_mapping
        print("docker starting status:", result)
        time.sleep(20)  # Waiting for inference service to load models
        assert result == 0, "docker container was not started successfully"

        # Connect to grpc service
        stub = create_channel_for_port_mapping_server

        imgs_v1_224 = np.array(input_data_downloader_v1_224)

        for x in range(0, 10):
            output = infer(imgs_v1_224,
                           slice_number=x,
                           input_tensor='new_key',
                           grpc_stub=stub,
                           model_spec_name='resnet_2_out',
                           model_spec_version=None,
                           output_tensors=['mask', 'output'])
        print("output shape", output['mask'].shape)
        print("output shape", output['output'].shape)
        assert output['mask'].shape == (1, 2048, 7, 7), ERROR_SHAPE
        assert output['output'].shape == (1, 2048, 7, 7), ERROR_SHAPE
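

# The 'new_key', 'mask' and 'output' names used above come from a mapping
# file placed in the model version directory. A hedged sketch of producing
# such a mapping_config.json in the ie-serving-py format; the original
# tensor names on the left are illustrative placeholders.
import json

mapping = {
    "inputs": {"original_input": "new_key"},
    "outputs": {"original_out_1": "mask", "original_out_2": "output"},
}
with open("mapping_config.json", "w") as f:
    json.dump(mapping, f, indent=4)
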
# Example #4
    def test_run_inference(self, download_two_models,
                           input_data_downloader_v1_224,
                           input_data_downloader_v3_331,
                           start_server_multi_model,
                           create_channel_for_port_multi_server):
        """
        <b>Description</b>
        Execute inference request using gRPC interface hosting multiple models

        <b>input data</b>
        - directory with 2 models in IR format
        - docker image
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - responses contain the proper numpy shapes for both models set in
        the config file: resnet_v1_50 and pnasnet_large
        - both served models handle their respective input formats

        """

        print("Downloaded model files:", download_two_models)

        # Starting docker with ie-serving

        result = start_server_multi_model
        print("docker starting multi model server status:", result)
        time.sleep(30)  # Waiting for inference service to load models
        assert result == 0, "docker container was not started successfully"

        # Connect to grpc service
        stub = create_channel_for_port_multi_server

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        print("Starting inference using resnet model")
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        for x in range(0, 10):
            output = infer(imgs_v1_224,
                           slice_number=x,
                           input_tensor='input',
                           grpc_stub=stub,
                           model_spec_name='resnet_V1_50',
                           model_spec_version=None,
                           output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

        out_name = 'resnet_v1_50/predictions/Reshape_1'
        for x in range(0, 10):
            output = infer(imgs_v1_224,
                           slice_number=x,
                           input_tensor='input',
                           grpc_stub=stub,
                           model_spec_name='resnet_gs',
                           model_spec_version=None,
                           output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

        out_name = 'resnet_v1_50/predictions/Reshape_1'
        for x in range(0, 10):
            output = infer(imgs_v1_224,
                           slice_number=x,
                           input_tensor='input',
                           grpc_stub=stub,
                           model_spec_name='resnet_s3',
                           model_spec_version=None,
                           output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

        imgs_v3_331 = np.array(input_data_downloader_v3_331)
        print("Starting inference using pnasnet_large model")
        out_name = 'final_layer/predictions'
        for x in range(0, 10):
            output = infer(imgs_v3_331,
                           slice_number=x,
                           input_tensor='input',
                           grpc_stub=stub,
                           model_spec_name='pnasnet_large',
                           model_spec_version=None,
                           output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1001), ERROR_SHAPE
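

# The model names used above ('resnet_V1_50', 'resnet_gs', 'resnet_s3',
# 'pnasnet_large') must match entries in the server's multi-model config.
# A hedged sketch in the ie-serving-py config format; the base_path values
# are illustrative placeholders for local, GCS and S3 storage.
import json

config = {
    "model_config_list": [
        {"config": {"name": "resnet_V1_50",
                    "base_path": "/opt/ml/resnet_V1_50"}},
        {"config": {"name": "resnet_gs",
                    "base_path": "gs://some-bucket/resnet"}},
        {"config": {"name": "resnet_s3",
                    "base_path": "s3://some-bucket/resnet"}},
        {"config": {"name": "pnasnet_large",
                    "base_path": "/opt/ml/pnasnet_large"}},
    ]
}
with open("config.json", "w") as f:
    json.dump(config, f, indent=4)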