# Module-level context assumed by this excerpt (the enclosing test class is
# omitted by the snippet source): standard imports plus the repository's
# gRPC helpers. The helper import path and the ERROR_SHAPE text below are
# assumptions, not the repository's actual definitions.
import time

import numpy as np

from utils.grpc import infer, infer_batch  # hypothetical import path

ERROR_SHAPE = 'output tensor has incorrect shape'  # assumed message text


    def test_run_inference_auto(self, resnet_8_batch_model_downloader,
                                input_data_downloader_v1_224,
                                start_server_batch_model_auto,
                                create_channel_for_batching_server_auto):
        """
        <b>Description</b>
        Submit requests with different batch sizes to a resnet model served
        with automatic batch size and verify that the output shape follows
        the requested batch size

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape for each requested batch size

        """

        print("Downloaded model files:", resnet_8_batch_model_downloader)

        # Connect to grpc service
        stub = create_channel_for_batching_server_auto

        batch_input = input_data_downloader_v1_224[:6, :, :, :]
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        output = infer_batch(batch_input=batch_input,
                             input_tensor='input',
                             grpc_stub=stub,
                             model_spec_name='resnet',
                             model_spec_version=None,
                             output_tensors=[out_name])
        print("output shape", output[out_name].shape)
        assert output[out_name].shape == (6, 1000), ERROR_SHAPE

        batch_input = input_data_downloader_v1_224[:1, :, :, :]
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        output = infer_batch(batch_input=batch_input,
                             input_tensor='input',
                             grpc_stub=stub,
                             model_spec_name='resnet',
                             model_spec_version=None,
                             output_tensors=[out_name])
        print("output shape", output[out_name].shape)
        assert output[out_name].shape == (1, 1000), ERROR_SHAPE
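
# `infer_batch` above is the repository's gRPC helper; the sketch below
# shows what it is assumed to do, using the TensorFlow Serving API that
# ie-serving-py implements. The function name, timeout and the lack of
# error handling are illustrative choices, not the repository's actual code.
from tensorflow import make_ndarray, make_tensor_proto
from tensorflow_serving.apis import predict_pb2


def infer_batch_sketch(batch_input, input_tensor, grpc_stub,
                       model_spec_name, model_spec_version, output_tensors):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_spec_name
    if model_spec_version is not None:
        request.model_spec.version.value = model_spec_version
    # Pack the numpy batch into a TensorProto keyed by the input name.
    request.inputs[input_tensor].CopyFrom(
        make_tensor_proto(batch_input, shape=list(batch_input.shape)))
    response = grpc_stub.Predict(request, timeout=10.0)
    # Convert each requested output TensorProto back to a numpy array.
    return {name: make_ndarray(response.outputs[name])
            for name in output_tensors}
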
    def test_run_inference(self, resnet_8_batch_model_downloader,
                           input_data_downloader_v1_224,
                           start_server_batch_model,
                           create_channel_for_batching_server):
        """
        <b>Description</b>
        Submit a request to the gRPC interface serving a single resnet model

        <b>input data</b>
        - directory with the model in IR format
        - docker image with ie-serving-py service
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape

        """

        print("Downloaded model files:", resnet_8_batch_model_downloader)

        # Starting docker with ie-serving
        result = start_server_batch_model
        print("docker starting status:", result)
        time.sleep(40)  # Waiting for inference service to load models
        assert result == 0, "docker container was not started successfully"

        # Connect to grpc service
        stub = create_channel_for_batching_server

        batch_input = input_data_downloader_v1_224[:8, :, :, :]
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        output = infer_batch(batch_input=batch_input,
                             input_tensor='input',
                             grpc_stub=stub,
                             model_spec_name='resnet',
                             model_spec_version=None,
                             output_tensors=[out_name])
        print("output shape", output[out_name].shape)
        assert output[out_name].shape == (8, 1000), ERROR_SHAPE
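
# `start_server_batch_model` and `create_channel_for_batching_server` are
# pytest fixtures; minimal sketches of what they are assumed to do follow.
# The docker image name, serving command, port and paths are illustrative
# assumptions, not the repository's actual fixtures.
import subprocess

import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc


def start_server_sketch():
    # Launch the serving container detached; the test asserts that the
    # exit code of `docker run` is 0.
    cmd = ['docker', 'run', '--rm', '-d', '-p', '9001:9001',
           '-v', '/tmp/models:/opt/ml', 'ie-serving-py:latest',
           '/ie-serving-py/start_server.sh', 'ie_serving', 'model',
           '--model_name', 'resnet', '--model_path', '/opt/ml/resnet',
           '--port', '9001']
    return subprocess.call(cmd)


def create_channel_sketch(address='localhost:9001'):
    # Wrap an insecure gRPC channel in the TensorFlow Serving stub whose
    # Predict method infer/infer_batch call.
    channel = grpc.insecure_channel(address)
    return prediction_service_pb2_grpc.PredictionServiceStub(channel)
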
    def test_run_inference(self, download_two_models,
                           input_data_downloader_v1_224,
                           input_data_downloader_v3_331,
                           start_server_multi_model,
                           create_channel_for_port_multi_server):
        """
        <b>Description</b>
        Execute inference requests using a gRPC interface hosting multiple models

        <b>input data</b>
        - directory with 2 models in IR format
        - docker image
        - input data in numpy format

        <b>fixtures used</b>
        - model downloader
        - input data downloader
        - service launching

        <b>Expected results</b>
        - response contains proper numpy shape for both models set in the
        config file: resnet_v1_50 and pnasnet_large
        - both served models handle the appropriate input formats

        """

        print("Downloaded model files:", download_two_models)

        # Starting docker with ie-serving

        result = start_server_multi_model
        print("docker starting multi model server status:", result)
        time.sleep(30)  # Waiting for inference service to load models
        assert result == 0, "docker container was not started successfully"

        # Connect to grpc service
        stub = create_channel_for_port_multi_server

        input_data = input_data_downloader_v1_224[:2, :, :, :]
        print("Starting inference using resnet model")
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        for _ in range(10):  # send the same batch request repeatedly
            output = infer_batch(input_data, input_tensor='input',
                                 grpc_stub=stub,
                                 model_spec_name='resnet_V1_50',
                                 model_spec_version=None,
                                 output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (2, 1000), ERROR_SHAPE

        imgs_v1_224 = np.array(input_data_downloader_v1_224)
        out_name = 'resnet_v1_50/predictions/Reshape_1'
        for x in range(10):  # one request per image slice
            output = infer(imgs_v1_224, slice_number=x,
                           input_tensor='input', grpc_stub=stub,
                           model_spec_name='resnet_gs',
                           model_spec_version=None,
                           output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

        out_name = 'resnet_v1_50/predictions/Reshape_1'
        for x in range(10):  # one request per image slice
            output = infer(imgs_v1_224, slice_number=x,
                           input_tensor='input', grpc_stub=stub,
                           model_spec_name='resnet_s3',
                           model_spec_version=None,
                           output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (1, 1000), ERROR_SHAPE

        input_data = input_data_downloader_v3_331[:4, :, :, :]
        print("Starting inference using pnasnet_large model")
        out_name = 'final_layer/predictions'
        for _ in range(10):
            output = infer_batch(input_data, input_tensor='input',
                                 grpc_stub=stub,
                                 model_spec_name='pnasnet_large',
                                 model_spec_version=None,
                                 output_tensors=[out_name])
            print("output shape", output[out_name].shape)
            assert output[out_name].shape == (4, 1001), ERROR_SHAPE
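
# `infer` differs from `infer_batch` only in sending the single image
# selected by slice_number; a minimal sketch under the same assumptions,
# reusing infer_batch_sketch from the sketch above:
def infer_sketch(imgs, slice_number, input_tensor, grpc_stub,
                 model_spec_name, model_spec_version, output_tensors):
    # Slice with a range to keep the batch dimension, so the served model
    # receives shape (1, ...).
    single_input = imgs[slice_number:slice_number + 1, :, :, :]
    return infer_batch_sketch(single_input, input_tensor, grpc_stub,
                              model_spec_name, model_spec_version,
                              output_tensors)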