Example 1
    def setUp(self):
        super(LocalDockerRunnerTest, self).setUp()

        # Resolve <component root>/testdata by walking three directories up
        # from this file (model_server_runners/ -> infra_validator/ ->
        # components/).
        this_dir = os.path.dirname(__file__)
        testdata_dir = os.path.join(
            os.path.dirname(os.path.dirname(this_dir)), 'testdata')

        # Model fixture under test.
        self._model = standard_artifacts.Model()
        self._model.uri = os.path.join(testdata_dir, 'trainer', 'current')
        self._model_name = 'chicago-taxi'
        self._model_path = path_utils.serving_model_path(self._model.uri)

        # Replace docker.DockerClient with a mock for the duration of the test.
        docker_patcher = mock.patch('docker.DockerClient')
        self._docker_client = docker_patcher.start().return_value
        self.addCleanup(docker_patcher.stop)

        # Serving spec with a single tensorflow_serving binary (tag 1.15.0)
        # running via local docker.
        self._serving_spec = _create_serving_spec({
            'tensorflow_serving': {'tags': ['1.15.0']},
            'local_docker': {},
            'model_name': self._model_name,
        })
        self._serving_binary = serving_bins.parse_serving_binaries(
            self._serving_spec)[0]

        # Stub out the model server client factory as well.
        client_patcher = mock.patch.object(self._serving_binary, 'MakeClient')
        self._model_server_client = client_patcher.start().return_value
        self.addCleanup(client_patcher.stop)
Example 2
  def setUp(self):
    super(ExecutorTest, self).setUp()

    # --- Mocks ---
    # Mock out request_builder.build_requests for the whole test.
    build_requests_patcher = mock.patch.object(
        request_builder, 'build_requests')
    self.build_requests_mock = build_requests_patcher.start()
    self.addCleanup(build_requests_patcher.stop)

    # --- Directories ---
    # Read fixtures from testdata/; write outputs under a per-test directory.
    testdata_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), 'testdata')
    output_root = os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR',
                                 self.get_temp_dir())
    test_output_dir = os.path.join(output_root, self._testMethodName)

    # --- Input artifacts ---
    self._model = standard_artifacts.Model()
    self._model.uri = os.path.join(testdata_dir, 'trainer', 'current')
    self._model_path = path_utils.serving_model_path(self._model.uri)
    eval_examples = standard_artifacts.Examples()
    eval_examples.uri = os.path.join(testdata_dir, 'transform',
                                     'transformed_examples', 'eval')
    eval_examples.split_names = artifact_utils.encode_split_names(['eval'])
    self._input_dict = {
        'model': [self._model],
        'examples': [eval_examples],
    }

    # --- Output artifacts and execution context ---
    self._blessing = standard_artifacts.InfraBlessing()
    self._blessing.uri = os.path.join(test_output_dir, 'blessing')
    self._output_dict = {'blessing': [self._blessing]}
    self._context = executor.Executor.Context(
        tmp_dir=os.path.join(test_output_dir, '.temp'), unique_id='1')

    # --- Execution properties (JSON-serialized proto configs) ---
    self._serving_spec = _make_serving_spec({
        'tensorflow_serving': {'tags': ['1.15.0']},
        'local_docker': {},
        'model_name': 'chicago-taxi',
    })
    self._serving_binary = serving_bins.parse_serving_binaries(
        self._serving_spec)[0]
    self._validation_spec = _make_validation_spec({
        'max_loading_time_seconds': 10,
        'num_tries': 3,
    })
    self._request_spec = _make_request_spec({
        'tensorflow_serving': {'rpc_kind': 'CLASSIFY'},
        'max_examples': 1,
    })
    self._exec_properties = {
        'serving_spec': json_format.MessageToJson(self._serving_spec),
        'validation_spec': json_format.MessageToJson(self._validation_spec),
        'request_spec': json_format.MessageToJson(self._request_spec),
    }
Example 3
  def testParseServingBinaries_TensorFlowServing_DefaultImageName(self):
    """Without an explicit image_name, the stock tensorflow/serving image is used."""
    serving_spec = infra_validator_pb2.ServingSpec(
        tensorflow_serving=infra_validator_pb2.TensorFlowServing(
            tags=['latest']))

    binaries = serving_bins.parse_serving_binaries(serving_spec)

    self.assertLen(binaries, 1)
    binary = binaries[0]
    self.assertIsInstance(binary, serving_bins.TensorFlowServing)
    self.assertEqual(binary.image, 'tensorflow/serving:latest')
Example 4
  def _CreateKubernetesRunner(self, k8s_config_dict=None):
    """Builds a KubernetesRunner from a ServingSpec assembled in place.

    Also stores the parsed spec on self._serving_spec for later assertions.
    """
    spec_dict = {
        'tensorflow_serving': {'tags': ['1.15.0']},
        # An empty kubernetes config is a valid default.
        'kubernetes': k8s_config_dict or {},
        'model_name': self._model_name,
    }
    self._serving_spec = infra_validator_pb2.ServingSpec()
    json_format.ParseDict(spec_dict, self._serving_spec)

    serving_binary = serving_bins.parse_serving_binaries(self._serving_spec)[0]
    return kubernetes_runner.KubernetesRunner(
        model_path=path_utils.serving_model_path(self._model.uri),
        serving_binary=serving_binary,
        serving_spec=self._serving_spec)
Example 5
  def testParseServingBinaries_TensorFlowServing(self):
    """Every tag and every digest yields one TensorFlowServing binary."""
    spec = infra_validator_pb2.ServingSpec(
        tensorflow_serving=infra_validator_pb2.TensorFlowServing(
            image_name='gcr.io/my_project/my_serving_image',
            tags=['t1', 't2'],
            digests=['sha256:d1', 'sha256:d2']))

    parsed = serving_bins.parse_serving_binaries(spec)

    # 2 tags + 2 digests -> 4 binaries.
    self.assertLen(parsed, 4)
    images = []
    for binary in parsed:
      self.assertIsInstance(binary, serving_bins.TensorFlowServing)
      images.append(binary.image)
    self.assertCountEqual(images, [
        'gcr.io/my_project/my_serving_image:t1',
        'gcr.io/my_project/my_serving_image:t2',
        'gcr.io/my_project/my_serving_image@sha256:d1',
        'gcr.io/my_project/my_serving_image@sha256:d2',
    ])
Example 6
    def _Do(
        self,
        model: types.Artifact,
        examples: Optional[types.Artifact],
        blessing: types.Artifact,
        serving_spec: infra_validator_pb2.ServingSpec,
        validation_spec: infra_validator_pb2.ValidationSpec,
        request_spec: Optional[infra_validator_pb2.RequestSpec],
    ):
        """Validates the model against every configured serving binary."""
        # LOAD_AND_QUERY mode requires both examples and a request spec;
        # otherwise fall back to LOAD_ONLY with no test requests.
        if examples and request_spec:
            logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
            requests = request_builder.build_requests(
                model_name=serving_spec.model_name,
                model=model,
                examples=examples,
                request_spec=request_spec)
        else:
            logging.info('InfraValidator will be run in LOAD_ONLY mode.')
            requests = []

        model_path = self._PrepareModelPath(model, serving_spec)

        # TODO(jjong): Make logic parallel.
        # Collect results eagerly so every binary is validated even after a
        # failure (no short-circuit).
        results = [
            self._ValidateWithRetry(
                model_path=model_path,
                serving_binary=serving_binary,
                serving_spec=serving_spec,
                validation_spec=validation_spec,
                requests=requests)
            for serving_binary in serving_bins.parse_serving_binaries(
                serving_spec)
        ]

        if not all(results):
            _mark_not_blessed(blessing)
            return
        _mark_blessed(blessing)
        if requests and request_spec.make_warmup:
            self._CreateWarmupModel(
                blessing, model_path, warmup_requests=requests)
Example 7
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Contract for running InfraValidator Executor.

    Args:
      input_dict:
        - `model`: Single `Model` artifact that we're validating.
        - `examples`: `Examples` artifacts to be used for test requests.
      output_dict:
        - `blessing`: Single `InfraBlessing` artifact containing the validated
          result. It is an empty file with the name either of INFRA_BLESSED or
          INFRA_NOT_BLESSED.
      exec_properties:
        - `serving_spec`: Serialized `ServingSpec` configuration.
        - `validation_spec`: Serialized `ValidationSpec` configuration.
        - `request_spec`: Serialized `RequestSpec` configuration.
    """
        self._log_startup(input_dict, output_dict, exec_properties)

        model = artifact_utils.get_single_instance(input_dict['model'])
        blessing = artifact_utils.get_single_instance(output_dict['blessing'])

        # Deserialize the serving spec; fill in the default model name when the
        # user did not provide one.
        serving_spec = infra_validator_pb2.ServingSpec()
        json_format.Parse(exec_properties['serving_spec'], serving_spec)
        if not serving_spec.model_name:
            serving_spec.model_name = _DEFAULT_MODEL_NAME

        # validation_spec is optional; unset (zero-valued) fields fall back to
        # module-level defaults.
        validation_spec = infra_validator_pb2.ValidationSpec()
        if 'validation_spec' in exec_properties:
            json_format.Parse(exec_properties['validation_spec'],
                              validation_spec)
        if not validation_spec.num_tries:
            validation_spec.num_tries = _DEFAULT_NUM_TRIES
        if not validation_spec.max_loading_time_seconds:
            validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC

        if _is_query_mode(input_dict, exec_properties):
            # LOAD_AND_QUERY: build concrete requests from the examples.
            logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
            request_spec = infra_validator_pb2.RequestSpec()
            json_format.Parse(exec_properties['request_spec'], request_spec)
            examples = artifact_utils.get_single_instance(
                input_dict['examples'])
            # NOTE(review): model_name here is derived from the serving model
            # path (parent directory name), not from serving_spec.model_name —
            # confirm this asymmetry is intentional.
            requests = request_builder.build_requests(
                model_name=os.path.basename(
                    os.path.dirname(path_utils.serving_model_path(model.uri))),
                examples=examples,
                request_spec=request_spec)
        else:
            # LOAD_ONLY: only check that the model loads; no test requests.
            logging.info('InfraValidator will be run in LOAD_ONLY mode.')
            requests = []

        model_path = self._PrepareModelPath(model.uri, serving_spec)
        try:
            # TODO(jjong): Make logic parallel.
            # Validate against every parsed serving binary; `&=` keeps running
            # the remaining binaries even after one fails.
            all_passed = True
            for serving_binary in serving_bins.parse_serving_binaries(
                    serving_spec):
                all_passed &= self._ValidateWithRetry(
                    model_path=model_path,
                    serving_binary=serving_binary,
                    serving_spec=serving_spec,
                    validation_spec=validation_spec,
                    requests=requests)
        finally:
            # Always remove the temporary model directory, pass or fail.
            io_utils.delete_dir(self._get_tmp_dir())

        if all_passed:
            _mark_blessed(blessing)
        else:
            _mark_not_blessed(blessing)