Example #1
0
  def testBuildRequests_TFServing(self):
    """Checks that build_requests drives the TF Serving request builder."""
    mock_builder_cls = self._PrepareTFServingRequestBuilder()
    mock_builder = mock_builder_cls.return_value

    spec = _make_request_spec({
        'tensorflow_serving': {
            'signature_names': ['serving_default']
        },
        'split_name': 'eval',
        'num_examples': 1
    })
    request_builder.build_requests(
        model_name='foo',
        model=self._model,
        examples=self._examples,
        request_spec=spec)

    # The builder class must be constructed with the model name and resolved
    # signatures, then fed the examples artifact before building requests.
    mock_builder_cls.assert_called_with(
        model_name='foo',
        signatures={'serving_default': mock.ANY})
    mock_builder.ReadExamplesArtifact.assert_called_with(
        self._examples,
        split_name='eval',
        num_examples=1)
    mock_builder.BuildRequests.assert_called()
Example #2
0
    def testEmptyServingBinary(self):
        """build_requests must reject a RequestSpec with no serving binary."""
        # An empty spec selects no serving binary (e.g. tensorflow_serving),
        # which build_requests treats as an invalid RequestSpec.
        request_spec = _create_request_spec({})
        examples = standard_artifacts.Examples()

        # Fix: assertRaisesRegexp is a deprecated alias (removed in
        # Python 3.12); use assertRaisesRegex.
        with self.assertRaisesRegex(ValueError, 'Invalid RequestSpec'):
            request_builder.build_requests(model_name='foo',
                                           examples=examples,
                                           request_spec=request_spec)
Example #3
0
    def testInvalidTensorFlowServingRpcKind(self):
        """An unspecified rpc_kind must raise a ValueError."""
        # Prepare arguments: rpc_kind left at the proto's UNSPECIFIED value.
        request_spec = _create_request_spec({
            'tensorflow_serving': {
                'rpc_kind': 'TF_SERVING_RPC_KIND_UNSPECIFIED'
            }
        })
        examples = standard_artifacts.Examples()

        # Fix: assertRaisesRegexp is a deprecated alias (removed in
        # Python 3.12); use assertRaisesRegex.
        with self.assertRaisesRegex(ValueError,
                                    'Invalid TensorFlowServingRpcKind'):
            request_builder.build_requests(model_name='foo',
                                           examples=examples,
                                           request_spec=request_spec)
Example #4
0
    def testTensorFlowServingRegress(self):
        """REGRESS rpc_kind should route to BuildRegressionRequests."""
        examples = standard_artifacts.Examples()
        request_spec = _create_request_spec(
            {'tensorflow_serving': {
                'rpc_kind': 'REGRESS'
            }})

        request_builder.build_requests(model_name='foo',
                                       examples=examples,
                                       request_spec=request_spec)

        # The builder is constructed with the default max_examples and then
        # asked for regression requests.
        self.builder_cls.assert_called_with(model_name='foo', max_examples=1)
        self.builder.BuildRegressionRequests.assert_called()
Example #5
0
    def testTensorFlowServingClassify(self):
        """CLASSIFY rpc_kind should route to BuildClassificationRequests."""
        examples = mock.Mock()
        request_spec = _create_request_spec(
            {'tensorflow_serving': {
                'rpc_kind': 'CLASSIFY'
            }})

        request_builder.build_requests(model_name='foo',
                                       examples=examples,
                                       request_spec=request_spec)

        # The builder is constructed with the default max_examples and then
        # asked for classification requests.
        self.builder_cls.assert_called_with(model_name='foo', max_examples=1)
        self.builder.BuildClassificationRequests.assert_called()
Example #6
0
    def testBuildRequests_NumberOfRequests(self):
        """Two signatures with num_examples=3 should yield six requests."""
        spec = _make_request_spec({
            'tensorflow_serving': {
                'signature_names': ['classification', 'regression']
            },
            'split_name': 'eval',
            'num_examples': 3
        })
        result = request_builder.build_requests(
            model_name='foo',
            model=self._model,
            examples=self._examples,
            request_spec=spec)

        # Tally requests per signature name.
        per_signature = {}
        for request in result:
            name = request.model_spec.signature_name
            per_signature[name] = per_signature.get(name, 0) + 1

        # Total 6 requests (3 requests for each signature).
        self.assertEqual(len(result), 6)
        self.assertEqual(per_signature.get('classification'), 3)
        self.assertEqual(per_signature.get('regression'), 3)
Example #7
0
    def testSignatureName(self):
        """signature_name from the spec is forwarded to SetSignatureName."""
        examples = standard_artifacts.Examples()
        request_spec = _create_request_spec({
            'tensorflow_serving': {
                'rpc_kind': 'CLASSIFY',
                'signature_name': 'my_signature_name'
            }
        })

        request_builder.build_requests(model_name='foo',
                                       examples=examples,
                                       request_spec=request_spec)

        # The configured signature name must reach the builder.
        self.builder.SetSignatureName.assert_called_with('my_signature_name')
Example #8
0
    def testMaxExamples(self):
        """max_examples from the spec is passed to the builder constructor."""
        examples = standard_artifacts.Examples()
        request_spec = _create_request_spec({
            'tensorflow_serving': {
                'rpc_kind': 'CLASSIFY'
            },
            'max_examples': 123
        })

        request_builder.build_requests(model_name='foo',
                                       examples=examples,
                                       request_spec=request_spec)

        # The builder is constructed with the user-supplied max_examples.
        self.builder_cls.assert_called_with(model_name='foo', max_examples=123)
Example #9
0
    def testSplitNames(self):
        """split_name from the spec selects which examples split is read."""
        examples = standard_artifacts.Examples()
        request_spec = _create_request_spec({
            'tensorflow_serving': {
                'rpc_kind': 'CLASSIFY'
            },
            'split_name': 'train'
        })

        request_builder.build_requests(model_name='foo',
                                       examples=examples,
                                       request_spec=request_spec)

        # The configured split must be forwarded when reading the artifact.
        self.builder.ReadFromExamplesArtifact.assert_called_with(
            examples, split_name='train')
Example #10
0
    def testBuildRequests_DefaultArgument(self):
        builder_cls = self._PrepareTFServingRequestBuilder()
        builder = builder_cls.return_value

        request_builder.build_requests(
            model_name='foo',
            model=self._model,
            examples=self._examples,
            request_spec=_make_request_spec({
                'tensorflow_serving': {
                    # 'signature_names': ['serving_default']
                },
                # 'split_name': 'eval',
                # 'num_examples': 1
            }))

        builder.ReadExamplesArtifact.assert_called_with(
            self._examples,
            split_name=None,  # Without split_name (will choose any split).
            num_examples=1)  # Default num_examples = 1.
Example #11
0
    def _Do(
        self,
        model: types.Artifact,
        examples: Optional[types.Artifact],
        blessing: types.Artifact,
        serving_spec: infra_validator_pb2.ServingSpec,
        validation_spec: infra_validator_pb2.ValidationSpec,
        request_spec: Optional[infra_validator_pb2.RequestSpec],
    ):
        """Runs infra validation against the model and marks the blessing.

        Args:
          model: Model artifact to validate.
          examples: Optional Examples artifact used to build test requests.
          blessing: Output artifact marked blessed or not-blessed at the end.
          serving_spec: ServingSpec configuration (model name, serving
            binaries to validate against).
          validation_spec: ValidationSpec configuration for the retry loop.
          request_spec: Optional RequestSpec; together with `examples` it
            enables LOAD_AND_QUERY mode.
        """

        # LOAD_AND_QUERY mode needs both examples and a request spec;
        # otherwise only model loading is validated (LOAD_ONLY).
        if examples and request_spec:
            logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
            requests = request_builder.build_requests(
                model_name=serving_spec.model_name,
                model=model,
                examples=examples,
                request_spec=request_spec)
        else:
            logging.info('InfraValidator will be run in LOAD_ONLY mode.')
            requests = []

        model_path = self._PrepareModelPath(model, serving_spec)
        # TODO(jjong): Make logic parallel.
        # Every configured serving binary must pass for the model to be
        # blessed.
        all_passed = True
        for serving_binary in serving_bins.parse_serving_binaries(
                serving_spec):
            all_passed &= self._ValidateWithRetry(
                model_path=model_path,
                serving_binary=serving_binary,
                serving_spec=serving_spec,
                validation_spec=validation_spec,
                requests=requests)

        if all_passed:
            _mark_blessed(blessing)
            # `requests` is non-empty only when request_spec was provided, so
            # reading request_spec.make_warmup here cannot hit None.
            if requests and request_spec.make_warmup:
                self._CreateWarmupModel(blessing,
                                        model_path,
                                        warmup_requests=requests)
        else:
            _mark_not_blessed(blessing)
Example #12
0
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Contract for running InfraValidator Executor.

    Args:
      input_dict:
        - `model`: Single `Model` artifact that we're validating.
        - `examples`: `Examples` artifacts to be used for test requests.
      output_dict:
        - `blessing`: Single `InfraBlessing` artifact containing the validated
          result. It is an empty file with the name either of INFRA_BLESSED or
          INFRA_NOT_BLESSED.
      exec_properties:
        - `serving_spec`: Serialized `ServingSpec` configuration.
        - `validation_spec`: Serialized `ValidationSpec` configuration.
        - `request_spec`: Serialized `RequestSpec` configuration.
    """
        self._log_startup(input_dict, output_dict, exec_properties)

        model = artifact_utils.get_single_instance(input_dict['model'])
        blessing = artifact_utils.get_single_instance(output_dict['blessing'])

        serving_spec = infra_validator_pb2.ServingSpec()
        json_format.Parse(exec_properties['serving_spec'], serving_spec)
        # Fall back to a default model name when the user did not set one.
        if not serving_spec.model_name:
            serving_spec.model_name = _DEFAULT_MODEL_NAME

        # validation_spec is optional; unset fields get defaults below.
        validation_spec = infra_validator_pb2.ValidationSpec()
        if 'validation_spec' in exec_properties:
            json_format.Parse(exec_properties['validation_spec'],
                              validation_spec)
        if not validation_spec.num_tries:
            validation_spec.num_tries = _DEFAULT_NUM_TRIES
        if not validation_spec.max_loading_time_seconds:
            validation_spec.max_loading_time_seconds = _DEFAULT_MAX_LOADING_TIME_SEC

        if _is_query_mode(input_dict, exec_properties):
            logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
            request_spec = infra_validator_pb2.RequestSpec()
            json_format.Parse(exec_properties['request_spec'], request_spec)
            examples = artifact_utils.get_single_instance(
                input_dict['examples'])
            requests = request_builder.build_requests(
                # Model name is taken from the directory that contains the
                # serving model (basename of the serving model path's parent).
                model_name=os.path.basename(
                    os.path.dirname(path_utils.serving_model_path(model.uri))),
                examples=examples,
                request_spec=request_spec)
        else:
            logging.info('InfraValidator will be run in LOAD_ONLY mode.')
            requests = []

        model_path = self._PrepareModelPath(model.uri, serving_spec)
        try:
            # TODO(jjong): Make logic parallel.
            # Every configured serving binary must pass for a blessing.
            all_passed = True
            for serving_binary in serving_bins.parse_serving_binaries(
                    serving_spec):
                all_passed &= self._ValidateWithRetry(
                    model_path=model_path,
                    serving_binary=serving_binary,
                    serving_spec=serving_spec,
                    validation_spec=validation_spec,
                    requests=requests)
        finally:
            # Always clean up the temporary directory, even on failure.
            io_utils.delete_dir(self._get_tmp_dir())

        if all_passed:
            _mark_blessed(blessing)
        else:
            _mark_not_blessed(blessing)
Example #13
0
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Contract for running InfraValidator Executor.

    Args:
      input_dict:
        - `model`: Single `Model` artifact that we're validating.
        - `examples`: `Examples` artifacts to be used for test requests.
      output_dict:
        - `blessing`: Single `InfraBlessing` artifact containing the validated
          result. It is an empty file with the name either of INFRA_BLESSED or
          INFRA_NOT_BLESSED.
      exec_properties:
        - `serving_spec`: Serialized `ServingSpec` configuration.
        - `validation_spec`: Serialized `ValidationSpec` configuration.
        - `request_spec`: Serialized `RequestSpec` configuration.
    """
        model = artifact_utils.get_single_instance(input_dict['model'])
        blessing = artifact_utils.get_single_instance(output_dict['blessing'])

        serving_spec = infra_validator_pb2.ServingSpec()
        json_format.Parse(exec_properties['serving_spec'], serving_spec)
        validation_spec = infra_validator_pb2.ValidationSpec()
        json_format.Parse(exec_properties['validation_spec'], validation_spec)

        if _is_query_mode(input_dict, exec_properties):
            logging.info('InfraValidator will be run in LOAD_AND_QUERY mode.')
            request_spec = infra_validator_pb2.RequestSpec()
            json_format.Parse(exec_properties['request_spec'], request_spec)
            examples = artifact_utils.get_single_instance(
                input_dict['examples'])
            requests = request_builder.build_requests(
                # Model name is taken from the directory that contains the
                # serving model (basename of the serving model path's parent).
                model_name=os.path.basename(
                    os.path.dirname(path_utils.serving_model_path(model.uri))),
                examples=examples,
                request_spec=request_spec)
        else:
            logging.info('InfraValidator will be run in LOAD_ONLY mode.')
            requests = []

        runners = factory.create_model_server_runners(
            model=cast(standard_artifacts.Model, model),
            serving_spec=serving_spec)

        # TODO(jjong): Make logic parallel.
        for runner in runners:
            # _defer_stop presumably guarantees the runner is stopped when the
            # with-block exits, even on `continue` — confirm against its impl.
            with _defer_stop(runner):
                logging.info('Starting %s.', repr(runner))
                runner.Start()

                # Check model is successfully loaded.
                if not runner.WaitUntilModelAvailable(
                        timeout_secs=validation_spec.max_loading_time_seconds):
                    logging.error(
                        'Failed to load model in %s; marking as not blessed.',
                        repr(runner))
                    self._MarkNotBlessed(blessing)
                    continue

                # Check model can be successfully queried.
                if requests:
                    try:
                        runner.client.IssueRequests(requests)
                    except (grpc.RpcError, ValueError) as e:
                        logging.error(e)
                        logging.error(
                            'Failed to query model in %s; marking as not blessed.',
                            repr(runner))
                        self._MarkNotBlessed(blessing)
                        continue

        # NOTE(review): presumably blesses only when no runner was marked
        # not-blessed above — verify against _MarkBlessedIfSucceeded.
        self._MarkBlessedIfSucceeded(blessing)