Example #1
    def CheckBlessing(self, input_dict: Dict[Text, List[types.Artifact]],
                      output_dict: Dict[Text, List[types.Artifact]]) -> bool:
        """Check that model is blessed by upstream ModelValidator, or update output.

        Args:
          input_dict: Input dict from input key to a list of artifacts:
            - model_blessing: model blessing path from model_validator. Pusher
              looks for a file named 'BLESSED' to consider the model blessed and
              safe to push.
          output_dict: Output dict from key to a list of artifacts, including:
            - model_push: A list containing a single 'ModelPushPath' artifact.

        Returns:
          True if the model is blessed by validator.
        """
        model_blessing = artifact_utils.get_single_instance(
            input_dict['model_blessing'])
        model_push = artifact_utils.get_single_instance(
            output_dict['model_push'])
        # TODO(jyzhao): should this be in driver or executor?
        if not model_utils.is_model_blessed(model_blessing):
            model_push.set_int_custom_property('pushed', 0)
            absl.logging.info('Model on %s was not blessed',
                              model_blessing.uri)
            return False
        return True
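
Note: the helper model_utils.is_model_blessed is not shown in this example. Per the docstring above, blessing in this ModelValidator-era flow is signalled by a file named 'BLESSED' under the blessing artifact's URI. A minimal sketch of such a check, assuming a plain filesystem URI (illustrative only, not the actual TFX helper):

    import os

    def is_model_blessed(model_blessing) -> bool:
        # Illustrative: treat the model as blessed if a 'BLESSED' marker
        # file exists under the blessing artifact's directory.
        return os.path.exists(os.path.join(model_blessing.uri, 'BLESSED'))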
Example #2
  def CheckBlessing(self, input_dict: Dict[Text, List[types.Artifact]]) -> bool:
    """Check that model is blessed by upstream validators.

    Args:
      input_dict: Input dict from input key to a list of artifacts:
        - model_blessing: A `ModelBlessing` artifact from model validator or
          evaluator. Pusher looks for a custom property `blessed` in the
          artifact to check whether the model is safe to push.
        - infra_blessing: An `InfraBlessing` artifact from infra validator.
          Pusher looks for a custom property `blessed` in the artifact to
          determine whether the model is mechanically servable from the model
          server to which Pusher is going to push.

    Returns:
      True if the model is blessed by validator.
    """
    model_blessing = artifact_utils.get_single_instance(
        input_dict[MODEL_BLESSING_KEY])
    # TODO(jyzhao): should this be in driver or executor?
    if not model_utils.is_model_blessed(model_blessing):
      logging.info('Model on %s was not blessed by model validation',
                   model_blessing.uri)
      return False
    if INFRA_BLESSING_KEY in input_dict:
      infra_blessing = artifact_utils.get_single_instance(
          input_dict[INFRA_BLESSING_KEY])
      if not model_utils.is_infra_validated(infra_blessing):
        logging.info('Model on %s was not blessed by infra validator',
                     model_blessing.uri)
        return False
    return True
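
Note: in this newer flow the docstring describes blessing as a custom property on the artifact rather than a marker file. A hedged sketch of what such property checks could look like (the real model_utils helpers may differ):

    def is_model_blessed(model_blessing) -> bool:
        # Illustrative: a ModelBlessing artifact records an integer custom
        # property 'blessed' (1 = blessed, 0 = not blessed).
        return model_blessing.get_int_custom_property('blessed') == 1

    def is_infra_validated(infra_blessing) -> bool:
        # Illustrative: InfraBlessing uses the same convention.
        return infra_blessing.get_int_custom_property('blessed') == 1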
Example #3
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Runs batch inference on a given model with given input examples.

        Args:
          input_dict: Input dict from input key to a list of Artifacts.
            - examples: examples for inference.
            - model: exported model.
            - model_blessing: model blessing result, optional.
          output_dict: Output dict from output key to a list of Artifacts.
            - output: bulk inference results.
          exec_properties: A dict of execution properties.
            - model_spec: JSON string of bulk_inferrer_pb2.ModelSpec instance.
            - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.

        Returns:
          None
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        if 'examples' not in input_dict:
            raise ValueError('\'examples\' is missing in input dict.')
        if 'inference_result' not in output_dict:
            raise ValueError('\'inference_result\' is missing in output dict.')
        output = artifact_utils.get_single_instance(
            output_dict['inference_result'])
        if 'model' not in input_dict:
            raise ValueError('Input models are not valid: \'model\' needs '
                             'to be specified.')
        if 'model_blessing' in input_dict:
            model_blessing = artifact_utils.get_single_instance(
                input_dict['model_blessing'])
            if not model_utils.is_model_blessed(model_blessing):
                output.set_int_custom_property('inferred', 0)
                logging.info('Model on %s was not blessed', model_blessing.uri)
                return
        else:
            logging.info(
                'Model blessing is not provided, exported model will be '
                'used.')

        model = artifact_utils.get_single_instance(input_dict['model'])
        model_path = path_utils.serving_model_path(model.uri)
        logging.info('Use exported model from %s.', model_path)

        data_spec = bulk_inferrer_pb2.DataSpec()
        json_format.Parse(exec_properties['data_spec'], data_spec)
        if self._run_model_inference(
                data_spec, input_dict['examples'], output.uri,
                self._get_inference_spec(model_path, exec_properties)):
            output.set_int_custom_property('inferred', 1)
        else:
            output.set_int_custom_property('inferred', 0)
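
Note: this executor records success through an integer custom property 'inferred' on the output artifact (1 on success, 0 on failure). A downstream consumer could gate on it like this (illustrative sketch; only the property name is taken from the code above):

    def inference_succeeded(inference_result) -> bool:
        # 'inferred' is set by the BulkInferrer executor above; a missing
        # property reads as 0 and is treated as failure.
        return inference_result.get_int_custom_property('inferred') == 1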
Example #4
    def CheckBlessing(self, input_dict: Dict[Text,
                                             List[types.Artifact]]) -> bool:
        """Check that model is blessed by upstream validators.

        Args:
          input_dict: Input dict from input key to a list of artifacts:
            - model_blessing: A `ModelBlessing` artifact from model validator or
              evaluator. Pusher looks for a custom property `blessed` in the
              artifact to check whether the model is safe to push.
            - infra_blessing: An `InfraBlessing` artifact from infra validator.
              Pusher looks for a custom property `blessed` in the artifact to
              determine whether the model is mechanically servable from the model
              server to which Pusher is going to push.

        Returns:
          True if the model is blessed by validator.
        """
        # TODO(jyzhao): should this be in driver or executor?
        maybe_model_blessing = input_dict.get(
            standard_component_specs.MODEL_BLESSING_KEY)
        if maybe_model_blessing:
            model_blessing = artifact_utils.get_single_instance(
                maybe_model_blessing)
            if not model_utils.is_model_blessed(model_blessing):
                logging.info('Model on %s was not blessed by model validation',
                             model_blessing.uri)
                return False
        maybe_infra_blessing = input_dict.get(
            standard_component_specs.INFRA_BLESSING_KEY)
        if maybe_infra_blessing:
            infra_blessing = artifact_utils.get_single_instance(
                maybe_infra_blessing)
            if not model_utils.is_infra_validated(infra_blessing):
                logging.info('Model on %s was not blessed by infra validator',
                             infra_blessing.uri)
                return False
        if not maybe_model_blessing and not maybe_infra_blessing:
            logging.warning(
                'Pusher is going to push the model without validation. '
                'Consider using Evaluator or InfraValidator in your '
                'pipeline.')
        return True
Example #5
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Runs batch inference on a given model with given input examples.

        Args:
          input_dict: Input dict from input key to a list of Artifacts.
            - examples: examples for inference.
            - model: exported model.
            - model_blessing: model blessing result, optional.
          output_dict: Output dict from output key to a list of Artifacts.
            - output: bulk inference results.
          exec_properties: A dict of execution properties.
            - model_spec: JSON string of bulk_inferrer_pb2.ModelSpec instance.
            - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.

        Returns:
          None
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        if 'examples' not in input_dict:
            raise ValueError('\'examples\' is missing in input dict.')
        if 'inference_result' not in output_dict:
            raise ValueError('\'inference_result\' is missing in output dict.')
        output = artifact_utils.get_single_instance(
            output_dict['inference_result'])
        if 'model' not in input_dict:
            raise ValueError('Input models are not valid: \'model\' needs '
                             'to be specified.')
        if 'model_blessing' in input_dict:
            model_blessing = artifact_utils.get_single_instance(
                input_dict['model_blessing'])
            if not model_utils.is_model_blessed(model_blessing):
                output.set_int_custom_property('inferred', 0)
                logging.info('Model on %s was not blessed', model_blessing.uri)
                return
        else:
            logging.info(
                'Model blessing is not provided, exported model will be '
                'used.')

        model = artifact_utils.get_single_instance(input_dict['model'])
        model_path = path_utils.serving_model_path(model.uri)
        logging.info('Use exported model from %s.', model_path)

        data_spec = bulk_inferrer_pb2.DataSpec()
        json_format.Parse(exec_properties['data_spec'], data_spec)
        example_uris = {}
        if data_spec.example_splits:
            for example in input_dict['examples']:
                for split in artifact_utils.decode_split_names(
                        example.split_names):
                    if split in data_spec.example_splits:
                        example_uris[split] = os.path.join(example.uri, split)
        else:
            for example in input_dict['examples']:
                for split in artifact_utils.decode_split_names(
                        example.split_names):
                    example_uris[split] = os.path.join(example.uri, split)
        model_spec = bulk_inferrer_pb2.ModelSpec()
        json_format.Parse(exec_properties['model_spec'], model_spec)
        output_path = os.path.join(output.uri, _PREDICTION_LOGS_DIR_NAME)
        self._run_model_inference(model_path, example_uris, output_path,
                                  model_spec)
        logging.info('BulkInferrer generates prediction log to %s',
                     output_path)
        output.set_int_custom_property('inferred', 1)
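
Note: the split-handling block above maps each selected split name to its directory under the Examples artifact. The same filtering, as a self-contained sketch with hypothetical values:

    import os

    example_uri = '/tmp/examples/1'                # hypothetical artifact URI
    split_names = ['train', 'eval', 'unlabelled']  # decoded split names
    wanted = ['unlabelled']                        # data_spec.example_splits

    example_uris = {
        split: os.path.join(example_uri, split)
        for split in split_names
        if not wanted or split in wanted  # an empty list means "all splits"
    }
    # -> {'unlabelled': '/tmp/examples/1/unlabelled'}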
Example #6
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Get human review result on a model through Slack channel.

        Args:
          input_dict: Input dict from input key to a list of artifacts, including:
            - model_export: exported model from trainer.
            - model_blessing: model blessing path from model_validator.
          output_dict: Output dict from key to a list of artifacts, including:
            - slack_blessing: model blessing result.
          exec_properties: A dict of execution properties, including:
            - slack_token: Token used to set up the connection with the Slack
              server.
            - slack_channel_id: The ID of the Slack channel to send and receive
              messages.
            - timeout_sec: How long to wait for a response, in seconds.

        Returns:
          None

        Raises:
          TimeoutError: If no decision is made within timeout_sec.
          ConnectionError: If the connection to the Slack server cannot be
            established.
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        # Fetch execution properties from exec_properties dict.
        slack_token = exec_properties['slack_token']
        slack_channel_id = exec_properties['slack_channel_id']
        timeout_sec = exec_properties['timeout_sec']

        # Fetch input URIs from input_dict.
        model_export_uri = artifact_utils.get_single_uri(input_dict['model'])
        model_blessing = artifact_utils.get_single_instance(
            input_dict['model_blessing'])

        # Fetch output artifact from output_dict.
        slack_blessing = artifact_utils.get_single_instance(
            output_dict['slack_blessing'])

        # We only consider a model as blessed if both of the following conditions
        # are met:
        # - The model is blessed by model validator. This is determined by looking
        #   for file named 'BLESSED' from the output from Model Validator.
        # - The model is blessed by a human reviewer. This logic is in
        #   _fetch_slack_blessing().
        slack_response = None
        with Timeout(timeout_sec):
            if model_utils.is_model_blessed(model_blessing):
                slack_response = self._fetch_slack_blessing(
                    slack_token, slack_channel_id, model_export_uri)

        # If model is blessed, write an empty file named 'BLESSED' in the assigned
        # output path. Otherwise, write an empty file named 'NOT_BLESSED' instead.
        if slack_response and slack_response.approved:
            io_utils.write_string_file(
                os.path.join(slack_blessing.uri, 'BLESSED'), '')
            slack_blessing.set_int_custom_property('blessed', 1)
        else:
            io_utils.write_string_file(
                os.path.join(slack_blessing.uri, 'NOT_BLESSED'), '')
            slack_blessing.set_int_custom_property('blessed', 0)
        if slack_response:
            slack_blessing.set_string_custom_property('slack_decision_maker',
                                                      slack_response.user_id)
            slack_blessing.set_string_custom_property('slack_decision_message',
                                                      slack_response.message)
            slack_blessing.set_string_custom_property(
                'slack_decision_channel', slack_response.slack_channel_id)
            slack_blessing.set_string_custom_property('slack_decision_thread',
                                                      slack_response.thread_ts)
        absl.logging.info('Blessing result written to %s.', slack_blessing.uri)
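
Note: the Timeout context manager used above is not defined in this snippet. One common way to implement such a helper on POSIX systems is with signal.alarm; a sketch under that assumption (not necessarily the component's actual implementation):

    import contextlib
    import signal

    @contextlib.contextmanager
    def Timeout(seconds: int):
        """Raises TimeoutError if the body runs longer than `seconds`."""

        def _handler(signum, frame):
            raise TimeoutError('No decision made within %d seconds.' % seconds)

        old_handler = signal.signal(signal.SIGALRM, _handler)
        signal.alarm(seconds)
        try:
            yield
        finally:
            signal.alarm(0)  # Cancel any pending alarm.
            signal.signal(signal.SIGALRM, old_handler)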
Example #7
  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Runs batch inference on a given model with given input examples.

    This function creates a new model (if necessary) and a new model version
    before inference, and cleans up those resources after inference. It is safe
    to re-execute because it cleans up (only) the model resources that were
    created during the process, even if the inference job failed.

    Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - examples: examples for inference.
        - model: exported model.
        - model_blessing: model blessing result
      output_dict: Output dict from output key to a list of Artifacts.
        - output: bulk inference results.
      exec_properties: A dict of execution properties.
        - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.
        - custom_config: custom_config.ai_platform_serving_args need to contain
          the serving job parameters sent to Google Cloud AI Platform. For the
          full set of parameters, refer to
          https://cloud.google.com/ml-engine/reference/rest/v1/projects.models

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    if 'examples' not in input_dict:
      raise ValueError('\'examples\' is missing in input dict.')
    if 'inference_result' not in output_dict:
      raise ValueError('\'inference_result\' is missing in output dict.')
    output = artifact_utils.get_single_instance(output_dict['inference_result'])
    if 'model' not in input_dict:
      raise ValueError('Input models are not valid: \'model\' needs '
                       'to be specified.')
    if 'model_blessing' in input_dict:
      model_blessing = artifact_utils.get_single_instance(
          input_dict['model_blessing'])
      if not model_utils.is_model_blessed(model_blessing):
        output.set_int_custom_property('inferred', 0)
        logging.info('Model on %s was not blessed', model_blessing.uri)
        return
    else:
      logging.info('Model blessing is not provided, exported model will be '
                   'used.')
    if _CUSTOM_CONFIG_KEY not in exec_properties:
      raise ValueError('Input exec properties are not valid: {} needs '
                       'to be specified.'.format(_CUSTOM_CONFIG_KEY))

    custom_config = json_utils.loads(
        exec_properties.get(_CUSTOM_CONFIG_KEY, 'null'))
    if custom_config is not None and not isinstance(custom_config, dict):
      raise ValueError('custom_config in execution properties needs to be a '
                       'dict.')
    ai_platform_serving_args = custom_config.get(SERVING_ARGS_KEY)
    if not ai_platform_serving_args:
      raise ValueError(
          '\'ai_platform_serving_args\' is missing in \'custom_config\'')
    service_name, api_version = runner.get_service_name_and_api_version(
        ai_platform_serving_args)
    executor_class_path = '%s.%s' % (self.__class__.__module__,
                                     self.__class__.__name__)
    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
      job_labels = telemetry_utils.get_labels_dict()
    model = artifact_utils.get_single_instance(input_dict['model'])
    model_path = path_utils.serving_model_path(model.uri)
    logging.info('Use exported model from %s.', model_path)
    # Use model artifact uri to generate model version to guarantee the
    # 1:1 mapping from model version to model.
    model_version = 'version_' + hashlib.sha256(model.uri.encode()).hexdigest()
    inference_spec = self._get_inference_spec(model_path, model_version,
                                              ai_platform_serving_args)
    data_spec = bulk_inferrer_pb2.DataSpec()
    json_format.Parse(exec_properties['data_spec'], data_spec)
    api = discovery.build(service_name, api_version)
    new_model_created = False
    try:
      new_model_created = runner.create_model_for_aip_prediction_if_not_exist(
          api, job_labels, ai_platform_serving_args)
      runner.deploy_model_for_aip_prediction(
          api,
          model_path,
          model_version,
          ai_platform_serving_args,
          job_labels,
          skip_model_creation=True,
          set_default_version=False,
      )
      self._run_model_inference(data_spec, input_dict['examples'], output.uri,
                                inference_spec)
    except Exception as e:
      logging.error('Error in executing CloudAIBulkInferrerComponent: %s',
                    str(e))
      output.set_int_custom_property('inferred', 0)
      raise
    finally:
      # Guarantee newly created resources are cleaned up even if the inference
      # job failed.

      # Clean up the newly deployed model.
      runner.delete_model_version_from_aip_if_exists(api, model_version,
                                                     ai_platform_serving_args)
      if new_model_created:
        runner.delete_model_from_aip_if_exists(api, ai_platform_serving_args)
    # Mark the inference as successful after resources are cleaned up.
    output.set_int_custom_property('inferred', 1)
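
Note: the deterministic version name above is what guarantees the 1:1 mapping from model artifact to AI Platform model version, and it lets the finally block find and delete the version by name on retry. The derivation is plain hashlib and easy to verify in isolation:

    import hashlib

    model_uri = 'gs://bucket/pipeline/Trainer/model/42'  # hypothetical URI
    model_version = 'version_' + hashlib.sha256(model_uri.encode()).hexdigest()
    # The same URI always yields the same version name, across retries.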
Example #8
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Runs batch inference on a given model with given input examples.

        Args:
          input_dict: Input dict from input key to a list of Artifacts.
            - examples: examples for inference.
            - model: exported model.
            - model_blessing: model blessing result, optional.
          output_dict: Output dict from output key to a list of Artifacts.
            - output: bulk inference results.
          exec_properties: A dict of execution properties.
            - model_spec: JSON string of bulk_inferrer_pb2.ModelSpec instance.
            - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.

        Returns:
          None
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        if output_dict.get(standard_component_specs.INFERENCE_RESULT_KEY):
            inference_result = artifact_utils.get_single_instance(
                output_dict[standard_component_specs.INFERENCE_RESULT_KEY])
        else:
            inference_result = None
        if output_dict.get(standard_component_specs.OUTPUT_EXAMPLES_KEY):
            output_examples = artifact_utils.get_single_instance(
                output_dict[standard_component_specs.OUTPUT_EXAMPLES_KEY])
        else:
            output_examples = None

        if 'examples' not in input_dict:
            raise ValueError('\'examples\' is missing in input dict.')
        if 'model' not in input_dict:
            raise ValueError('Input models are not valid: \'model\' needs '
                             'to be specified.')
        if standard_component_specs.MODEL_BLESSING_KEY in input_dict:
            model_blessing = artifact_utils.get_single_instance(
                input_dict[standard_component_specs.MODEL_BLESSING_KEY])
            if not model_utils.is_model_blessed(model_blessing):
                logging.info('Model on %s was not blessed', model_blessing.uri)
                return
        else:
            logging.info(
                'Model blessing is not provided, exported model will be '
                'used.')

        model = artifact_utils.get_single_instance(
            input_dict[standard_component_specs.MODEL_KEY])
        model_path = path_utils.serving_model_path(
            model.uri, path_utils.is_old_model_artifact(model))
        logging.info('Use exported model from %s.', model_path)

        data_spec = bulk_inferrer_pb2.DataSpec()
        proto_utils.json_to_proto(
            exec_properties[standard_component_specs.DATA_SPEC_KEY], data_spec)

        output_example_spec = bulk_inferrer_pb2.OutputExampleSpec()
        if exec_properties.get(
                standard_component_specs.OUTPUT_EXAMPLE_SPEC_KEY):
            proto_utils.json_to_proto(
                exec_properties[
                    standard_component_specs.OUTPUT_EXAMPLE_SPEC_KEY],
                output_example_spec)

        self._run_model_inference(
            data_spec, output_example_spec,
            input_dict[standard_component_specs.EXAMPLES_KEY], output_examples,
            inference_result,
            self._get_inference_spec(model_path, exec_properties))
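
Note: proto_utils.json_to_proto here plays the same role as json_format.Parse in the earlier examples: both deserialize a JSON string into a protobuf message in place. A minimal self-contained illustration using a standard protobuf type as a stand-in for bulk_inferrer_pb2.DataSpec:

    from google.protobuf import json_format
    from google.protobuf import struct_pb2

    msg = struct_pb2.Struct()  # stand-in message type
    json_format.Parse('{"example_splits": ["eval"]}', msg)
    # Struct accepts arbitrary JSON; a real DataSpec would require its
    # fields to match the message definition.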
Example #9
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Runs batch inference on a given model with given input examples.

        Args:
          input_dict: Input dict from input key to a list of Artifacts.
            - examples: examples for inference.
            - model: exported model.
            - model_blessing: model blessing result, optional.
          output_dict: Output dict from output key to a list of Artifacts.
            - output: bulk inference results.
          exec_properties: A dict of execution properties.
            - model_spec: JSON string of bulk_inferrer_pb2.ModelSpec instance.
            - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.

        Returns:
          None
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        source = exec_properties[StepKeys.SOURCE]
        args = exec_properties[StepKeys.ARGS]
        c = source_utils.load_source_path_class(source)
        inferrer_step: BaseInferrer = c(**args)

        output_examples = artifact_utils.get_single_instance(
            output_dict[PREDICTIONS])

        if EXAMPLES not in input_dict:
            raise ValueError('\'examples\' is missing in input dict.')
        if MODEL not in input_dict:
            raise ValueError('Input models are not valid: \'model\' needs '
                             'to be specified.')
        if MODEL_BLESSING in input_dict:
            model_blessing = artifact_utils.get_single_instance(
                input_dict[MODEL_BLESSING])
            if not model_utils.is_model_blessed(model_blessing):
                logging.info('Model on %s was not blessed', model_blessing.uri)
                return
        else:
            logging.info(
                'Model blessing is not provided, exported model will be '
                'used.')

        model = artifact_utils.get_single_instance(input_dict[MODEL])
        model_path = path_utils.serving_model_path(model.uri)
        logging.info('Use exported model from %s.', model_path)

        output_example_spec = bulk_inferrer_pb2.OutputExampleSpec(
            output_columns_spec=[
                bulk_inferrer_pb2.OutputColumnsSpec(
                    predict_output=bulk_inferrer_pb2.PredictOutput(
                        output_columns=[
                            bulk_inferrer_pb2.PredictOutputCol(
                                output_key=x,
                                output_column=f'{x}_label',
                            ) for x in inferrer_step.get_labels()
                        ]))
            ])

        model_spec = bulk_inferrer_pb2.ModelSpec()
        saved_model_spec = model_spec_pb2.SavedModelSpec(
            model_path=model_path,
            tag=model_spec.tag,
            signature_name=model_spec.model_signature_name)
        inference_spec = model_spec_pb2.InferenceSpecType()
        inference_spec.saved_model_spec.CopyFrom(saved_model_spec)

        self._run_model_inference(output_example_spec, input_dict[EXAMPLES],
                                  output_examples, inference_spec,
                                  inferrer_step)
Example #10
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Runs batch inference on a given model with given input examples.

        Args:
          input_dict: Input dict from input key to a list of Artifacts.
            - examples: examples for inference.
            - model_export: exported model.
            - model_blessing: model blessing result.
            - model_push: pushed model. Either model_push or (model_export and
              model_blessing) needs to be present.
          output_dict: Output dict from output key to a list of Artifacts.
            - output: bulk inference results.
          exec_properties: A dict of execution properties.
            - model_spec: JSON string of bulk_inferrer_pb2.ModelSpec instance.
            - data_spec: JSON string of bulk_inferrer_pb2.DataSpec instance.

        Returns:
          None
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        if 'examples' not in input_dict:
            raise ValueError('\'examples\' is missing in input dict.')
        if 'output' not in output_dict:
            raise ValueError('\'output\' is missing in output dict.')
        output = artifact_utils.get_single_instance(output_dict['output'])
        if 'model_push' in input_dict:
            model_push = artifact_utils.get_single_instance(
                input_dict['model_push'])
            model_path = io_utils.get_only_uri_in_dir(model_push.uri)
            logging.info('Use pushed model from %s.', model_path)
        elif 'model_blessing' in input_dict and 'model_export' in input_dict:
            model_blessing = artifact_utils.get_single_instance(
                input_dict['model_blessing'])
            if not model_utils.is_model_blessed(model_blessing):
                output.set_int_custom_property('inferred', 0)
                logging.info('Model on %s was not blessed', model_blessing.uri)
                return
            model_export = artifact_utils.get_single_instance(
                input_dict['model_export'])
            model_path = path_utils.serving_model_path(model_export.uri)
            logging.info('Use exported model from %s.', model_path)
        else:
            raise ValueError(
                'Input models are not valid. Either model_push or '
                '(model_blessing and model_export) needs to be '
                'specified.')
        data_spec = bulk_inferrer_pb2.DataSpec()
        json_format.Parse(exec_properties['data_spec'], data_spec)
        example_uris = {}
        if data_spec.example_splits:
            for example in input_dict['examples']:
                if example.split in data_spec.example_splits:
                    example_uris[example.split] = example.uri
        else:
            for example in input_dict['examples']:
                example_uris[example.split] = example.uri
        model_spec = bulk_inferrer_pb2.ModelSpec()
        json_format.Parse(exec_properties['model_spec'], model_spec)
        output_path = os.path.join(output.uri, _PREDICTION_LOGS_DIR_NAME)
        self._run_model_inference(model_path, example_uris, output_path,
                                  model_spec)
        logging.info('BulkInferrer generates prediction log to %s',
                     output_path)
        output.set_int_custom_property('inferred', 1)