Code Example #1
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]):
        """Overrides the tfx_pusher_executor.

        Args:
          input_dict: Input dict from input key to a list of artifacts,
            including:
            - model_export: exported model from trainer.
            - model_blessing: model blessing path from model_validator.
          output_dict: Output dict from key to a list of artifacts, including:
            - model_push: A list of 'ModelPushPath' artifact of size one. It
              will include the model in this push execution if the model was
              pushed.
          exec_properties: Mostly a passthrough input dict for
            tfx.components.Pusher.executor. custom_config.ai_platform_serving_args
            is consumed by this class. For the full set of parameters supported
            by Google Cloud AI Platform, refer to
            https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models#creating_a_model_version.

        Returns:
          None

        Raises:
          ValueError: if ai_platform_serving_args is not in
            exec_properties.custom_config.
          RuntimeError: if the Google Cloud AI Platform model deployment
            failed.
        """
        self._log_startup(input_dict, output_dict, exec_properties)
        if not self.CheckBlessing(input_dict, output_dict):
            return

        model_export = artifact_utils.get_single_instance(
            input_dict['model_export'])
        model_export_uri = model_export.uri
        model_blessing_uri = artifact_utils.get_single_uri(
            input_dict['model_blessing'])
        model_push = artifact_utils.get_single_instance(
            output_dict['model_push'])
        # TODO(jyzhao): should this be in driver or executor?
        if not tf.gfile.Exists(os.path.join(model_blessing_uri, 'BLESSED')):
            model_push.set_int_custom_property('pushed', 0)
            tf.logging.info('Model on %s was not blessed', model_blessing_uri)
            return

        exec_properties_copy = exec_properties.copy()
        custom_config = exec_properties_copy.pop('custom_config', {})
        # Raise the ValueError promised in the docstring instead of a bare
        # KeyError when the serving args are missing.
        if 'ai_platform_serving_args' not in custom_config:
            raise ValueError(
                'ai_platform_serving_args is missing in '
                'exec_properties.custom_config.')
        ai_platform_serving_args = custom_config['ai_platform_serving_args']

        # Deploy the model.
        model_path = path_utils.serving_model_path(model_export_uri)
        # Note: we do not have a logical model version right now. This
        # model_version is a timestamp mapped to trainer's exporter.
        model_version = os.path.basename(model_path)
        if ai_platform_serving_args is not None:
            runner.deploy_model_for_cmle_serving(model_path, model_version,
                                                 ai_platform_serving_args)

        # Make sure artifacts are populated in a standard way by calling
        # tfx.pusher.executor.Executor.Do().
        exec_properties_copy['push_destination'] = exec_properties.get(
            'push_destination') or self._make_local_temp_destination()
        super(Executor, self).Do(input_dict, output_dict, exec_properties_copy)
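
For reference, this Do() only consumes exec_properties shaped roughly as
below. This is a minimal sketch: the 'model_name' and 'project_id' keys follow
the test in Code Example #2, and the concrete values are assumptions.

    # Hypothetical exec_properties accepted by the executor above.
    exec_properties = {
        'custom_config': {
            'ai_platform_serving_args': {
                'model_name': 'my_model',        # assumed model name
                'project_id': 'my-gcp-project',  # assumed GCP project id
            },
        },
        # Optional; the executor falls back to a local temp destination.
        'push_destination': None,
    }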
Code Example #2
    def testDeployModelForCMLEServing(self, mock_discovery):
        serving_path = os.path.join(self._output_data_dir, 'serving_path')
        model_version = 'model_version'

        mock_discovery.build.return_value = self._mock_api_client
        mock_create = mock.Mock()
        mock_create.return_value.execute.return_value = {'name': 'op_name'}
        self._mock_api_client.projects().models().versions().create = (
            mock_create)
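        # A Mock returns the same child mock on repeated calls, so patching
        # create on versions() here intercepts the versions().create(...)
        # call made inside runner.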
        mock_get = mock.Mock()
        self._mock_api_client.projects().operations().get = mock_get
        mock_get.return_value.execute.return_value = {
            'done': 'Done',
        }

        runner.deploy_model_for_cmle_serving(serving_path, model_version,
                                             self._cmle_serving_args)

        mock_create.assert_called_with(body=mock.ANY,
                                       parent='projects/{}/models/{}'.format(
                                           self._project_id, 'model_name'))
        (_, kwargs) = mock_create.call_args
        body = kwargs['body']
        self.assertDictEqual(
            {
                'name': 'v{}'.format(model_version),
                'deployment_uri': serving_path,
                'runtime_version': runner._get_tf_runtime_version(),
                'python_version': runner._get_caip_python_version(),
            }, body)
        mock_get.assert_called_with(name='op_name')
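
To make the mocked call chain concrete, here is a sketch of the Cloud ML
Engine client usage that the test stubs out. It is an approximation of what
runner.deploy_model_for_cmle_serving does, not its actual implementation; the
runtime and Python versions are placeholders for the helper functions asserted
above.

    import time

    from googleapiclient import discovery

    project_id = 'my-gcp-project'              # hypothetical
    model_name = 'model_name'
    model_version = 'model_version'
    serving_path = 'gs://bucket/serving_path'  # hypothetical

    api = discovery.build('ml', 'v1')
    op = api.projects().models().versions().create(
        parent='projects/{}/models/{}'.format(project_id, model_name),
        body={
            'name': 'v{}'.format(model_version),
            'deployment_uri': serving_path,
            'runtime_version': '1.14',  # stands in for _get_tf_runtime_version()
            'python_version': '3.5',    # stands in for _get_caip_python_version()
        }).execute()
    # Poll the returned long-running operation until 'done' is truthy.
    while True:
        status = api.projects().operations().get(name=op['name']).execute()
        if status.get('done'):
            break
        time.sleep(30)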
Code Example #3
File: executor.py  Project: LeoYiChen/tfx
  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]):
    """Overrides the tfx_pusher_executor.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: Mostly a passthrough input dict for
        tfx.components.Pusher.executor.  custom_config.ai_platform_serving_args
        is consumed by this class.  For the full set of parameters supported by
        Google Cloud AI Platform, refer to
        https://cloud.google.com/ml-engine/docs/tensorflow/deploying-models#creating_a_model_version.

    Returns:
      None
    Raises:
      ValueError:
        If ai_platform_serving_args is not in exec_properties.custom_config.
        If the serving model path does not start with gs://.
      RuntimeError: if the Google Cloud AI Platform model deployment failed.
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    if not self.CheckBlessing(input_dict, output_dict):
      return

    model_export = artifact_utils.get_single_instance(
        input_dict['model_export'])
    model_export_uri = model_export.uri
    model_push = artifact_utils.get_single_instance(output_dict['model_push'])

    exec_properties_copy = exec_properties.copy()
    custom_config = exec_properties_copy.pop('custom_config', {})
    # Raise the ValueError promised in the docstring instead of a bare
    # KeyError when the serving args are missing.
    if 'ai_platform_serving_args' not in custom_config:
      raise ValueError(
          'ai_platform_serving_args is missing in '
          'exec_properties.custom_config.')
    ai_platform_serving_args = custom_config['ai_platform_serving_args']

    # Deploy the model.
    model_path = path_utils.serving_model_path(model_export_uri)
    # Note: we do not have a logical model version right now. This
    # model_version is a timestamp mapped to trainer's exporter.
    model_version = os.path.basename(model_path)
    if ai_platform_serving_args is not None:
      runner.deploy_model_for_cmle_serving(model_path, model_version,
                                           ai_platform_serving_args)

    model_push.set_int_custom_property('pushed', 1)
    model_push.set_string_custom_property('pushed_model', model_path)
Code Example #4
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]) -> None:
        """Push model to target directory if blessed.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator.  A push
        action delivers the model exports produced by Trainer to the destination
        defined in component config.
      output_dict: Output dict from key to a list of artifacts, including:
        - model_push: A list of 'ModelPushPath' artifact of size one. It will
          include the model in this push execution if the model was pushed.
      exec_properties: A dict of execution properties, including:
        - push_destination: JSON string of pusher_pb2.PushDestination instance,
          providing instruction of destination to push model.

    Returns:
      None
    """
        self._log_startup(input_dict, output_dict, exec_properties)
        if not self.CheckBlessing(input_dict, output_dict):
            return
        model_push = artifact_utils.get_single_instance(
            output_dict['model_push'])
        model_push_uri = model_push.uri
        model_export = artifact_utils.get_single_instance(
            input_dict['model_export'])
        model_export_uri = model_export.uri
        absl.logging.info('Model pushing.')
        # Copy the model we are pushing into the model_push artifact.
        model_path = path_utils.serving_model_path(model_export_uri)
        # Note: we do not have a logical model version right now. This
        # model_version is a timestamp mapped to trainer's exporter.
        model_version = os.path.basename(model_path)
        absl.logging.info('Model version is %s', model_version)
        io_utils.copy_dir(model_path,
                          os.path.join(model_push_uri, model_version))
        absl.logging.info('Model written to %s.', model_push_uri)

        # Copy to a fixed external path that a model server can watch.
        #
        # If the model was already copied there successfully, skip the copy.
        # The model validator may bless the same model twice (see the mv
        # driver) with different blessing outputs; Pusher still handles the mv
        # output again to keep metadata tracking, but there is no need to copy
        # to the external path again.
        # TODO(jyzhao): support rpc push and verification.
        push_destination = pusher_pb2.PushDestination()
        json_format.Parse(exec_properties['push_destination'],
                          push_destination)
        serving_path = os.path.join(push_destination.filesystem.base_directory,
                                    model_version)
        if tf.io.gfile.exists(serving_path):
            absl.logging.info(
                'Destination directory %s already exists, skipping current push.',
                serving_path)
        else:
            # tf.serving won't load a partial model; it will retry until the
            # copy completes.
            io_utils.copy_dir(model_path, serving_path)
            absl.logging.info('Model written to serving path %s.',
                              serving_path)

        model_push.set_int_custom_property('pushed', 1)
        model_push.set_string_custom_property('pushed_model', model_export_uri)
        model_push.set_int_custom_property('pushed_model_id', model_export.id)
        absl.logging.info('Model pushed to %s.', serving_path)

        if exec_properties.get('custom_config'):
            cmle_serving_args = exec_properties.get(
                'custom_config', {}).get('cmle_serving_args')
            if cmle_serving_args is not None:
                absl.logging.warn(
                    '\'cmle_serving_args\' is deprecated, please use custom executor '
                    'in tfx.extensions.google_cloud_ai_platform.pusher instead'
                )
                return runner.deploy_model_for_cmle_serving(
                    serving_path, model_version, cmle_serving_args)
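
For completeness, here is a sketch of how the push_destination exec property
parsed above could be produced. The base directory is an assumption; the proto
and serialization calls mirror the ones used in Do().

    from google.protobuf import json_format
    from tfx.proto import pusher_pb2

    # Hypothetical filesystem destination for the pushed model.
    destination = pusher_pb2.PushDestination(
        filesystem=pusher_pb2.PushDestination.Filesystem(
            base_directory='/serving_model/my_model'))  # assumed path
    exec_properties = {
        'push_destination': json_format.MessageToJson(destination),
    }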