Code example #1
    def testDo(self):
        output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)

        tf.io.gfile.makedirs(os.path.join(output_data_dir, 'input'))
        tf.io.gfile.makedirs(os.path.join(output_data_dir, 'output'))

        input_artifact = standard_artifacts.ModelBlessing()
        input_artifact.uri = os.path.join(output_data_dir, 'input')
        with open(os.path.join(output_data_dir, 'input', 'BLESSED'), 'w') as f:
            f.write('')

        output_artifact = standard_artifacts.ModelBlessing()
        output_artifact.uri = os.path.join(output_data_dir, 'output')
        input_dict = {'artifact': [input_artifact]}
        output_dict = {'pushed_artifact': [output_artifact]}
        exec_properties = {
            'push_destination':
            json_format.MessageToJson(pusher_pb2.PushDestination(
                filesystem=pusher_pb2.PushDestination.Filesystem(
                    base_directory=output_data_dir)),
                                      sort_keys=True)
        }
        # Run executor.
        executor = component.ArtifactPusherExecutor()
        executor.Do(input_dict, output_dict, exec_properties)
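The 'push_destination' property here is a PushDestination proto serialized to JSON before being handed to the executor. Inside an executor's Do method it would typically be parsed back into a proto. A minimal sketch using the standard protobuf json_format API follows; the variable names are illustrative, and real TFX executors wrap this in their own helpers:

from google.protobuf import json_format
from tfx.proto import pusher_pb2

# Recover the PushDestination proto from the JSON string in exec_properties.
push_destination = pusher_pb2.PushDestination()
json_format.Parse(exec_properties['push_destination'], push_destination)
base_directory = push_destination.filesystem.base_directory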
Code example #2
  def testGetLatestBlessedModelArtifact_IrMode(self):
    with metadata.Metadata(connection_config=self._connection_config) as m:
      # Model with id 1, will be blessed.
      model_one = standard_artifacts.Model()
      model_one.uri = 'model_one'
      model_one.id = 1
      # Model with id 2, will be blessed.
      model_two = standard_artifacts.Model()
      model_two.uri = 'model_two'
      model_two.id = 2
      # Model with id 3, will not be blessed.
      model_three = standard_artifacts.Model()
      model_three.uri = 'model_three'
      model_three.id = 3

      model_blessing_one = standard_artifacts.ModelBlessing()
      self._set_model_blessing_bit(model_blessing_one, model_one.id, 1)
      model_blessing_two = standard_artifacts.ModelBlessing()
      self._set_model_blessing_bit(model_blessing_two, model_two.id, 1)

      resolver = latest_blessed_model_resolver.LatestBlessedModelResolver()
      result = resolver.resolve_artifacts(
          m, {
              'model': [model_one, model_two, model_three],
              'model_blessing': [model_blessing_one, model_blessing_two]
          })
      self.assertIsNotNone(result)
      self.assertEqual([a.uri for a in result['model']], ['model_two'])
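Note: the _set_model_blessing_bit fixture used above (and in several later examples) is not defined in this listing. Judging from the custom properties written in code example #23, a minimal sketch could look like the following; this is an assumption, not the actual TFX test helper:

  def _set_model_blessing_bit(self, model_blessing, model_id, is_blessed):
    # Assumed property names, mirroring code example #23: record which model
    # this blessing artifact refers to.
    model_blessing.set_int_custom_property('current_model_id', model_id)
    # 1 marks the referenced model as blessed, 0 marks it as not blessed.
    model_blessing.set_int_custom_property('blessed', is_blessed)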
Code example #3
    def testDoValidation(self, exec_properties, blessed, has_baseline):
        source_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'testdata')
        output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)

        # Create input dict.
        examples = standard_artifacts.Examples()
        examples.uri = os.path.join(source_data_dir, 'csv_example_gen')
        examples.split_names = artifact_utils.encode_split_names(
            ['train', 'eval'])
        model = standard_artifacts.Model()
        baseline_model = standard_artifacts.Model()
        model.uri = os.path.join(source_data_dir, 'trainer/current')
        baseline_model.uri = os.path.join(source_data_dir, 'trainer/previous/')
        schema = standard_artifacts.Schema()
        schema.uri = os.path.join(source_data_dir, 'schema_gen')
        input_dict = {
            EXAMPLES_KEY: [examples],
            MODEL_KEY: [model],
            SCHEMA_KEY: [schema],
        }
        if has_baseline:
            input_dict[BASELINE_MODEL_KEY] = [baseline_model]

        # Create output dict.
        eval_output = standard_artifacts.ModelEvaluation()
        eval_output.uri = os.path.join(output_data_dir, 'eval_output')
        blessing_output = standard_artifacts.ModelBlessing()
        blessing_output.uri = os.path.join(output_data_dir, 'blessing_output')
        output_dict = {
            EVALUATION_KEY: [eval_output],
            BLESSING_KEY: [blessing_output],
        }

        # List needs to be serialized before being passed into Do function.
        exec_properties[EXAMPLE_SPLITS_KEY] = json_utils.dumps(None)

        # Run executor.
        evaluator = executor.Executor()
        evaluator.Do(input_dict, output_dict, exec_properties)

        # Check evaluator outputs.
        self.assertTrue(
            fileio.exists(os.path.join(eval_output.uri, 'eval_config.json')))
        self.assertTrue(fileio.exists(os.path.join(eval_output.uri,
                                                   'metrics')))
        self.assertTrue(fileio.exists(os.path.join(eval_output.uri, 'plots')))
        self.assertTrue(
            fileio.exists(os.path.join(eval_output.uri, 'validations')))
        if blessed:
            self.assertTrue(
                fileio.exists(os.path.join(blessing_output.uri, 'BLESSED')))
        else:
            self.assertTrue(
                fileio.exists(os.path.join(blessing_output.uri,
                                           'NOT_BLESSED')))
Code example #4
  def testStrategy_IrMode(self):
    # Model with id 1, will be blessed.
    model_one = standard_artifacts.Model()
    model_one.uri = 'model_one'
    model_one.id = 1
    # Model with id 2, will be blessed.
    model_two = standard_artifacts.Model()
    model_two.uri = 'model_two'
    model_two.id = 2
    # Model with id 3, will not be blessed.
    model_three = standard_artifacts.Model()
    model_three.uri = 'model_three'
    model_three.id = 3

    model_blessing_one = standard_artifacts.ModelBlessing()
    self._set_model_blessing_bit(model_blessing_one, model_one.id, 1)
    model_blessing_two = standard_artifacts.ModelBlessing()
    self._set_model_blessing_bit(model_blessing_two, model_two.id, 1)

    strategy = latest_blessed_model_strategy.LatestBlessedModelStrategy()
    result = strategy.resolve_artifacts(
        self._store, {
            'model': [model_one, model_two, model_three],
            'model_blessing': [model_blessing_one, model_blessing_two]
        })
    self.assertIsNotNone(result)
    self.assertEqual([a.uri for a in result['model']], ['model_two'])
Code example #5
  def testStrategy(self):
    contexts = self._metadata.register_pipeline_contexts_if_not_exists(
        self._pipeline_info)
    # Model with id 1, will be blessed.
    model_one = standard_artifacts.Model()
    model_one.uri = 'model_one'
    self._metadata.publish_artifacts([model_one])
    # Model with id 2, will be blessed.
    model_two = standard_artifacts.Model()
    model_two.uri = 'model_two'
    self._metadata.publish_artifacts([model_two])
    # Model with id 3, will not be blessed.
    model_three = standard_artifacts.Model()
    model_three.uri = 'model_three'
    self._metadata.publish_artifacts([model_three])

    model_blessing_one = standard_artifacts.ModelBlessing()
    self._set_model_blessing_bit(model_blessing_one, model_one.id, 1)
    model_blessing_two = standard_artifacts.ModelBlessing()
    self._set_model_blessing_bit(model_blessing_two, model_two.id, 1)
    self._metadata.publish_artifacts([model_blessing_one, model_blessing_two])

    self._metadata.register_execution(
        exec_properties={},
        pipeline_info=self._pipeline_info,
        component_info=self._component_info,
        contexts=contexts)
    self._metadata.publish_execution(
        component_info=self._component_info,
        output_artifacts={
            'a': [model_one, model_two, model_three],
            'b': [model_blessing_one, model_blessing_two]
        })

    strategy = latest_blessed_model_strategy.LatestBlessedModelStrategy()
    resolve_result = strategy.resolve(
        pipeline_info=self._pipeline_info,
        metadata_handler=self._metadata,
        source_channels={
            'model':
                types.Channel(
                    type=standard_artifacts.Model,
                    producer_component_id=self._component_info.component_id,
                    output_key='a'),
            'model_blessing':
                types.Channel(
                    type=standard_artifacts.ModelBlessing,
                    producer_component_id=self._component_info.component_id,
                    output_key='b')
        })
    self.assertTrue(resolve_result.has_complete_result)
    self.assertEqual([
        a.uri
        for a in resolve_result.per_key_resolve_result['model']
    ], ['model_two'])
    self.assertTrue(resolve_result.per_key_resolve_state['model'])
Code example #6
  def testGetLatestBlessedModelArtifact(self):
    with metadata.Metadata(connection_config=self._connection_config) as m:
      contexts = m.register_pipeline_contexts_if_not_exists(self._pipeline_info)
      # Model with id 1, will be blessed.
      model_one = standard_artifacts.Model()
      model_one.uri = 'model_one'
      m.publish_artifacts([model_one])
      # Model with id 2, will be blessed.
      model_two = standard_artifacts.Model()
      model_two.uri = 'model_two'
      m.publish_artifacts([model_two])
      # Model with id 3, will not be blessed.
      model_three = standard_artifacts.Model()
      model_three.uri = 'model_three'
      m.publish_artifacts([model_three])

      model_blessing_one = standard_artifacts.ModelBlessing()
      self._set_model_blessing_bit(model_blessing_one, model_one.id, 1)
      model_blessing_two = standard_artifacts.ModelBlessing()
      self._set_model_blessing_bit(model_blessing_two, model_two.id, 1)
      m.publish_artifacts([model_blessing_one, model_blessing_two])

      m.register_execution(
          input_artifacts={
              'a': [model_one, model_two, model_three],
              'b': [model_blessing_one, model_blessing_two]
          },
          exec_properties={},
          pipeline_info=self._pipeline_info,
          component_info=self._component_info,
          contexts=contexts)

      resolver = latest_blessed_model_resolver.LatestBlessedModelResolver()
      resolve_result = resolver.resolve(
          pipeline_info=self._pipeline_info,
          metadata_handler=m,
          source_channels={
              'model':
                  types.Channel(type=standard_artifacts.Model),
              'model_blessing':
                  types.Channel(type=standard_artifacts.ModelBlessing)
          })
      self.assertTrue(resolve_result.has_complete_result)
      self.assertEqual([
          artifact.uri
          for artifact in resolve_result.per_key_resolve_result['model']
      ], ['model_two'])
      self.assertTrue(resolve_result.per_key_resolve_state['model'])
Code example #7
  def setUp(self):
    super(ExecutorTest, self).setUp()
    self._source_data_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), 'testdata')
    output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)
    self.component_id = 'test_component'

    # Create input dict.
    eval_examples = standard_artifacts.Examples()
    eval_examples.split_names = artifact_utils.encode_split_names(['eval'])
    eval_examples.uri = os.path.join(self._source_data_dir, 'csv_example_gen')
    model = standard_artifacts.Model()
    model.uri = os.path.join(self._source_data_dir, 'trainer/current')
    self._input_dict = {
        executor.EXAMPLES_KEY: [eval_examples],
        executor.MODEL_KEY: [model],
    }

    # Create output dict.
    self._blessing = standard_artifacts.ModelBlessing()
    self._blessing.uri = os.path.join(output_data_dir, 'blessing')
    self._output_dict = {executor.BLESSING_KEY: [self._blessing]}

    # Create context
    self._tmp_dir = os.path.join(output_data_dir, '.temp')
    self._context = executor.Executor.Context(tmp_dir=self._tmp_dir,
                                              unique_id='2')
Code example #8
    def __init__(self,
                 examples: types.Channel,
                 model: types.Channel,
                 blessing: Optional[types.Channel] = None,
                 instance_name: Optional[Text] = None):
        """Construct a ModelValidator component.

    Args:
      examples: A Channel of type `standard_artifacts.Examples`, usually
        produced by an
        [ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen) component.
        _required_
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a [Trainer](https://www.tensorflow.org/tfx/guide/trainer) component.
        _required_
      blessing: Output channel of type `standard_artifacts.ModelBlessing` that
        contains the validation result.
      instance_name: Optional name assigned to this specific instance of
        ModelValidator.  Required only if multiple ModelValidator components are
        declared in the same pipeline.
    """
        blessing = blessing or types.Channel(
            type=standard_artifacts.ModelBlessing,
            artifacts=[standard_artifacts.ModelBlessing()])
        spec = ModelValidatorSpec(examples=examples,
                                  model=model,
                                  blessing=blessing)
        super(ModelValidator, self).__init__(spec=spec,
                                             instance_name=instance_name)
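The 'blessing or types.Channel(...)' fallback above lets callers omit the output channel and have one created automatically. A pipeline-level usage sketch, assuming example_gen and trainer components defined elsewhere in the pipeline:

model_validator = ModelValidator(
    examples=example_gen.outputs['examples'],
    model=trainer.outputs['model'])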
Code example #9
  def setUp(self):
    super().setUp()
    self._source_data_dir = os.path.join(
        os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
        'components', 'testdata')
    self._output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)
    self.component_id = 'test_component'

    # Create input dict.
    self._examples = standard_artifacts.Examples()
    self._examples.uri = os.path.join(self._source_data_dir, 'csv_example_gen')
    self._examples.split_names = artifact_utils.encode_split_names(
        ['unlabelled'])
    self._model = standard_artifacts.Model()
    self._model.uri = os.path.join(self._source_data_dir, 'trainer/current')
    self._model_version = 'version_' + hashlib.sha256(
        self._model.uri.encode()).hexdigest()

    self._model_blessing = standard_artifacts.ModelBlessing()
    self._model_blessing.uri = os.path.join(self._source_data_dir,
                                            'model_validator/blessed')
    self._model_blessing.set_int_custom_property('blessed', 1)

    self._inference_result = standard_artifacts.InferenceResult()
    self._prediction_log_dir = os.path.join(self._output_data_dir,
                                            'prediction_logs')
    self._inference_result.uri = self._prediction_log_dir

    # Create context
    self._tmp_dir = os.path.join(self._output_data_dir, '.temp')
    self._context = executor.Executor.Context(
        tmp_dir=self._tmp_dir, unique_id='2')
Code example #10
    def __init__(self,
                 examples: types.Channel,
                 schema: types.Channel,
                 model: types.Channel,
                 min_recall: float,
                 max_latency: float,
                 evaluation: Optional[types.Channel] = None,
                 blessing: Optional[types.Channel] = None,
                 instance_name: Optional[Text] = None):

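        # Create default output channels when the caller does not supply them.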
        blessing = blessing or types.Channel(
            type=standard_artifacts.ModelBlessing,
            artifacts=[standard_artifacts.ModelBlessing()])

        evaluation = evaluation or types.Channel(
            type=standard_artifacts.ModelEvaluation,
            artifacts=[standard_artifacts.ModelEvaluation()])

        spec = IndexEvaluatorSpec(examples=examples,
                                  schema=schema,
                                  model=model,
                                  evaluation=evaluation,
                                  blessing=blessing,
                                  min_recall=min_recall,
                                  max_latency=max_latency)

        super().__init__(spec=spec, instance_name=instance_name)
Code example #11
File: executor_test.py  Project: yongsheng268/tfx
    def setUp(self):
        self._source_data_dir = os.path.join(
            os.path.dirname(
                os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
            'components', 'testdata')
        self._output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)
        tf.gfile.MakeDirs(self._output_data_dir)
        self._model_export = standard_artifacts.Model()
        self._model_export.uri = os.path.join(self._source_data_dir,
                                              'trainer/current/')
        self._model_blessing = standard_artifacts.ModelBlessing()
        self._input_dict = {
            'model_export': [self._model_export],
            'model_blessing': [self._model_blessing],
        }

        self._model_push = standard_artifacts.PushedModel()
        self._model_push.uri = os.path.join(self._output_data_dir,
                                            'model_push')
        tf.gfile.MakeDirs(self._model_push.uri)
        self._output_dict = {
            'model_push': [self._model_push],
        }
        self._exec_properties = {
            'custom_config': {
                'ai_platform_serving_args': {
                    'model_name': 'model_name',
                    'project_id': 'project_id'
                },
            },
        }
        self._executor = Executor()
Code example #12
 def setUp(self):
     super(ComponentTest, self).setUp()
     self._examples = channel_utils.as_channel(
         [standard_artifacts.Examples()])
     self._model = channel_utils.as_channel([standard_artifacts.Model()])
     self._model_blessing = channel_utils.as_channel(
         [standard_artifacts.ModelBlessing()])
Code example #13
    def __init__(self,
                 model: types.Channel,
                 model_blessing: types.Channel,
                 slack_token: Text,
                 slack_channel_id: Text,
                 timeout_sec: int,
                 slack_blessing: Optional[types.Channel] = None,
                 instance_name: Optional[Text] = None):
        """Construct a SlackComponent.

    Args:
      model: A Channel of type `standard_artifacts.Model`, usually produced by
        a Trainer component.
      model_blessing: A Channel of type `standard_artifacts.ModelBlessing`,
        usually produced by a ModelValidator component.
      slack_token: A token used for setting up connection with Slack server.
      slack_channel_id: Slack channel id to communicate on.
      timeout_sec: Seconds to wait for response before default to reject.
      slack_blessing: Optional output channel of type
        `standard_artifacts.ModelBlessing` with result of blessing; will be
        created for you if not specified.
      instance_name: Optional unique instance name. Necessary if multiple
        SlackComponent instances are declared in the same pipeline.
    """
        slack_blessing = slack_blessing or types.Channel(
            type=standard_artifacts.ModelBlessing,
            artifacts=[standard_artifacts.ModelBlessing()])
        spec = SlackComponentSpec(slack_token=slack_token,
                                  slack_channel_id=slack_channel_id,
                                  timeout_sec=timeout_sec,
                                  model=model,
                                  model_blessing=model_blessing,
                                  slack_blessing=slack_blessing)
        super(SlackComponent, self).__init__(spec=spec,
                                             instance_name=instance_name)
Code example #14
  def __init__(self,
               examples: types.Channel,
               model: types.Channel,
               blessing: Optional[types.Channel] = None,
               name: Optional[Text] = None):
    """Construct a ModelValidator component.

    Args:
      examples: A Channel of 'ExamplesPath' type, usually produced by ExampleGen
        component.
      model: A Channel of 'ModelExportPath' type, usually produced by Trainer
        component.
      blessing: Optional output channel of 'ModelBlessingPath' for result of
        blessing.
      name: Optional unique name. Necessary if multiple ModelValidator
        components are declared in the same pipeline.
    """
    blessing = blessing or types.Channel(
        type=standard_artifacts.ModelBlessing,
        artifacts=[standard_artifacts.ModelBlessing()])
    spec = ModelValidatorSpec(
        examples=channel_utils.as_channel(examples),
        model=channel_utils.as_channel(model),
        blessing=blessing)
    super(ModelValidator, self).__init__(spec=spec, name=name)
Code example #15
File: executor_test.py  Project: suryaavala/tfx
    def setUp(self):
        super(ExecutorTest, self).setUp()
        self._source_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'testdata')
        self._output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)
        fileio.makedirs(self._output_data_dir)
        self._model_export = standard_artifacts.Model()
        self._model_export.uri = os.path.join(self._source_data_dir,
                                              'trainer/current')
        self._model_blessing = standard_artifacts.ModelBlessing()
        self._input_dict = {
            MODEL_KEY: [self._model_export],
            MODEL_BLESSING_KEY: [self._model_blessing],
        }

        self._model_push = standard_artifacts.PushedModel()
        self._model_push.uri = os.path.join(self._output_data_dir,
                                            'model_push')
        fileio.makedirs(self._model_push.uri)
        self._output_dict = {
            PUSHED_MODEL_KEY: [self._model_push],
        }
        self._serving_model_dir = os.path.join(self._output_data_dir,
                                               'serving_model_dir')
        fileio.makedirs(self._serving_model_dir)
        self._exec_properties = self._MakeExecProperties()
        self._executor = executor.Executor()
Code example #16
    def setUp(self):
        super(ExecutorTest, self).setUp()
        self._source_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'testdata')
        self._output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)
        self.component_id = 'test_component'

        # Create input dict.
        self._examples = standard_artifacts.Examples(split='unlabelled')
        self._examples.uri = os.path.join(self._source_data_dir,
                                          'csv_example_gen/unlabelled/')
        self._model_export = standard_artifacts.Model()
        self._model_export.uri = os.path.join(self._source_data_dir,
                                              'trainer/current/')

        self._model_blessing = standard_artifacts.ModelBlessing()
        self._model_blessing.uri = os.path.join(self._source_data_dir,
                                                'model_validator/blessed')
        self._model_blessing.set_int_custom_property('blessed', 1)

        self._inference_result = standard_artifacts.InferenceResult()
        self._prediction_log_dir = os.path.join(self._output_data_dir,
                                                'prediction_logs')
        self._inference_result.uri = self._prediction_log_dir

        # Create context
        self._tmp_dir = os.path.join(self._output_data_dir, '.temp')
        self._context = executor.Executor.Context(tmp_dir=self._tmp_dir,
                                                  unique_id='2')
Code example #17
  def setUp(self):
    super(ExecutorTest, self).setUp()
    self._source_data_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), 'testdata')
    self._output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)
    tf.io.gfile.makedirs(self._output_data_dir)
    self._model_export = standard_artifacts.Model()
    self._model_export.uri = os.path.join(self._source_data_dir,
                                          'trainer/current')
    self._model_blessing = standard_artifacts.ModelBlessing()
    self._input_dict = {
        executor.MODEL_KEY: [self._model_export],
        executor.MODEL_BLESSING_KEY: [self._model_blessing],
    }

    self._model_push = standard_artifacts.PushedModel()
    self._model_push.uri = os.path.join(self._output_data_dir, 'model_push')
    tf.io.gfile.makedirs(self._model_push.uri)
    self._output_dict = {
        executor.PUSHED_MODEL_KEY: [self._model_push],
    }
    self._serving_model_dir = os.path.join(self._output_data_dir,
                                           'serving_model_dir')
    tf.io.gfile.makedirs(self._serving_model_dir)
    self._exec_properties = {
        'push_destination':
            json_format.MessageToJson(
                pusher_pb2.PushDestination(
                    filesystem=pusher_pb2.PushDestination.Filesystem(
                        base_directory=self._serving_model_dir)),
                preserving_proto_field_name=True),
    }
    self._executor = executor.Executor()
Code example #18
 def setUp(self):
   super(PlaceholderUtilsTest, self).setUp()
   examples = [standard_artifacts.Examples()]
   examples[0].uri = "/tmp"
   examples[0].split_names = artifact_utils.encode_split_names(
       ["train", "eval"])
   serving_spec = infra_validator_pb2.ServingSpec()
   serving_spec.tensorflow_serving.tags.extend(["latest", "1.15.0-gpu"])
   self._resolution_context = placeholder_utils.ResolutionContext(
       exec_info=data_types.ExecutionInfo(
           input_dict={
               "model": [standard_artifacts.Model()],
               "examples": examples,
           },
           output_dict={"blessing": [standard_artifacts.ModelBlessing()]},
           exec_properties={
               "proto_property":
                   json_format.MessageToJson(
                       message=serving_spec,
                       sort_keys=True,
                       preserving_proto_field_name=True,
                       indent=0)
           },
           execution_output_uri="test_executor_output_uri",
           stateful_working_dir="test_stateful_working_dir",
           pipeline_node=pipeline_pb2.PipelineNode(
               node_info=pipeline_pb2.NodeInfo(
                   type=metadata_store_pb2.ExecutionType(
                       name="infra_validator"))),
           pipeline_info=pipeline_pb2.PipelineInfo(id="test_pipeline_id")))
Code example #19
File: executor_test.py  Project: yongsheng268/tfx
    def setUp(self):
        self._source_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'testdata')
        output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)
        self.component_id = 'test_component'

        # Create input dict.
        eval_examples = standard_artifacts.Examples(split='eval')
        eval_examples.uri = os.path.join(self._source_data_dir,
                                         'csv_example_gen/eval/')
        model = standard_artifacts.Model()
        model.uri = os.path.join(self._source_data_dir, 'trainer/current/')
        self._input_dict = {
            'examples': [eval_examples],
            'model': [model],
        }

        # Create output dict.
        self._blessing = standard_artifacts.ModelBlessing()
        self._blessing.uri = os.path.join(output_data_dir, 'blessing')
        self._output_dict = {'blessing': [self._blessing]}

        # Create context
        self._tmp_dir = os.path.join(output_data_dir, '.temp')
        self._context = executor.Executor.Context(tmp_dir=self._tmp_dir,
                                                  unique_id='2')
Code example #20
    def setUp(self):
        self._source_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'testdata')
        self._output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)
        tf.gfile.MakeDirs(self._output_data_dir)
        self._model_export = standard_artifacts.Model()
        self._model_export.uri = os.path.join(self._source_data_dir,
                                              'trainer/current/')
        self._model_blessing = standard_artifacts.ModelBlessing()
        self._input_dict = {
            'model_export': [self._model_export],
            'model_blessing': [self._model_blessing],
        }

        self._model_push = standard_artifacts.PushedModel()
        self._model_push.uri = os.path.join(self._output_data_dir,
                                            'model_push')
        tf.gfile.MakeDirs(self._model_push.uri)
        self._output_dict = {
            'model_push': [self._model_push],
        }
        self._serving_model_dir = os.path.join(self._output_data_dir,
                                               'serving_model_dir')
        tf.gfile.MakeDirs(self._serving_model_dir)
        self._exec_properties = {
            'push_destination':
            json_format.MessageToJson(
                pusher_pb2.PushDestination(
                    filesystem=pusher_pb2.PushDestination.Filesystem(
                        base_directory=self._serving_model_dir))),
        }
        self._executor = executor.Executor()
Code example #21
    def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties):
        source_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'testdata')
        output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)

        # Create input dict.
        examples = standard_artifacts.Examples()
        examples.uri = os.path.join(source_data_dir, 'csv_example_gen')
        examples.split_names = artifact_utils.encode_split_names(
            ['train', 'eval'])
        model = standard_artifacts.Model()
        model.uri = os.path.join(source_data_dir, 'trainer/current')
        input_dict = {
            EXAMPLES_KEY: [examples],
            MODEL_KEY: [model],
        }

        # Create output dict.
        eval_output = standard_artifacts.ModelEvaluation()
        eval_output.uri = os.path.join(output_data_dir, 'eval_output')
        blessing_output = standard_artifacts.ModelBlessing()
        blessing_output.uri = os.path.join(output_data_dir, 'blessing_output')
        output_dict = {
            EVALUATION_KEY: [eval_output],
            BLESSING_KEY: [blessing_output],
        }

        try:
            # Need to import the following module so that the fairness indicator
            # post-export metric is registered.  This may raise an ImportError if the
            # currently-installed version of TFMA does not support fairness
            # indicators.
            import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators  # pylint: disable=g-import-not-at-top, unused-variable
            exec_properties['fairness_indicator_thresholds'] = [
                0.1, 0.3, 0.5, 0.7, 0.9
            ]
        except ImportError:
            logging.warning(
                'Not testing fairness indicators because a compatible TFMA version '
                'is not installed.')

        # List needs to be serialized before being passed into Do function.
        exec_properties[EXAMPLE_SPLITS_KEY] = json_utils.dumps(None)

        # Run executor.
        evaluator = executor.Executor()
        evaluator.Do(input_dict, output_dict, exec_properties)

        # Check evaluator outputs.
        self.assertTrue(
            fileio.exists(os.path.join(eval_output.uri, 'eval_config.json')))
        self.assertTrue(fileio.exists(os.path.join(eval_output.uri,
                                                   'metrics')))
        self.assertTrue(fileio.exists(os.path.join(eval_output.uri, 'plots')))
        self.assertFalse(
            fileio.exists(os.path.join(blessing_output.uri, 'BLESSED')))
Code example #22
 def testConstruct(self):
     self._model = channel_utils.as_channel([standard_artifacts.Model()])
     self._model_blessing = channel_utils.as_channel(
         [standard_artifacts.ModelBlessing()])
     pusher = component.Pusher(model=self._model,
                               model_blessing=self._model_blessing)
     self.assertEqual(
         standard_artifacts.PushedModel.TYPE_NAME,
         pusher.outputs[standard_component_specs.PUSHED_MODEL_KEY].type_name)
Code example #23
 def _create_mock_artifact(self, aid: int, is_blessed: bool,
                           component_id: Text):
   model_blessing = standard_artifacts.ModelBlessing()
   model_blessing.id = aid
   model_blessing.set_string_custom_property('current_model', 'uri-%d' % aid)
   model_blessing.set_int_custom_property('current_model_id', aid)
   model_blessing.set_string_custom_property('component_id', component_id)
   model_blessing.set_int_custom_property('blessed', is_blessed)
   return model_blessing
Code example #24
    def setUp(self):
        super(KubeflowGCPIntegrationTest, self).setUp()

        # Example artifacts for testing.
        raw_train_examples = standard_artifacts.Examples(split='train')
        raw_train_examples.uri = os.path.join(
            self._intermediate_data_root,
            'csv_example_gen/examples/test-pipeline/train/')
        raw_eval_examples = standard_artifacts.Examples(split='eval')
        raw_eval_examples.uri = os.path.join(
            self._intermediate_data_root,
            'csv_example_gen/examples/test-pipeline/eval/')
        self._test_raw_examples = [raw_train_examples, raw_eval_examples]

        # Transformed Example artifacts for testing.
        transformed_train_examples = standard_artifacts.Examples(split='train')
        transformed_train_examples.uri = os.path.join(
            self._intermediate_data_root,
            'transform/transformed_examples/test-pipeline/train/')
        transformed_eval_examples = standard_artifacts.Examples(split='eval')
        transformed_eval_examples.uri = os.path.join(
            self._intermediate_data_root,
            'transform/transformed_examples/test-pipeline/eval/')
        self._test_transformed_examples = [
            transformed_train_examples, transformed_eval_examples
        ]

        # Schema artifact for testing.
        schema = standard_artifacts.Schema()
        schema.uri = os.path.join(self._intermediate_data_root,
                                  'schema_gen/output/test-pipeline/')
        self._test_schema = [schema]

        # TransformGraph artifact for testing.
        transform_graph = standard_artifacts.TransformGraph()
        transform_graph.uri = os.path.join(
            self._intermediate_data_root,
            'transform/transform_output/test-pipeline/')
        self._test_transform_graph = [transform_graph]

        # Model artifact for testing.
        model_1 = standard_artifacts.Model()
        model_1.uri = os.path.join(self._intermediate_data_root,
                                   'trainer/output/test-pipeline/1/')
        self._test_model_1 = [model_1]

        model_2 = standard_artifacts.Model()
        model_2.uri = os.path.join(self._intermediate_data_root,
                                   'trainer/output/test-pipeline/2/')
        self._test_model_2 = [model_2]

        # ModelBlessing artifact for testing.
        model_blessing = standard_artifacts.ModelBlessing()
        model_blessing.uri = os.path.join(
            self._intermediate_data_root,
            'model_validator/blessing/test-pipeline/')
        self._test_model_blessing = [model_blessing]
Code example #25
 def setUp(self):
     super().setUp()
     self._model = channel_utils.as_channel([standard_artifacts.Model()])
     self._model_blessing = channel_utils.as_channel(
         [standard_artifacts.ModelBlessing()])
     self._infra_blessing = channel_utils.as_channel(
         [standard_artifacts.InfraBlessing()])
     self._push_destination = pusher_pb2.PushDestination(
         filesystem=pusher_pb2.PushDestination.Filesystem(
             base_directory=self.get_temp_dir()))
Code example #26
File: component_test.py  Project: zw39125432/tfx
 def testConstruct(self):
   examples = standard_artifacts.Examples()
   model = standard_artifacts.Model()
   model_blessing = standard_artifacts.ModelBlessing()
   bulk_inferrer = component.BulkInferrer(
       examples=channel_utils.as_channel([examples]),
       model=channel_utils.as_channel([model]),
       model_blessing=channel_utils.as_channel([model_blessing]))
   self.assertEqual('InferenceResult',
                    bulk_inferrer.outputs['inference_result'].type_name)
Code example #27
    def setUp(self):
        super().setUp()
        self._source_data_dir = os.path.join(
            os.path.dirname(
                os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
            'components', 'testdata')
        self._output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)
        fileio.makedirs(self._output_data_dir)
        self._model_export = standard_artifacts.Model()
        self._model_export.uri = os.path.join(self._source_data_dir,
                                              'trainer/current')
        self._model_blessing = standard_artifacts.ModelBlessing()
        self._input_dict = {
            standard_component_specs.MODEL_KEY: [self._model_export],
            standard_component_specs.MODEL_BLESSING_KEY:
            [self._model_blessing],
        }

        self._model_push = standard_artifacts.PushedModel()
        self._model_push.uri = os.path.join(self._output_data_dir,
                                            'model_push')
        fileio.makedirs(self._model_push.uri)
        self._output_dict = {
            standard_component_specs.PUSHED_MODEL_KEY: [self._model_push],
        }
        # Dict format of exec_properties. custom_config needs to be serialized
        # before being passed into Do function.
        self._exec_properties = {
            'custom_config': {
                constants.SERVING_ARGS_KEY: {
                    'model_name': 'model_name',
                    'project_id': 'project_id'
                },
            },
            'push_destination': None,
        }
        self._container_image_uri_vertex = 'gcr.io/path/to/container'
        # Dict format of exec_properties for Vertex. custom_config needs to be
        # serialized before being passed into Do function.
        self._exec_properties_vertex = {
            'custom_config': {
                constants.SERVING_ARGS_KEY: {
                    'endpoint_name': 'endpoint_name',
                    'project_id': 'project_id',
                },
                constants.VERTEX_CONTAINER_IMAGE_URI_KEY:
                self._container_image_uri_vertex,
                constants.VERTEX_REGION_KEY: 'us-central1',
                constants.ENABLE_VERTEX_KEY: True,
            },
            'push_destination': None,
        }
        self._executor = executor.Executor()
Code example #28
    def testEvaluation(self, exec_properties, model_agnostic=False):
        source_data_dir = os.path.join(
            os.path.dirname(os.path.dirname(__file__)), 'testdata')
        output_data_dir = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName)

        # Create input dict.
        examples = standard_artifacts.Examples()
        examples.uri = os.path.join(source_data_dir, 'csv_example_gen')
        examples.split_names = artifact_utils.encode_split_names(
            ['train', 'eval'])
        baseline_model = standard_artifacts.Model()
        baseline_model.uri = os.path.join(source_data_dir, 'trainer/previous/')
        schema = standard_artifacts.Schema()
        schema.uri = os.path.join(source_data_dir, 'schema_gen')
        input_dict = {
            EXAMPLES_KEY: [examples],
            SCHEMA_KEY: [schema],
        }
        if not model_agnostic:
            model = standard_artifacts.Model()
            model.uri = os.path.join(source_data_dir, 'trainer/current')
            input_dict[MODEL_KEY] = [model]

        # Create output dict.
        eval_output = standard_artifacts.ModelEvaluation()
        eval_output.uri = os.path.join(output_data_dir, 'eval_output')
        blessing_output = standard_artifacts.ModelBlessing()
        blessing_output.uri = os.path.join(output_data_dir, 'blessing_output')
        output_dict = {
            EVALUATION_KEY: [eval_output],
            BLESSING_KEY: [blessing_output],
        }

        # Test multiple splits.
        exec_properties[EXAMPLE_SPLITS_KEY] = json_utils.dumps(
            ['train', 'eval'])

        if MODULE_FILE_KEY in exec_properties:
            exec_properties[MODULE_FILE_KEY] = os.path.join(
                source_data_dir, 'module_file', 'evaluator_module.py')

        # Run executor.
        evaluator = executor.Executor()
        evaluator.Do(input_dict, output_dict, exec_properties)

        # Check evaluator outputs.
        self.assertTrue(
            fileio.exists(os.path.join(eval_output.uri, 'eval_config.json')))
        self.assertTrue(fileio.exists(os.path.join(eval_output.uri,
                                                   'metrics')))
        self.assertTrue(fileio.exists(os.path.join(eval_output.uri, 'plots')))
        self.assertFalse(
            fileio.exists(os.path.join(blessing_output.uri, 'BLESSED')))
Code example #29
  def testCanChangePropertiesByNameIdMapping(self):
    model_blessing = standard_artifacts.ModelBlessing()
    model_blessing.set_int_custom_property(
        constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY,
        _EXPECTED_BASELINE_MODEL_INT_ID)
    model_blessing.set_int_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
        _EXPECTED_CURRENT_MODEL_INT_ID)

    expected_model_blessing = standard_artifacts.ModelBlessing()
    expected_model_blessing.set_string_custom_property(
        constants.ARTIFACT_PROPERTY_BASELINE_MODEL_ID_KEY,
        _EXPECTED_BASELINE_MODEL_STRING_ID)
    expected_model_blessing.set_string_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY,
        _EXPECTED_CURRENT_MODEL_STRING_ID)
    kubeflow_v2_entrypoint_utils.refactor_model_blessing(
        model_blessing, _TEST_NAME_FROM_ID)

    self.assertDictEqual(expected_model_blessing.to_json_dict(),
                         model_blessing.to_json_dict())
Code example #30
 def testConstruct(self):
     model = standard_artifacts.Model()
     model_blessing = standard_artifacts.ModelBlessing()
     output = ExportedModel()
     this_component = component.Export(
         function_name='component_test.pouet',
         model=channel_utils.as_channel([model]),
         model_blessing=channel_utils.as_channel([model_blessing]),
          output=channel_utils.as_channel([output])).with_id('Testing123')
     self.assertEqual(ExportedModel.TYPE_NAME,
                      this_component.outputs[OUTPUT_KEY].type_name)
     artifact_collection = this_component.outputs[OUTPUT_KEY].get()
     self.assertIsNotNone(artifact_collection)