def launch(self) -> data_types.ExecutionInfo:
    """Execute the component, includes driver, executor and publisher.

    Returns:
      The execution decision of the launch.
    """
    absl.logging.info('Running driver for %s',
                      self._component_info.component_id)
    execution_decision = self._run_driver(self._input_dict, self._output_dict,
                                          self._exec_properties)

    if not execution_decision.use_cached_results:
      absl.logging.info('Running executor for %s',
                        self._component_info.component_id)
      self._run_executor(execution_decision.execution_id,
                         execution_decision.input_dict,
                         execution_decision.output_dict,
                         execution_decision.exec_properties)

    absl.logging.info('Running publisher for %s',
                      self._component_info.component_id)
    self._run_publisher(execution_decision.use_cached_results,
                        execution_decision.execution_id,
                        execution_decision.input_dict,
                        execution_decision.output_dict)

    return data_types.ExecutionInfo(
        input_dict=execution_decision.input_dict,
        output_dict=execution_decision.output_dict,
        exec_properties=execution_decision.exec_properties,
        execution_id=execution_decision.execution_id)
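The ExecutionInfo returned by launch() bundles the resolved inputs, outputs, exec properties and execution id. Below is a minimal sketch of constructing and reading one directly, using only the fields exercised in these snippets; the artifact choice, URIs and property values are illustrative, and the import paths are assumptions based on the modules the snippets reference.

from tfx.orchestration import data_types  # assumed path for the data_types used above
from tfx.types import standard_artifacts

model_run = standard_artifacts.ModelRun()
model_run.uri = '/tmp/model_run'  # illustrative URI

exec_info = data_types.ExecutionInfo(
    input_dict={},
    output_dict={'model_run': [model_run]},
    exec_properties={'module_file': 'module_file'},  # illustrative property
    execution_id='42')

# Downstream tooling reads back the same four fields.
for key, artifacts in exec_info.output_dict.items():
    for artifact in artifacts:
        print(key, artifact.uri)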
def testDumpUiMetadata(self):
    trainer = Trainer(
        examples=Channel(type=standard_artifacts.Examples),
        module_file='module_file',
        train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=100),
        eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=50))
    model_run = standard_artifacts.ModelRun()
    model_run.uri = 'model_run_uri'
    exec_info = data_types.ExecutionInfo(
        input_dict={},
        output_dict={'model_run': [model_run]},
        exec_properties={},
        execution_id='id')
    ui_metadata_path = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName, 'json')
    fileio.makedirs(os.path.dirname(ui_metadata_path))
    container_entrypoint._dump_ui_metadata(trainer, exec_info,
                                           ui_metadata_path)
    with open(ui_metadata_path) as f:
        ui_metadata = json.load(f)
        self.assertEqual('tensorboard', ui_metadata['outputs'][-1]['type'])
        self.assertEqual('model_run_uri',
                         ui_metadata['outputs'][-1]['source'])
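The assertions above imply the dumped file is a Kubeflow-style UI metadata object whose last 'outputs' entry points a TensorBoard viewer at the ModelRun URI. A minimal sketch of that expected shape; whether additional entries precede it is left open here.

# Expected shape of the file written by _dump_ui_metadata, inferred from the
# assertions in testDumpUiMetadata; earlier entries in 'outputs' may also exist.
ui_metadata_example = {
    'outputs': [
        {'type': 'tensorboard', 'source': 'model_run_uri'},
    ],
}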
Example #3
    def launch(self) -> data_types.ExecutionInfo:
        """Execute the component, includes driver, executor and publisher.

    Returns:
      The execution decision of the launch.
    """
        absl.logging.info('Running driver for %s',
                          self._component_info.component_id)
        execution_decision = self._run_driver(self._input_dict,
                                              self._output_dict,
                                              self._exec_properties)

        if not execution_decision.use_cached_results:
            absl.logging.info('Running executor for %s',
                              self._component_info.component_id)
            # Make deep copies of input_dict and exec_properties because they
            # should be treated as immutable in this context.
            # output_dict can still be changed, specifically its properties.
            self._run_executor(
                execution_decision.execution_id,
                copy.deepcopy(execution_decision.input_dict),
                execution_decision.output_dict,
                copy.deepcopy(execution_decision.exec_properties))

        absl.logging.info('Running publisher for %s',
                          self._component_info.component_id)
        self._run_publisher(output_dict=execution_decision.output_dict)

        return data_types.ExecutionInfo(
            input_dict=execution_decision.input_dict,
            output_dict=execution_decision.output_dict,
            exec_properties=execution_decision.exec_properties,
            execution_id=execution_decision.execution_id)
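Unlike the first variant, this launch() deep-copies input_dict and exec_properties before handing them to the executor, so executor-side mutation cannot leak back into the launcher's view. A self-contained sketch of that effect, using plain dicts in place of the real artifact lists (a simplification for illustration):

import copy

resolved_inputs = {'examples': [{'uri': '/data/examples'}]}
executor_view = copy.deepcopy(resolved_inputs)

# Even if an executor mutates its copy of the inputs...
executor_view['examples'][0]['uri'] = '/tmp/scratch'

# ...the launcher's record of the resolved inputs is unchanged.
assert resolved_inputs['examples'][0]['uri'] == '/data/examples'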
Example #4
    def testDumpUiMetadata(self):
        trainer = pipeline_pb2.PipelineNode()
        trainer.node_info.type.name = 'tfx.components.trainer.component.Trainer'
        model_run_out_spec = pipeline_pb2.OutputSpec(
            artifact_spec=pipeline_pb2.OutputSpec.ArtifactSpec(
                type=metadata_store_pb2.ArtifactType(
                    name=standard_artifacts.ModelRun.TYPE_NAME)))
        trainer.outputs.outputs['model_run'].CopyFrom(model_run_out_spec)

        model_run = standard_artifacts.ModelRun()
        model_run.uri = 'model_run_uri'
        exec_info = data_types.ExecutionInfo(
            input_dict={},
            output_dict={'model_run': [model_run]},
            exec_properties={},
            execution_id='id')
        ui_metadata_path = os.path.join(
            os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
            self._testMethodName, 'json')
        fileio.makedirs(os.path.dirname(ui_metadata_path))
        container_entrypoint._dump_ui_metadata(trainer, exec_info,
                                               ui_metadata_path)
        with open(ui_metadata_path) as f:
            ui_metadata = json.load(f)
            self.assertEqual('tensorboard', ui_metadata['outputs'][-1]['type'])
            self.assertEqual('model_run_uri',
                             ui_metadata['outputs'][-1]['source'])
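Both tests read the dumped file back with json.load and inspect the trailing entry of 'outputs'. A small standalone helper in the same spirit; the function name and structure are illustrative and not part of container_entrypoint.

import json


def last_tensorboard_source(ui_metadata_path):
    """Returns the 'source' of the last tensorboard entry, or None if absent."""
    with open(ui_metadata_path) as f:
        outputs = json.load(f).get('outputs', [])
    for entry in reversed(outputs):
        if entry.get('type') == 'tensorboard':
            return entry.get('source')
    return None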