Example #1
        pipeline.append(trainer)

        # Finally, call evaluate() on the workflow DAG outputs. This will
        # automatically append Evaluators to compute metrics from the given
        # SavedModel and 'eval' TF Examples.
        self.evaluate(
            pipeline,
            examples=task.train_and_eval_examples,
            model=trainer.outputs.model)


if __name__ == '__main__':

  run_config = dict(
      pipeline_name=config.PIPELINE_NAME + '_openML',
      data_dir=config.OTHER_DOWNLOAD_DIR,
  )

  if config.USE_KUBEFLOW:
    # We need the string "KubeflowDagRunner" in this file to appease the
    # validator used in `tfx create pipeline`.
    # Validator: https://github.com/tensorflow/tfx/blob/v0.22.0/tfx/tools/cli/handler/base_handler.py#L105
    nitroml.main(
        pipeline_root=config.PIPELINE_ROOT,
        tfx_runner=nitroml.get_default_kubeflow_dag_runner(),
        **run_config)
  else:
    # This example has not been tested with engines other than Kubeflow.
    nitroml.main(**run_config)
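
This and the following examples read their deployment settings from a shared `config` module. The constant names in the sketch below are the ones these snippets reference; the values are placeholders filled in for illustration, not the project's actual configuration.

import os

# Toggle between the Kubeflow runner and nitroml.main()'s default runner.
USE_KUBEFLOW = True

# Base pipeline name; examples append suffixes such as '_openML' or '_titanic'.
PIPELINE_NAME = 'examples'

# Hypothetical GCS bucket; substitute your own.
GCS_BUCKET_NAME = 'my-bucket'

PIPELINE_ROOT = os.path.join('gs://', GCS_BUCKET_NAME, PIPELINE_NAME)
TF_DOWNLOAD_DIR = os.path.join('gs://', GCS_BUCKET_NAME, 'tensorflow-datasets')
OTHER_DOWNLOAD_DIR = os.path.join('gs://', GCS_BUCKET_NAME, 'other-datasets')
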
Example #2
                        examples=task.train_and_eval_examples,
                        preprocessor=ad.BasicPreprocessor()))

        # Define a Trainer to train our model on the given task.
        trainer = self.add(
            at.AutoTrainer(
                problem_statement=task.problem_statement,
                transformed_examples=autodata.outputs.transformed_examples,
                transform_graph=autodata.outputs.transform_graph,
                schema=autodata.outputs.schema,
                train_steps=1000,
                eval_steps=500,
                enable_tuning=enable_tuning))
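        # NOTE: `enable_tuning` is a parameter of the (truncated) enclosing
        # benchmark method. It presumably toggles a hyperparameter-tuning step
        # inside AutoTrainer before training; that reading is inferred from
        # the flag name, not stated in this excerpt.
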
        # Finally, call evaluate() on the workflow DAG outputs. This will
        # automatically append Evaluators to compute metrics from the given
        # SavedModel and 'eval' TF Examples.
        self.evaluate(task=task, model=trainer.outputs.model)


if __name__ == '__main__':
    if config.USE_KUBEFLOW:
        # We need the string "KubeflowDagRunner" in this file to appease the
        # validator used in `tfx create pipeline`.
        # Validator: https://github.com/tensorflow/tfx/blob/v0.22.0/tfx/tools/cli/handler/base_handler.py#L105
        nitroml.main(pipeline_name=config.PIPELINE_NAME + '_titanic',
                     pipeline_root=config.PIPELINE_ROOT,
                     data_dir=config.TF_DOWNLOAD_DIR,
                     tfx_runner=nitroml.get_default_kubeflow_dag_runner())
    else:
        nitroml.main()
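
Both examples above are the tail end of a benchmark definition. For orientation, here is a minimal skeleton of the kind of class these excerpts belong to; the class name is illustrative, and the `nitroml.Benchmark` base class with a `benchmark()` method is an assumption based on the NitroML examples these snippets appear to come from, not verbatim project code.

import nitroml
import config  # The shared settings module sketched earlier.


class ExampleBenchmark(nitroml.Benchmark):  # Illustrative name.
    """Skeleton of a benchmark like the ones excerpted above."""

    def benchmark(self, data_dir: str = None):
        # The real examples build a task, an AutoData preprocessor, and an
        # AutoTrainer here via self.add(...), then pass the trained model to
        # self.evaluate(), as the snippets above show.
        pass


if __name__ == '__main__':
    if config.USE_KUBEFLOW:
        nitroml.main(pipeline_root=config.PIPELINE_ROOT,
                     tfx_runner=nitroml.get_default_kubeflow_dag_runner())
    else:
        nitroml.main()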

Example #3
                task_pipeline = task.components + autodata.components + test_meta_components + [
                    trainer
                ]

                # Finally, call evaluate() on the workflow DAG outputs. This will
                # automatically append Evaluators to compute metrics from the given
                # SavedModel and 'eval' TF Examples.
                self.evaluate(task_pipeline,
                              examples=task.train_and_eval_examples,
                              model=trainer.outputs.model)


if __name__ == '__main__':

    metalearning_algorithm = 'nearest_neighbor'
    run_config = dict(pipeline_name=f'metalearning_{metalearning_algorithm}',
                      data_dir=config.OTHER_DOWNLOAD_DIR,
                      algorithm=metalearning_algorithm)

    if config.USE_KUBEFLOW:
        # We need the string "KubeflowDagRunner" in this file to appease the
        # validator used in `tfx create pipeline`.
        # Validator: https://github.com/tensorflow/tfx/blob/v0.22.0/tfx/tools/cli/handler/base_handler.py#L105
        nitroml.main(pipeline_root=os.path.join(config.PIPELINE_ROOT,
                                                run_config['pipeline_name']),
                     tfx_runner=nitroml.get_default_kubeflow_dag_runner(),
                     **run_config)
    else:
        # This example has not been tested with engines other than Kubeflow.
        nitroml.main(**run_config)
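
For reference, once `**run_config` is expanded, the Kubeflow branch above is equivalent to this explicit call:

nitroml.main(
    pipeline_name='metalearning_nearest_neighbor',
    data_dir=config.OTHER_DOWNLOAD_DIR,
    algorithm='nearest_neighbor',
    pipeline_root=os.path.join(config.PIPELINE_ROOT,
                               'metalearning_nearest_neighbor'),
    tfx_runner=nitroml.get_default_kubeflow_dag_runner())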