Code Example #1
def main(argv=None):
    parser = argparse.ArgumentParser(description='Submit PySpark Job')
    parser.add_argument('--region',
                        type=str,
                        help='The region where the cluster launches.')
    parser.add_argument('--jobflow_id',
                        type=str,
                        help='The name of the cluster to run job.')
    parser.add_argument('--job_name', type=str, help='The name of the Spark job.')
    parser.add_argument('--py_file',
                        type=str,
                        help='A path to a pyspark file run during the step')
    parser.add_argument('--input', type=str, help='File path of the dataset.')
    parser.add_argument('--output',
                        type=str,
                        help='Output path of the result files.')
    parser.add_argument('--output_file',
                        type=str,
                        help='S3 URI of the training job results.')

    args = parser.parse_args()

    logging.getLogger().setLevel(logging.INFO)
    client = _utils.get_client(args.region)
    logging.info('Submitting job to %s...', args.jobflow_id)
    spark_args = [args.input, args.output]
    step_id = _utils.submit_pyspark_job(client, args.jobflow_id, args.job_name,
                                        args.py_file, spark_args)
    logging.info('Job request submitted. Waiting for completion...')
    _utils.wait_for_job(client, args.jobflow_id, step_id)

    Path('/output.txt').write_text(str(step_id))
    Path(args.output_file).parent.mkdir(parents=True, exist_ok=True)
    Path(args.output_file).write_text(str(args.output))
    logging.info('Job completed.')
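
The two EMR examples (#1 and #6) assume the usual imports (argparse, logging, pathlib.Path) plus a component-local _utils module whose implementation is not shown on this page. A minimal sketch of what those helpers could look like on top of boto3 follows; the function names mirror the calls above, but the bodies are illustrative assumptions, not the component's actual code:

# Illustrative sketch only -- the real _utils helpers may differ.
# Assumes boto3 is installed and AWS credentials are configured.
import time

import boto3


def get_client(region):
    """Create an EMR client for the given region."""
    return boto3.client('emr', region_name=region)


def submit_pyspark_job(client, jobflow_id, job_name, py_file, spark_args):
    """Add a spark-submit step for a PySpark script and return its step ID."""
    response = client.add_job_flow_steps(
        JobFlowId=jobflow_id,
        Steps=[{
            'Name': job_name,
            'ActionOnFailure': 'CONTINUE',
            'HadoopJarStep': {
                'Jar': 'command-runner.jar',
                'Args': ['spark-submit', py_file] + list(spark_args),
            },
        }])
    return response['StepIds'][0]


def wait_for_job(client, jobflow_id, step_id, poll_seconds=30):
    """Poll the step until it reaches a terminal state."""
    while True:
        state = client.describe_step(
            ClusterId=jobflow_id,
            StepId=step_id)['Step']['Status']['State']
        if state == 'COMPLETED':
            return
        if state in ('CANCELLED', 'FAILED', 'INTERRUPTED'):
            raise RuntimeError('EMR step %s ended in state %s' % (step_id, state))
        time.sleep(poll_seconds)

The JAR-based variant in example #6 would add the step with 'Args': ['spark-submit', '--class', main_class, jar_path] + spark_args instead of the PySpark file.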
Code Example #2
File: analyze.py  Project: sukeesh/pipelines
def main(argv=None):
  parser = argparse.ArgumentParser(description='ML Analyzer')
  parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='Which zone to run the analyzer.')
  parser.add_argument('--cluster', type=str, help='The name of the cluster to run job.')
  parser.add_argument('--output', type=str, help='GCS path to use for output.')
  parser.add_argument('--train', type=str, help='GCS path of the training csv file.')
  parser.add_argument('--schema', type=str, help='GCS path of the json schema file.')
  parser.add_argument('--output-dir-uri-output-path',
                      type=str,
                      default='/output.txt',
                      help='Local output path for the file containing the output dir URI.')
  args = parser.parse_args()

  code_path = os.path.dirname(os.path.realpath(__file__))
  runfile_source = os.path.join(code_path, 'analyze_run.py')
  dest_files = _utils.copy_resources_to_gcs([runfile_source], args.output)
  try:
    api = _utils.get_client()
    print('Submitting job...')
    spark_args = ['--output', args.output, '--train', args.train, '--schema', args.schema]
    job_id = _utils.submit_pyspark_job(
        api, args.project, args.region, args.cluster, dest_files[0], spark_args)
    print('Job request submitted. Waiting for completion...')
    _utils.wait_for_job(api, args.project, args.region, job_id)
    Path(args.output_dir_uri_output_path).parent.mkdir(parents=True, exist_ok=True)
    Path(args.output_dir_uri_output_path).write_text(args.output)

    print('Job completed.')
  finally:
    _utils.remove_resources_from_gcs(dest_files)
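
The Dataproc examples (#2 through #5, #7 and #8) call a different _utils module built around the Dataproc API (plus os, json, pathlib.Path and, for the predictor, tensorflow's file_io). A rough sketch of the submit/wait pair, assuming the google-api-python-client discovery client and the public Dataproc v1 job resource; the helper bodies are illustrative, not the component's actual implementation:

# Illustrative sketch only, assuming google-api-python-client is installed and
# application-default credentials are available.
import time

from googleapiclient import discovery


def get_client():
    """Build a Dataproc v1 API client."""
    return discovery.build('dataproc', 'v1')


def submit_pyspark_job(api, project, region, cluster, py_file, spark_args):
    """Submit a PySpark job to the cluster and return its job ID."""
    job = {
        'placement': {'clusterName': cluster},
        'pysparkJob': {
            'mainPythonFileUri': py_file,
            'args': list(spark_args),
        },
    }
    response = api.projects().regions().jobs().submit(
        projectId=project, region=region, body={'job': job}).execute()
    return response['reference']['jobId']


def wait_for_job(api, project, region, job_id, poll_seconds=30):
    """Poll the job until it reaches a terminal state."""
    while True:
        job = api.projects().regions().jobs().get(
            projectId=project, region=region, jobId=job_id).execute()
        state = job['status']['state']
        if state == 'DONE':
            return
        if state in ('ERROR', 'CANCELLED'):
            raise RuntimeError('Dataproc job %s ended in state %s' % (job_id, state))
        time.sleep(poll_seconds)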
Code Example #3
def main(argv=None):
  parser = argparse.ArgumentParser(description='ML Trainer')
  parser.add_argument('--project', type=str, help='Google Cloud project ID to use.')
  parser.add_argument('--region', type=str, help='Which zone to run the analyzer.')
  parser.add_argument('--cluster', type=str, help='The name of the cluster to run job.')
  parser.add_argument('--package', type=str,
                      help='GCS Path of XGBoost distributed trainer package.')
  parser.add_argument('--output', type=str, help='GCS path to use for output.')
  parser.add_argument('--conf', type=str, help='GCS path of the training json config file.')
  parser.add_argument('--rounds', type=int, help='Number of rounds to train.')
  parser.add_argument('--workers', type=int, help='Number of workers to use for training.')
  parser.add_argument('--train', type=str, help='GCS path of the training libsvm file pattern.')
  parser.add_argument('--eval', type=str, help='GCS path of the eval libsvm file pattern.')
  parser.add_argument('--analysis', type=str, help='GCS path of the analysis input.')
  parser.add_argument('--target', type=str, help='Target column name.')
  args = parser.parse_args()

  logging.getLogger().setLevel(logging.INFO)
  api = _utils.get_client()
  logging.info('Submitting job...')
  spark_args = [args.conf, str(args.rounds), str(args.workers), args.analysis, args.target,
                args.train, args.eval, args.output]
  job_id = _utils.submit_spark_job(
      api, args.project, args.region, args.cluster, [args.package],
      'ml.dmlc.xgboost4j.scala.example.spark.XGBoostTrainer', spark_args)
  logging.info('Job request submitted. Waiting for completion...')
  _utils.wait_for_job(api, args.project, args.region, job_id)
  with open('/output.txt', 'w') as f:
    f.write(args.output)

  logging.info('Job completed.')
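
For the JAR-based trainer and predictor (examples #3, #4 and #7), only the job payload changes: the Dataproc job carries a sparkJob section with the package URI and main class instead of a pysparkJob section. A hedged sketch of that variant, in the same illustrative style as above:

# Illustrative sketch only; mirrors the submit_spark_job call in example #3.
def submit_spark_job(api, project, region, cluster, jar_files, main_class, spark_args):
    """Submit a Spark (JAR) job to the cluster and return its job ID."""
    job = {
        'placement': {'clusterName': cluster},
        'sparkJob': {
            'jarFileUris': list(jar_files),  # e.g. [args.package]
            'mainClass': main_class,         # e.g. the XGBoostTrainer class above
            'args': list(spark_args),
        },
    }
    response = api.projects().regions().jobs().submit(
        projectId=project, region=region, body={'job': job}).execute()
    return response['reference']['jobId']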
Code Example #4
def main(argv=None):
    parser = argparse.ArgumentParser(description='ML Predictor')
    parser.add_argument('--project',
                        type=str,
                        help='Google Cloud project ID to use.')
    parser.add_argument('--region',
                        type=str,
                        help='Which zone to run the analyzer.')
    parser.add_argument('--cluster',
                        type=str,
                        help='The name of the cluster to run job.')
    parser.add_argument(
        '--package',
        type=str,
        help='GCS Path of XGBoost distributed trainer package.')
    parser.add_argument('--model',
                        type=str,
                        help='GCS path of the model file.')
    parser.add_argument('--output',
                        type=str,
                        help='GCS path to use for output.')
    parser.add_argument('--predict',
                        type=str,
                        help='GCS path of prediction libsvm file.')
    parser.add_argument('--analysis',
                        type=str,
                        help='GCS path of the analysis input.')
    parser.add_argument('--target', type=str, help='Target column name.')
    args = parser.parse_args()

    logging.getLogger().setLevel(logging.INFO)
    api = _utils.get_client()
    logging.info('Submitting job...')
    spark_args = [
        args.model, args.predict, args.analysis, args.target, args.output
    ]
    job_id = _utils.submit_spark_job(
        api, args.project, args.region, args.cluster, [args.package],
        'ml.dmlc.xgboost4j.scala.example.spark.XGBoostPredictor', spark_args)
    logging.info('Job request submitted. Waiting for completion...')
    _utils.wait_for_job(api, args.project, args.region, job_id)
    prediction_results = os.path.join(args.output, 'part-*.csv')
    with open('/output.txt', 'w') as f:
        f.write(prediction_results)

    with file_io.FileIO(os.path.join(args.output, 'schema.json'), 'r') as f:
        schema = json.load(f)

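    # Metadata consumed by the Kubeflow Pipelines UI: it renders the prediction
    # CSV files on GCS as a table artifact, using the column names from the schema.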
    metadata = {
        'outputs': [{
            'type': 'table',
            'storage': 'gcs',
            'format': 'csv',
            'header': [x['name'] for x in schema],
            'source': prediction_results
        }]
    }
    with open('/mlpipeline-ui-metadata.json', 'w') as f:
        json.dump(metadata, f)
    logging.info('Job completed.')
Code Example #5
def main(argv=None):
    parser = argparse.ArgumentParser(description='ML Analyzer')
    parser.add_argument('--project',
                        type=str,
                        help='Google Cloud project ID to use.')
    parser.add_argument('--region',
                        type=str,
                        help='Which zone to run the analyzer.')
    parser.add_argument('--cluster',
                        type=str,
                        help='The name of the cluster to run job.')
    parser.add_argument('--output',
                        type=str,
                        help='GCS path to use for output.')
    parser.add_argument('--train',
                        type=str,
                        help='GCS path of the training csv file.')
    parser.add_argument('--schema',
                        type=str,
                        help='GCS path of the json schema file.')
    args = parser.parse_args()

    code_path = os.path.dirname(os.path.realpath(__file__))
    dirname = os.path.basename(__file__).split('.')[0]
    runfile_source = os.path.join(code_path, dirname, 'run.py')
    dest_files = _utils.copy_resources_to_gcs([runfile_source], args.output)
    try:
        api = _utils.get_client()
        print('Submitting job...')
        spark_args = [
            '--output', args.output, '--train', args.train, '--schema',
            args.schema
        ]
        job_id = _utils.submit_pyspark_job(api, args.project, args.region,
                                           args.cluster, dest_files[0],
                                           spark_args)
        print('Job request submitted. Waiting for completion...')
        _utils.wait_for_job(api, args.project, args.region, job_id)
        with open('/output.txt', 'w') as f:
            f.write(args.output)

        print('Job completed.')
    finally:
        _utils.remove_resources_from_gcs(dest_files)
Code Example #6
def main(argv=None):
    parser = argparse.ArgumentParser(description='Submit Spark Job')
    parser.add_argument('--region',
                        type=str,
                        help='The region where the cluster launches.')
    parser.add_argument('--jobflow_id',
                        type=str,
                        help='The name of the cluster to run job.')
    parser.add_argument('--job_name', type=str, help='The name of the Spark job.')
    parser.add_argument('--jar_path',
                        type=str,
                        help='A path to a JAR file run during the step')
    parser.add_argument(
        '--main_class',
        type=str,
        default=None,
        help=
        'The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.'
    )
    parser.add_argument('--input', type=str, help='File path of the dataset.')
    parser.add_argument('--output',
                        type=str,
                        help='Output path of the result files')
    parser.add_argument('--output_file',
                        type=str,
                        help='S3 URI of the training job results.')
    args = parser.parse_args()

    logging.getLogger().setLevel(logging.INFO)
    client = _utils.get_client(args.region)
    logging.info('Submitting job...')
    spark_args = [args.input, args.output]
    step_id = _utils.submit_spark_job(client, args.jobflow_id, args.job_name,
                                      args.jar_path, args.main_class,
                                      spark_args)
    logging.info('Job request submitted. Waiting for completion...')
    _utils.wait_for_job(client, args.jobflow_id, step_id)

    Path('/output.txt').write_text(str(step_id))
    Path(args.output_file).parent.mkdir(parents=True, exist_ok=True)
    Path(args.output_file).write_text(str(args.output))
    logging.info('Job completed.')
Code Example #7
def main(argv=None):
    parser = argparse.ArgumentParser(description='ML Predictor')
    parser.add_argument('--project',
                        type=str,
                        help='Google Cloud project ID to use.')
    parser.add_argument('--region',
                        type=str,
                        help='Which zone to run the analyzer.')
    parser.add_argument('--cluster',
                        type=str,
                        help='The name of the cluster to run job.')
    parser.add_argument(
        '--package',
        type=str,
        help='GCS Path of XGBoost distributed trainer package.')
    parser.add_argument('--model',
                        type=str,
                        help='GCS path of the model file.')
    parser.add_argument('--output',
                        type=str,
                        help='GCS path to use for output.')
    parser.add_argument('--predict',
                        type=str,
                        help='GCS path of prediction libsvm file.')
    parser.add_argument('--analysis',
                        type=str,
                        help='GCS path of the analysis input.')
    parser.add_argument('--target', type=str, help='Target column name.')
    parser.add_argument(
        '--prediction-results-uri-pattern-output-path',
        type=str,
        default='/output.txt',
        help=
        'Local output path for the file containing prediction results URI pattern.'
    )
    parser.add_argument(
        '--ui-metadata-output-path',
        type=str,
        default='/mlpipeline-ui-metadata.json',
        help=
        'Local output path for the file containing UI metadata JSON structure.'
    )

    args = parser.parse_args()

    logging.getLogger().setLevel(logging.INFO)
    api = _utils.get_client()
    logging.info('Submitting job...')
    spark_args = [
        args.model, args.predict, args.analysis, args.target, args.output
    ]
    job_id = _utils.submit_spark_job(
        api, args.project, args.region, args.cluster, [args.package],
        'ml.dmlc.xgboost4j.scala.example.spark.XGBoostPredictor', spark_args)
    logging.info('Job request submitted. Waiting for completion...')
    _utils.wait_for_job(api, args.project, args.region, job_id)
    prediction_results_uri_pattern = os.path.join(args.output, 'part-*.csv')
    Path(args.prediction_results_uri_pattern_output_path).parent.mkdir(
        parents=True, exist_ok=True)
    Path(args.prediction_results_uri_pattern_output_path).write_text(
        prediction_results_uri_pattern)

    with file_io.FileIO(os.path.join(args.output, 'schema.json'), 'r') as f:
        schema = json.load(f)

    metadata = {
        'outputs': [{
            'type': 'table',
            'storage': 'gcs',
            'format': 'csv',
            'header': [x['name'] for x in schema],
            'source': prediction_results_uri_pattern
        }]
    }
    Path(args.ui_metadata_output_path).parent.mkdir(parents=True,
                                                    exist_ok=True)
    Path(args.ui_metadata_output_path).write_text(json.dumps(metadata))
    logging.info('Job completed.')
Code Example #8
def main(argv=None):
    parser = argparse.ArgumentParser(description='ML Transformer')
    parser.add_argument('--project',
                        type=str,
                        help='Google Cloud project ID to use.')
    parser.add_argument('--region',
                        type=str,
                        help='Which zone to run the analyzer.')
    parser.add_argument('--cluster',
                        type=str,
                        help='The name of the cluster to run job.')
    parser.add_argument('--output',
                        type=str,
                        help='GCS path to use for output.')
    parser.add_argument('--train',
                        type=str,
                        help='GCS path of the training csv file.')
    parser.add_argument('--eval',
                        type=str,
                        help='GCS path of the eval csv file.')
    parser.add_argument('--analysis',
                        type=str,
                        help='GCS path of the analysis results.')
    parser.add_argument('--target', type=str, help='Target column name.')
    args = parser.parse_args()

    # Remove any existing [output]/train and [output]/eval directories. This cleanup
    # happens here rather than in the runtime code, because the runtime code should
    # stay portable to on-prem environments while deleting GCS paths requires gsutil.
    _utils.delete_directory_from_gcs(os.path.join(args.output, 'train'))
    _utils.delete_directory_from_gcs(os.path.join(args.output, 'eval'))

    code_path = os.path.dirname(os.path.realpath(__file__))
    dirname = os.path.basename(__file__).split('.')[0]
    runfile_source = os.path.join(code_path, dirname, 'run.py')
    dest_files = _utils.copy_resources_to_gcs([runfile_source], args.output)
    try:
        api = _utils.get_client()
        print('Submitting job...')
        spark_args = [
            '--output', args.output, '--analysis', args.analysis, '--target',
            args.target
        ]
        if args.train:
            spark_args.extend(['--train', args.train])
        if args.eval:
            spark_args.extend(['--eval', args.eval])

        job_id = _utils.submit_pyspark_job(api, args.project, args.region,
                                           args.cluster, dest_files[0],
                                           spark_args)
        print('Job request submitted. Waiting for completion...')
        _utils.wait_for_job(api, args.project, args.region, job_id)

        with open('/output_train.txt', 'w') as f:
            f.write(os.path.join(args.output, 'train', 'part-*'))
        with open('/output_eval.txt', 'w') as f:
            f.write(os.path.join(args.output, 'eval', 'part-*'))

        print('Job completed.')
    finally:
        _utils.remove_resources_from_gcs(dest_files)