Code example #1
File: prow_artifacts.py  Project: svalleru/testing
# Imports assumed by this snippet; util is a project-local helper module.
import logging
import os

from google.cloud import storage
def create_pr_symlink(args):
    """Create a 'symlink' in GCS pointing at the results for a PR.

  This is a null op if PROW environment variables indicate this is not a PR
  job.
  """
    gcs_client = storage.Client()
    # GCS layout is defined here:
    # https://github.com/kubernetes/test-infra/tree/master/gubernator#job-artifact-gcs-layout
    pull_number = os.getenv("PULL_NUMBER")
    if not pull_number:
        # Symlinks are only created for pull requests.
        return

    path = "pr-logs/directory/{job}/{build}.txt".format(
        job=os.getenv("JOB_NAME"), build=os.getenv("BUILD_NUMBER"))

    source = util.to_gcs_uri(args.bucket, path)
    target = get_gcs_dir(args.bucket)
    logging.info("Creating symlink %s pointing to %s", source, target)
    bucket = gcs_client.get_bucket(args.bucket)
    blob = bucket.blob(path)
    # The "symlink" is just a text file whose contents are the GCS URI of
    # the actual results directory.
    blob.upload_from_string(target)
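The snippet leans on two project-local helpers, util.to_gcs_uri and get_gcs_dir. A minimal sketch of what they might look like, assuming the gubernator job-artifact layout linked above (the real implementations live in the testing repo and may differ):

def to_gcs_uri(bucket, path):
    """Format a bucket name and object path as a gs:// URI."""
    return "gs://{0}/{1}".format(bucket, path.lstrip("/"))

def get_gcs_dir(bucket):
    """Return the GCS directory holding this PR job's results
    (hypothetical reconstruction of the gubernator layout)."""
    path = "pr-logs/pull/{owner}_{repo}/{pull}/{job}/{build}".format(
        owner=os.getenv("REPO_OWNER"), repo=os.getenv("REPO_NAME"),
        pull=os.getenv("PULL_NUMBER"), job=os.getenv("JOB_NAME"),
        build=os.getenv("BUILD_NUMBER"))
    return to_gcs_uri(bucket, path)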
Code example #2
File: prow_artifacts.py  Project: subodh101/testing
def check_no_errors(gcs_client, artifacts_dir):
    """Check that all the XML files exist and there were no errors.
  Args:
    gcs_client: The GCS client.
    artifacts_dir: The directory where artifacts should be stored.
  Returns:
    True if there were no errors and false otherwise.
  """
    bucket_name, prefix = util.split_gcs_uri(artifacts_dir)
    bucket = gcs_client.get_bucket(bucket_name)
    no_errors = True

    for b in bucket.list_blobs(prefix=os.path.join(prefix, "junit")):
        # Blob.name is the object name; Blob.path is a URL fragment, so use
        # the name when building the gs:// URI and checking the extension.
        full_path = util.to_gcs_uri(bucket_name, b.name)
        if os.path.splitext(b.name)[-1] != ".xml":
            logging.info("Skipping %s; not an xml file", full_path)
            continue
        logging.info("Checking %s", full_path)
        xml_contents = b.download_as_string()

        if test_util.get_num_failures(xml_contents) > 0:
            logging.info("Test failures in %s", full_path)
            no_errors = False

    return no_errors
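test_util.get_num_failures is another project-local helper. A plausible sketch, assuming junit-style XML where each testsuite element carries a failures attribute (the real helper may differ):

from xml.etree import ElementTree

def get_num_failures(xml_contents):
    """Count test failures reported in a junit XML string (sketch)."""
    root = ElementTree.fromstring(xml_contents)
    # Reports may use a <testsuite> root or a <testsuites> wrapper.
    suites = [root] if root.tag == "testsuite" else root.findall("testsuite")
    return sum(int(s.get("failures", 0)) for s in suites)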
Code example #3
# Imports assumed by this snippet; util, prow_artifacts, and argo_build_util
# are project-local modules, and NB_BUCKET is a constant defined elsewhere
# in the file.
import datetime
import logging
import os
import uuid

import yaml
from google.cloud import storage
from kubernetes import client as k8s_client
def run_papermill_job(
        notebook_path,
        name,
        namespace,  # pylint: disable=too-many-branches,too-many-statements
        repos,
        image):
    """Generate a K8s job to run a notebook using papermill

  Args:
    notebook_path: Path to the notebook. This should be in the form
      "{REPO_OWNER}/{REPO}/path/to/notebook.ipynb"
    name: Name for the K8s job
    namespace: The namespace where the job should run.
    repos: Which repos to checkout; if None or empty tries
      to infer based on PROW environment variables
    image: The docker image to run the notebook in.
  """

    util.maybe_activate_service_account()

    with open("job.yaml") as hf:
        job = yaml.safe_load(hf)

    if notebook_path.startswith("/"):
        raise ValueError(
            "notebook_path={0} should not start with /".format(notebook_path))

    # We need to check out the correct version of the code in presubmits
    # and postsubmits, so consult the Prow environment variables for the
    # appropriate values. See
    # https://github.com/kubernetes/test-infra/blob/45246b09ed105698aa8fb928b7736d14480def29/prow/jobs.md#job-environment-variables
    if not repos:
        repos = argo_build_util.get_repo_from_prow_env()

    if not repos:
        raise ValueError("Could not get repos from prow environment variable "
                         "and --repos isn't explicitly set")

    repos += ",kubeflow/testing@HEAD"

    logging.info("Repos set to %s", repos)
    job["spec"]["template"]["spec"]["initContainers"][0]["command"] = [
        "/usr/local/bin/checkout_repos.sh",
        "--repos=" + repos,
        "--src_dir=/src",
        "--depth=all",
    ]

    job["spec"]["template"]["spec"]["containers"][0]["image"] = image

    full_notebook_path = os.path.join("/src", notebook_path)
    job["spec"]["template"]["spec"]["containers"][0]["command"] = [
        "python3", "-m", "kubeflow.examples.notebook_tests.execute_notebook",
        "--notebook_path", full_notebook_path
    ]

    job["spec"]["template"]["spec"]["containers"][0][
        "workingDir"] = os.path.dirname(full_notebook_path)

    # The prow bucket to use for results/artifacts
    prow_bucket = prow_artifacts.PROW_RESULTS_BUCKET

    if os.getenv("REPO_OWNER") and os.getenv("REPO_NAME"):
        # Running under prow
        prow_dir = prow_artifacts.get_gcs_dir(prow_bucket)
        logging.info("Prow artifacts dir: %s", prow_dir)
        prow_dir = os.path.join(prow_dir, "artifacts")

        if os.getenv("TEST_TARGET_NAME"):
            prow_dir = os.path.join(prow_dir,
                                    os.getenv("TEST_TARGET_NAME").lstrip("/"))
        prow_bucket, prow_path = util.split_gcs_uri(prow_dir)

    else:
        prow_path = "notebook-test" + datetime.datetime.now().strftime(
            "%H%M%S")
        prow_path = prow_path + "-" + uuid.uuid4().hex[0:3]
        prow_dir = util.to_gcs_uri(prow_bucket, prow_path)

    # name is needed to build the output path, so default it before
    # first use.
    if not name:
        name = ("notebook-test-" +
                datetime.datetime.now().strftime("%H%M%S") +
                "-" + uuid.uuid4().hex[0:3])

    prow_path = os.path.join(prow_path, name + ".html")
    output_gcs = util.to_gcs_uri(NB_BUCKET, prow_path)

    job["spec"]["template"]["spec"]["containers"][0]["env"] = [
        {
            "name": "OUTPUT_GCS",
            "value": output_gcs
        },
        {
            "name": "PYTHONPATH",
            "value": "/src/kubeflow/testing/py:/src/kubeflow/examples/py"
        },
    ]

    logging.info("Notebook will be written to %s", output_gcs)
    util.load_kube_config(persist_config=False)

    job["metadata"]["name"] = name

    job["metadata"]["namespace"] = namespace

    # Create an API client object to talk to the K8s master.
    api_client = k8s_client.ApiClient()
    batch_api = k8s_client.BatchV1Api(api_client)

    logging.info("Creating job:\n%s", yaml.dump(job))
    actual_job = batch_api.create_namespaced_job(job["metadata"]["namespace"],
                                                 job)
    logging.info("Created job %s.%s:\n%s", namespace, name,
                 yaml.safe_dump(actual_job.to_dict()))

    final_job = util.wait_for_job(api_client,
                                  namespace,
                                  name,
                                  timeout=datetime.timedelta(minutes=30))

    logging.info("Final job:\n%s", yaml.safe_dump(final_job.to_dict()))

    # Download notebook html to artifacts
    logging.info("Copying %s to bucket %s", output_gcs, prow_bucket)

    storage_client = storage.Client()
    bucket = storage_client.get_bucket(NB_BUCKET)
    blob = bucket.get_blob(prow_path)

    destination_bucket = storage_client.get_bucket(prow_bucket)
    bucket.copy_blob(blob, destination_bucket)

    if not final_job.status.conditions:
        raise RuntimeError("Job {0}.{1} did not complete".format(
            namespace, name))

    last_condition = final_job.status.conditions[-1]

    if last_condition.type not in ["Complete"]:
        logging.error("Job didn't complete successfully")
        raise RuntimeError("Job {0}.{1} failed".format(namespace, name))
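run_papermill_job mutates a job.yaml template read from the working directory, indexing into its first init container and first main container, so the template must define both. A hypothetical invocation, assuming such a template is present and the Prow environment variables are set (the notebook path, namespace, and image below are placeholders, not values from the source):

run_papermill_job(
    notebook_path="kubeflow/examples/path/to/notebook.ipynb",
    name="",  # empty, so a unique name is generated
    namespace="kubeflow-test-infra",
    repos=None,  # inferred from the Prow environment variables
    image="gcr.io/kubeflow-ci/test-worker:latest")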
Code example #4
def test_xgboost_synthetic(
        record_xml_attribute,
        name,
        namespace,  # pylint: disable=too-many-branches,too-many-statements
        repos,
        image,
        notebook_artifacts_dir):
    """Generate the K8s Job and submit it."""
    util.set_pytest_junit(record_xml_attribute, "test_xgboost_synthetic")

    util.maybe_activate_service_account()

    with open("job.yaml") as hf:
        job = yaml.safe_load(hf)

    # We need to check out the correct version of the code in presubmits
    # and postsubmits, so consult the Prow environment variables for the
    # appropriate values. See
    # https://github.com/kubernetes/test-infra/blob/45246b09ed105698aa8fb928b7736d14480def29/prow/jobs.md#job-environment-variables
    if not repos:
        repos = argo_build_util.get_repo_from_prow_env()

    if not repos:
        raise ValueError("Could not get repos from the Prow environment "
                         "variables and --repos isn't explicitly set")

    repos += ",kubeflow/testing@HEAD"
    logging.info("Repos set to %s", repos)
    job["spec"]["template"]["spec"]["initContainers"][0]["command"] = [
        "/usr/local/bin/checkout_repos.sh",
        "--repos=" + repos,
        "--src_dir=/src",
        "--depth=all",
    ]

    nb_bucket = "kubeflow-ci-deployment"
    # JOB_TYPE and HOSTNAME are assumed to be set; this test is expected
    # to run under Prow.
    nb_path = os.path.join("xgboost_synthetic_testing", os.getenv("JOB_TYPE"),
                           os.getenv("HOSTNAME"), "notebook.html")
    output_gcs = util.to_gcs_uri(nb_bucket, nb_path)
    logging.info("Tested notebook will be outputed to: %s", output_gcs)
    job["spec"]["template"]["spec"]["containers"][0]["env"] = [
        {
            "name": "PYTHONPATH",
            "value": "/src/kubeflow/testing/py"
        },
        {
            "name": "OUTPUT_GCS",
            "value": output_gcs
        },
    ]
    job["spec"]["template"]["spec"]["containers"][0]["image"] = image
    util.load_kube_config(persist_config=False)

    if name:
        job["metadata"]["name"] = name
    else:
        job["metadata"]["name"] = ("xgboost-test-" +
                                   datetime.datetime.now().strftime("%H%M%S") +
                                   "-" + uuid.uuid4().hex[0:3])
        name = job["metadata"]["name"]

    job["metadata"]["namespace"] = namespace

    # Create an API client object to talk to the K8s master.
    api_client = k8s_client.ApiClient()
    batch_api = k8s_client.BatchV1Api(api_client)

    logging.info("Creating job:\n%s", yaml.dump(job))
    actual_job = batch_api.create_namespaced_job(job["metadata"]["namespace"],
                                                 job)
    logging.info("Created job %s.%s:\n%s", namespace, name,
                 yaml.safe_dump(actual_job.to_dict()))

    final_job = util.wait_for_job(api_client,
                                  namespace,
                                  name,
                                  timeout=datetime.timedelta(minutes=30))

    logging.info("Final job:\n%s", yaml.safe_dump(final_job.to_dict()))

    if not final_job.status.conditions:
        raise RuntimeError("Job {0}.{1} did not complete".format(
            namespace, name))

    last_condition = final_job.status.conditions[-1]

    # Download notebook html to artifacts
    notebook_artifacts_path = os.path.join(notebook_artifacts_dir,
                                           "notebook.html")
    logging.info("Writing notebook artifact to: %s", notebook_artifacts_path)
    os.makedirs(notebook_artifacts_dir, exist_ok=True)
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(nb_bucket)
    blob = bucket.get_blob(nb_path)
    blob.download_to_filename(notebook_artifacts_path)

    if last_condition.type not in ["Complete"]:
        logging.error("Job didn't complete successfully")
        raise RuntimeError("Job {0}.{1} failed".format(namespace, name))
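Besides pytest's built-in record_xml_attribute fixture, the test expects name, namespace, repos, image, and notebook_artifacts_dir fixtures. A hypothetical conftest.py sketch that would supply them from command-line options (the option names and defaults are assumptions, not the repo's actual conftest):

import pytest

def pytest_addoption(parser):
    # Hypothetical options mirroring the fixture names used by the test.
    parser.addoption("--name", default="")
    parser.addoption("--namespace", default="kubeflow-test-infra")
    parser.addoption("--repos", default="")
    parser.addoption("--image", default="")
    parser.addoption("--notebook_artifacts_dir", default="/tmp/artifacts")

@pytest.fixture
def name(request):
    return request.config.getoption("--name")

@pytest.fixture
def namespace(request):
    return request.config.getoption("--namespace")

@pytest.fixture
def repos(request):
    return request.config.getoption("--repos")

@pytest.fixture
def image(request):
    return request.config.getoption("--image")

@pytest.fixture
def notebook_artifacts_dir(request):
    return request.config.getoption("--notebook_artifacts_dir")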