def print_culprit_finder_pipeline(project_name, tasks, good_bazel_commit,
                                  bad_bazel_commit, needs_clean, repeat_times):
    """Print a Buildkite pipeline YAML with one bisect step per task.

    Each step runs ``culprit_finder.py runner`` on the task's platform,
    bisecting Bazel between ``good_bazel_commit`` and ``bad_bazel_commit``.
    The resulting pipeline is dumped as YAML to stdout.
    """
    steps = []
    for task_name in tasks:
        platform_name = get_platform(project_name, task_name)
        platform_info = bazelci.PLATFORMS[platform_name]
        label = "{0} Bisecting for {1}".format(
            platform_info["emoji-name"], project_name)
        # Optional flags render as empty strings when unset.
        clean_flag = "--needs_clean" if needs_clean else ""
        repeat_flag = "--repeat_times=" + str(repeat_times) if repeat_times else ""
        command = (
            '%s culprit_finder.py runner --project_name="%s" --task_name=%s --good_bazel_commit=%s --bad_bazel_commit=%s %s %s'
            % (
                platform_info["python"],
                project_name,
                task_name,
                good_bazel_commit,
                bad_bazel_commit,
                clean_flag,
                repeat_flag,
            ))
        steps.append(
            bazelci.create_step(
                label,
                [
                    bazelci.fetch_bazelcipy_command(),
                    fetch_culprit_finder_py_command(),
                    command,
                ],
                platform_name,
            ))
    print(yaml.dump({"steps": steps}))
# Example #2
def print_culprit_finder_pipeline(project_name, platform_name,
                                  good_bazel_commit, bad_bazel_commit):
    """Print a single-step Buildkite pipeline that bisects Bazel on one platform.

    The step is built as a raw dict (label/command/agents) rather than via a
    helper, and the pipeline is dumped as YAML to stdout.
    """
    platform_info = PLATFORMS[platform_name]
    # Fall back to the platform itself when no dedicated host platform is set.
    host_platform = platform_info.get("host-platform", platform_name)
    command = (
        '%s culprit_finder.py runner --project_name="%s" --platform_name=%s --good_bazel_commit=%s --bad_bazel_commit=%s'
        % (
            bazelci.python_binary(platform_name),
            project_name,
            platform_name,
            good_bazel_commit,
            bad_bazel_commit,
        ))
    step = {
        "label": platform_info["emoji-name"] +
                 " Bisecting for {0}".format(project_name),
        "command": [
            bazelci.fetch_bazelcipy_command(),
            fetch_culprit_finder_py_command(),
            command,
        ],
        "agents": {
            "kind": "worker",
            "java": platform_info["java"],
            # Strip any Java-variant suffix to get the bare OS identifier.
            "os": bazelci.rchop(host_platform, "_nojava", "_java8", "_java9",
                                "_java10"),
        },
    }
    print(yaml.dump({"steps": [step]}))
def print_culprit_finder_pipeline(
    project_name, platform_name, good_bazel_commit, bad_bazel_commit, needs_clean
):
    """Print a one-step Buildkite pipeline bisecting Bazel for *project_name*.

    The step runs ``culprit_finder.py runner`` on *platform_name*, optionally
    passing ``--needs_clean``; the pipeline YAML is written to stdout.
    """
    emoji = PLATFORMS[platform_name]["emoji-name"]
    label = "{0} Bisecting for {1}".format(emoji, project_name)
    # The clean flag collapses to an empty string when not requested.
    clean_flag = "--needs_clean" if needs_clean else ""
    command = (
        '%s culprit_finder.py runner --project_name="%s" --platform_name=%s --good_bazel_commit=%s --bad_bazel_commit=%s %s'
        % (
            bazelci.python_binary(platform_name),
            project_name,
            platform_name,
            good_bazel_commit,
            bad_bazel_commit,
            clean_flag,
        )
    )
    pipeline_steps = [
        bazelci.create_step(
            label,
            [
                bazelci.fetch_bazelcipy_command(),
                fetch_culprit_finder_py_command(),
                command,
            ],
            platform_name,
        )
    ]
    print(yaml.dump({"steps": pipeline_steps}))
# Example #4
def _ci_step_for_platform_and_commits(bazel_commits, platform, project,
                                      extra_options):
    """Perform bazel-bench for the platform-project combination.
  Uploads results to BigQuery.

  Args:
    bazel_commits: a list of strings: bazel commits to be benchmarked.
    platform: a string: the platform to benchmark on.
    project: an object: contains the information of the project to be
      tested on.
    extra_options: a string: extra bazel-bench options.

  Return:
    An object: the result of applying bazelci.create_step to wrap the
      command to be executed by buildkite-agent.
  """
    project_source = _get_clone_path(project["git_repository"], platform)
    bazel_source = _get_clone_path(BAZEL_REPOSITORY, platform)
    commits_csv = ",".join(bazel_commits)

    # Assemble the benchmark invocation; "--" separates bazel-bench's own
    # flags from the bazel command it should benchmark.
    bench_args = [
        "bazel",
        "run",
        "benchmark",
        "--",
        "--bazel_commits=" + commits_csv,
        "--bazel_source=" + bazel_source,
        "--project_source=" + project_source,
        "--platform=" + platform,
        "--collect_memory",
        "--data_directory=" + DATA_DIRECTORY,
        extra_options,
        "--",
        project["bazel_command"],
    ]
    bazel_bench_command = " ".join(bench_args)

    commands = [bazelci.fetch_bazelcipy_command()]
    commands += _bazel_bench_env_setup_command(platform, commits_csv)
    commands.append(bazel_bench_command)

    label = "{} Running bazel-bench on project: {}".format(
        bazelci.PLATFORMS[platform]["emoji-name"], project["name"])
    return bazelci.create_step(label, commands, platform)
def add_presubmit_jobs(module_name, module_version, task_config,
                       pipeline_steps):
    """Append one BCR presubmit step to ``pipeline_steps`` per task.

    Each step runs ``bcr_presubmit.py runner`` for ``module_name`` at
    ``module_version`` on the platform resolved for that task.
    """
    for task_name in task_config:
        platform_name = bazelci.get_platform_for_task(task_name, task_config)
        platform_info = bazelci.PLATFORMS[platform_name]
        label = "{0} {1}@{2}".format(
            platform_info["emoji-name"], module_name, module_version)
        command = (
            '%s bcr_presubmit.py runner --module_name="%s" --module_version="%s" --task=%s'
            % (
                platform_info["python"],
                module_name,
                module_version,
                task_name,
            ))
        step_commands = [
            bazelci.fetch_bazelcipy_command(),
            fetch_bcr_presubmit_py_command(),
            command,
        ]
        pipeline_steps.append(
            bazelci.create_step(label, step_commands, platform_name))
# Example #6
def _ci_step_for_platform_and_commits(bazel_commits, platform, project,
                                      extra_options, date, bucket,
                                      bigquery_table):
    """Perform bazel-bench for the platform-project combination.
    Uploads results to BigQuery.

    Args:
        bazel_commits: a list of strings: bazel commits to be benchmarked.
        platform: a string: the platform to benchmark on.
        project: an object: contains the information of the project to be
          tested on.
        extra_options: a string: extra bazel-bench options.
        date: the date of the commits.
        bucket: the GCP Storage bucket to upload data to.
        bigquery_table: the table to upload data to. In the form `project:table_identifier`.

    Return:
        An object: the result of applying bazelci.create_step to wrap the
          command to be executed by buildkite-agent.
    """
    project_source = _get_clone_path(project["git_repository"], platform)
    bazel_source = _get_clone_path(BAZEL_REPOSITORY, platform)
    commits_csv = ",".join(bazel_commits)

    # Benchmark invocation; the trailing "--" separates bazel-bench flags
    # from the project's bazel command being measured.
    bazel_bench_command = " ".join([
        "bazel",
        "run",
        "benchmark",
        "--",
        "--bazel_commits=" + commits_csv,
        "--bazel_source=" + bazel_source,
        "--project_source=" + project_source,
        "--project_label=" + project["project_label"],
        "--platform=" + platform,
        "--collect_memory",
        "--data_directory=" + DATA_DIRECTORY,
        "--csv_file_name=" + BAZEL_BENCH_RESULT_FILENAME,
        "--collect_json_profile",
        "--aggregate_json_profiles",
        extra_options,
        "--",
        project["bazel_command"],
    ])
    # TODO(leba): Use GCP Python client instead of gsutil.
    # TODO(https://github.com/bazelbuild/bazel-bench/issues/46): Include task-specific shell commands and build flags.

    # Mirror everything under DATA_DIRECTORY to Storage: the raw data, the
    # aggregated JSON profile and the individual JSON profiles.
    storage_subdir = "{}/{}/{}/".format(
        project["storage_subdir"], date.strftime("%Y/%m/%d"), platform)
    upload_output_files_storage_command = " ".join([
        "gsutil",
        "-m",
        "cp",
        "-r",
        "{}/*".format(DATA_DIRECTORY),
        "gs://{}/{}".format(bucket, storage_subdir),
    ])
    # Load the perf CSV into BigQuery; the first row is a header.
    upload_to_big_query_command = " ".join([
        "bq",
        "load",
        "--skip_leading_rows=1",
        "--source_format=CSV",
        bigquery_table,
        "{}/perf_data.csv".format(DATA_DIRECTORY),
    ])

    commands = [bazelci.fetch_bazelcipy_command()]
    commands += _bazel_bench_env_setup_command(platform, commits_csv)
    commands += [
        bazel_bench_command,
        upload_output_files_storage_command,
        upload_to_big_query_command,
    ]
    label = bazelci.PLATFORMS[platform]["emoji-name"] + project["project_label"]
    return bazelci.create_step(label, commands, platform)