def print_culprit_finder_pipeline(project_name, tasks, good_bazel_commit, bad_bazel_commit, needs_clean, repeat_times):
    """Print a Buildkite pipeline (as YAML on stdout) that bisects Bazel for every task of a project.

    One step is emitted per task; each step fetches the CI scripts and then runs
    culprit_finder.py between the known-good and known-bad Bazel commits.
    """
    steps = []
    for task_name in tasks:
        platform_name = get_platform(project_name, task_name)
        platform_info = bazelci.PLATFORMS[platform_name]
        label = platform_info["emoji-name"] + " Bisecting for {0}".format(project_name)
        # Optional flags collapse to empty strings when unset, leaving harmless
        # extra spaces in the shell command.
        runner_command = (
            '%s culprit_finder.py runner --project_name="%s" --task_name=%s --good_bazel_commit=%s --bad_bazel_commit=%s %s %s'
            % (
                platform_info["python"],
                project_name,
                task_name,
                good_bazel_commit,
                bad_bazel_commit,
                "--needs_clean" if needs_clean else "",
                ("--repeat_times=" + str(repeat_times)) if repeat_times else "",
            )
        )
        steps.append(
            bazelci.create_step(
                label,
                [bazelci.fetch_bazelcipy_command(), fetch_culprit_finder_py_command(), runner_command],
                platform_name,
            )
        )
    print(yaml.dump({"steps": steps}))
def _report_generation_step(date, project_label, bucket, bigquery_table, platform, report_name, update_latest=False, upload_report=False):
    """Build the step that generates the daily report for one project.

    When both upload_report and update_latest are set, the dated report on GCS
    is also copied to the project's fixed "latest" path (GCS has no symlinks).
    Returns the result of bazelci.create_step wrapping the commands.
    """
    generate_command = [
        "bazel",
        "run",
        "report:generate_report",
        "--",
        "--date={}".format(date),
        "--project={}".format(project_label),
        "--storage_bucket={}".format(bucket),
        "--bigquery_table={}".format(bigquery_table),
        "--report_name={}".format(report_name),
        "--upload_report={}".format(upload_report),
    ]
    commands = [" ".join(generate_command)]
    if upload_report and update_latest:
        date_dir = date.strftime("%Y/%m/%d")
        dated_path_gcs = "gs://{}/{}/{}/{}.html".format(bucket, project_label, date_dir, report_name)
        latest_path_gcs = "gs://{}/{}/report_latest.html".format(bucket, project_label)
        commands.append(" ".join(["gsutil", "cp", dated_path_gcs, latest_path_gcs]))
    label = "Generating report on {} for project: {}.".format(date, project_label)
    return bazelci.create_step(label, commands, platform)
def print_steps_for_failing_jobs(build_number):
    """Print a pipeline (YAML on stdout) that retries each failing job once per incompatible flag.

    For every (incompatible flag, failing job) pair, the job's original command
    is re-run with `--incompatible_flag=<flag>` appended to its second element.
    """
    build_info = get_build_info(build_number)
    failing_jobs = get_failing_jobs(build_info)
    steps = []
    for flag in list(bazelci.fetch_incompatible_flags().keys()):
        for job in failing_jobs:
            retry_command = list(job["command"])
            retry_command[1] += " --incompatible_flag=" + flag
            steps.append(
                bazelci.create_step("%s: %s" % (flag, job["name"]), retry_command, job["platform"])
            )
    print(yaml.dump({"steps": steps}))
def print_culprit_finder_pipeline(
    project_name, platform_name, good_bazel_commit, bad_bazel_commit, needs_clean
):
    """Print a single-step Buildkite pipeline (YAML on stdout) bisecting Bazel for one project/platform."""
    # NOTE(review): this uses bare PLATFORMS while sibling helpers use
    # bazelci.PLATFORMS — presumably imported at module level; confirm.
    label = PLATFORMS[platform_name]["emoji-name"] + " Bisecting for {0}".format(project_name)
    runner_command = (
        '%s culprit_finder.py runner --project_name="%s" --platform_name=%s --good_bazel_commit=%s --bad_bazel_commit=%s %s'
        % (
            bazelci.python_binary(platform_name),
            project_name,
            platform_name,
            good_bazel_commit,
            bad_bazel_commit,
            "--needs_clean" if needs_clean else "",
        )
    )
    step = bazelci.create_step(
        label,
        [bazelci.fetch_bazelcipy_command(), fetch_culprit_finder_py_command(), runner_command],
        platform_name,
    )
    print(yaml.dump({"steps": [step]}))
def _ci_step_for_platform_and_commits(bazel_commits, platform, project, extra_options):
    """Build the CI step that runs bazel-bench for one platform/project combination.

    Args:
        bazel_commits: list of Bazel commit strings to benchmark.
        platform: platform name to benchmark on.
        project: dict with the project's info (git_repository, bazel_command, name).
        extra_options: extra bazel-bench options as a single string.

    Returns:
        The result of bazelci.create_step wrapping the benchmark command.
    """
    project_clone_path = _get_clone_path(project["git_repository"], platform)
    bazel_clone_path = _get_clone_path(BAZEL_REPOSITORY, platform)
    commits_csv = ",".join(bazel_commits)
    bench_command = " ".join([
        "bazel",
        "run",
        "benchmark",
        "--",
        "--bazel_commits=%s" % commits_csv,
        "--bazel_source=%s" % bazel_clone_path,
        "--project_source=%s" % project_clone_path,
        "--platform=%s" % platform,
        "--collect_memory",
        "--data_directory=%s" % DATA_DIRECTORY,
        extra_options,
        "--",
        project["bazel_command"],
    ])
    # Fetch CI scripts, prepare the benchmarking environment, then benchmark.
    commands = [bazelci.fetch_bazelcipy_command()]
    commands += _bazel_bench_env_setup_command(platform, commits_csv)
    commands.append(bench_command)
    label = (
        bazelci.PLATFORMS[platform]["emoji-name"]
        + " Running bazel-bench on project: %s" % project["name"]
    )
    return bazelci.create_step(label, commands, platform)
def add_presubmit_jobs(module_name, module_version, task_config, pipeline_steps):
    """Append one BCR presubmit step per task to pipeline_steps (mutated in place).

    Each step fetches the CI scripts and runs bcr_presubmit.py for the given
    module@version on the task's platform.
    """
    for task_name in task_config:
        platform_name = bazelci.get_platform_for_task(task_name, task_config)
        platform_info = bazelci.PLATFORMS[platform_name]
        label = platform_info["emoji-name"] + " {0}@{1}".format(module_name, module_version)
        runner_command = (
            '%s bcr_presubmit.py runner --module_name="%s" --module_version="%s" --task=%s'
            % (
                platform_info["python"],
                module_name,
                module_version,
                task_name,
            )
        )
        pipeline_steps.append(
            bazelci.create_step(
                label,
                [bazelci.fetch_bazelcipy_command(), fetch_bcr_presubmit_py_command(), runner_command],
                platform_name,
            )
        )
def print_steps_for_failing_jobs(build_info):
    """Print a pipeline (YAML on stdout) retrying each failing job per incompatible flag, capped at the Buildkite job limit.

    Jobs beyond BUILDKITE_MAX_JOBS_LIMIT are counted but not registered; the
    number of skipped jobs is reported to stderr via bazelci.eprint.
    """
    failing_jobs = get_failing_jobs(build_info)
    steps = []
    total = 0
    for flag in list(bazelci.fetch_incompatible_flags().keys()):
        for job in failing_jobs:
            total += 1
            # Keep counting past the limit so we can report how many were skipped.
            if total > BUILDKITE_MAX_JOBS_LIMIT:
                continue
            retry_command = list(job["command"])
            retry_command[1] += " --incompatible_flag=" + flag
            steps.append(
                bazelci.create_step("%s: %s" % (flag, job["name"]), retry_command, job["platform"])
            )
    if total > BUILDKITE_MAX_JOBS_LIMIT:
        bazelci.eprint(
            "We only allow "
            + str(BUILDKITE_MAX_JOBS_LIMIT)
            + " jobs to be registered at once, skipping "
            + str(total - BUILDKITE_MAX_JOBS_LIMIT)
            + " jobs."
        )
    print(yaml.dump({"steps": steps}))
def _report_generation_step(date, project_label, bucket, platform):
    """Build the step that generates the daily report and refreshes the "latest" copy.

    The dated report on GCS is unconditionally copied to the project's fixed
    report_latest.html path (GCS has no symlinks). Returns the result of
    bazelci.create_step wrapping the commands.
    """
    date_dir = date.strftime("%Y/%m/%d")
    dated_path_gcs = "gs://{}/{}/{}/report.html".format(bucket, project_label, date_dir)
    latest_path_gcs = "gs://{}/{}/report_latest.html".format(bucket, project_label)
    commands = [
        " ".join([
            "python3.6",
            "report/generate_report.py",
            "--date={}".format(date),
            "--project={}".format(project_label),
            "--storage_bucket={}".format(bucket),
        ]),
        " ".join(["gsutil", "cp", dated_path_gcs, latest_path_gcs]),
    ]
    label = "Generating report on {} for project: {}.".format(date, project_label)
    return bazelci.create_step(label, commands, platform)
def _ci_step_for_platform_and_commits(bazel_commits, platform, project, extra_options, date, bucket, bigquery_table):
    """Perform bazel-bench for the platform-project combination.

    Uploads results to BigQuery and the raw output files to GCS.

    Args:
        bazel_commits: a list of strings: bazel commits to be benchmarked.
        platform: a string: the platform to benchmark on.
        project: an object: contains the information of the project to be
            tested on (git_repository, project_label, storage_subdir,
            bazel_command).
        extra_options: a string: extra bazel-bench options.
        date: the date of the commits.
        bucket: the GCP Storage bucket to upload data to.
        bigquery_table: the table to upload data to. In the form
            `project:table_identifier`.

    Return:
        An object: the result of applying bazelci.create_step to wrap the
        command to be executed by buildkite-agent.
    """
    project_clone_path = _get_clone_path(project["git_repository"], platform)
    bazel_clone_path = _get_clone_path(BAZEL_REPOSITORY, platform)
    # Assemble the bazel-bench invocation; everything after the final "--" is
    # the command bazel-bench runs against the project.
    bazel_bench_command = " ".join([
        "bazel",
        "run",
        "benchmark",
        "--",
        "--bazel_commits=%s" % ",".join(bazel_commits),
        "--bazel_source=%s" % bazel_clone_path,
        "--project_source=%s" % project_clone_path,
        "--project_label=%s" % project["project_label"],
        "--platform=%s" % platform,
        "--collect_memory",
        "--data_directory=%s" % DATA_DIRECTORY,
        "--csv_file_name=%s" % BAZEL_BENCH_RESULT_FILENAME,
        "--collect_json_profile",
        "--aggregate_json_profiles",
        extra_options,
        "--",
        project["bazel_command"],
    ])
    # TODO(leba): Use GCP Python client instead of gsutil.
    # TODO(https://github.com/bazelbuild/bazel-bench/issues/46): Include task-specific shell commands and build flags.
    # Upload everything under DATA_DIRECTORY to Storage.
    # This includes the raw data, aggr JSON profile and the JSON profiles
    # themselves.
    storage_subdir = "{}/{}/{}/".format(project["storage_subdir"], date.strftime("%Y/%m/%d"), platform)
    upload_output_files_storage_command = " ".join([
        "gsutil",
        "-m",
        "cp",
        "-r",
        "{}/*".format(DATA_DIRECTORY),
        "gs://{}/{}".format(bucket, storage_subdir),
    ])
    # Load the benchmark CSV into BigQuery, skipping the header row.
    # NOTE(review): this hard-codes perf_data.csv while the benchmark writes
    # BAZEL_BENCH_RESULT_FILENAME — presumably these are the same file; confirm.
    upload_to_big_query_command = " ".join([
        "bq",
        "load",
        "--skip_leading_rows=1",
        "--source_format=CSV",
        bigquery_table,
        "{}/perf_data.csv".format(DATA_DIRECTORY),
    ])
    # Order matters: fetch CI scripts, set up the bench environment, run the
    # benchmark, then upload to GCS and BigQuery.
    commands = (
        [bazelci.fetch_bazelcipy_command()]
        + _bazel_bench_env_setup_command(platform, ",".join(bazel_commits))
        + [bazel_bench_command, upload_output_files_storage_command, upload_to_big_query_command]
    )
    # NOTE(review): no separator between the emoji and the project label,
    # unlike sibling steps — confirm that is intentional.
    label = bazelci.PLATFORMS[platform]["emoji-name"] + project["project_label"]
    return bazelci.create_step(label, commands, platform)