Example #1
import pathlib

import bazelci  # CI helper module from bazelbuild/continuous-integration, fetched alongside this script

# scratch_file(), get_task_config(), and BCR_REPO_DIR are assumed to be defined elsewhere in this module.


def _get_platforms(project_name):
    """Get the platforms this project runs on in BazelCI.

    Args:
      project_name: a string; the name of the project, e.g. "Bazel".

    Returns:
      A list of strings: the platforms for this project.
    """
    http_config = bazelci.DOWNSTREAM_PROJECTS_PRODUCTION[project_name][
        "http_config"]
    configs = bazelci.fetch_configs(http_config, None)
    tasks = configs["tasks"]
    return [bazelci.get_platform_for_task(k, tasks[k]) for k in tasks]
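
A minimal usage sketch, not part of the original example: it assumes bazelci.py has been fetched so the import above resolves, and that "Bazel" is a key in bazelci.DOWNSTREAM_PROJECTS_PRODUCTION; the platform names in the comment are illustrative.

# Usage sketch (hypothetical driver):
for platform in _get_platforms("Bazel"):
    print(platform)  # e.g. "ubuntu2004", "macos", "windows" (illustrative)
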
def create_test_repo(module_name, module_version, task):
    configs = get_task_config(module_name, module_version)
    platform = bazelci.get_platform_for_task(task, configs.get("tasks", None))
    # TODO(pcloudy): We use the "downstream root" as the repo root, find a better root path for BCR presubmit.
    root = pathlib.Path(bazelci.downstream_projects_root(platform))
    scratch_file(root, "WORKSPACE")
    scratch_file(root, "BUILD")
    # TODO(pcloudy): Should we test this module as the root module? Maybe we should, once we support dev dependencies,
    # because if the module is not the root module, its dev dependencies are ignored, which can break test targets.
    # Another workaround is to copy the dev dependencies into the generated MODULE.bazel.
    scratch_file(root, "MODULE.bazel", [
        "bazel_dep(name = '%s', version = '%s')" %
        (module_name, module_version)
    ])
    scratch_file(root, ".bazelrc", [
        "build --experimental_enable_bzlmod",
        "build --registry=%s" % BCR_REPO_DIR.as_uri(),
    ])
    return root
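
A hedged sketch of exercising the scratch repo: the module name, version, and task name below are hypothetical, and invoking Bazel through subprocess is an assumption, not part of the original presubmit flow.

import subprocess

# Hypothetical inputs: a module/version published in the BCR plus one task name.
repo_root = create_test_repo("rules_foo", "1.0.0", "ubuntu2004")
# The scratch repo depends on the module under test, so building exercises it.
subprocess.run(["bazel", "build", "//..."], cwd=repo_root, check=True)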
Example #3
import bazelci  # CI helper module from bazelbuild/continuous-integration


def _get_platforms(project_name, whitelist):
    """Get the platforms this project runs on in BazelCI.

    Filter the results against a whitelist and remove duplicates.

    Args:
      project_name: a string; the name of the project, e.g. "Bazel".
      whitelist: a list of strings: the whitelist of supported platforms.

    Returns:
      A set of strings: the platforms for this project.
    """
    http_config = bazelci.DOWNSTREAM_PROJECTS_PRODUCTION[project_name][
        "http_config"]
    configs = bazelci.fetch_configs(http_config, None)
    tasks = configs["tasks"]
    ci_platforms_for_project = [
        bazelci.get_platform_for_task(k, tasks[k]) for k in tasks
    ]

    return {p for p in ci_platforms_for_project if p in whitelist}
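
A short sketch of the whitelist variant; the whitelist entries are illustrative platform names, not an authoritative list of what BazelCI supports.

SUPPORTED_PLATFORMS = ["ubuntu2004", "macos", "windows"]  # illustrative whitelist
platforms = _get_platforms("Bazel", SUPPORTED_PLATFORMS)
# The result is a set, so each whitelisted platform appears at most once.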
def add_presubmit_jobs(module_name, module_version, task_config,
                       pipeline_steps):
    for task_name in task_config:
        platform_name = bazelci.get_platform_for_task(task_name, task_config)
        label = bazelci.PLATFORMS[platform_name][
            "emoji-name"] + " {0}@{1}".format(module_name, module_version)
        command = (
            '%s bcr_presubmit.py runner --module_name="%s" --module_version="%s" --task=%s'
            % (
                bazelci.PLATFORMS[platform_name]["python"],
                module_name,
                module_version,
                task_name,
            ))
        commands = [
            bazelci.fetch_bazelcipy_command(),
            fetch_bcr_presubmit_py_command(), command
        ]
        pipeline_steps.append(
            bazelci.create_step(label, commands, platform_name))
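
A sketch of how add_presubmit_jobs might be driven; the module name and version are hypothetical, and reading the task dict out of get_task_config's result mirrors how create_test_repo does it above.

pipeline_steps = []
configs = get_task_config("rules_foo", "1.0.0")  # hypothetical module/version
add_presubmit_jobs("rules_foo", "1.0.0", configs.get("tasks", {}), pipeline_steps)
# pipeline_steps now holds one step per task, ready to be emitted as a pipeline.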
def get_platform(project_name, task_name):
    http_config = bazelci.DOWNSTREAM_PROJECTS[project_name]["http_config"]
    configs = bazelci.fetch_configs(http_config, None)
    task_config = configs["tasks"][task_name]
    return bazelci.get_platform_for_task(task_name, task_config)
# Variant that obtains the configs through a get_configs() helper instead of fetching http_config directly.
def get_platform(project_name, task_name):
    configs = get_configs(project_name)
    task_config = configs["tasks"][task_name]
    return bazelci.get_platform_for_task(task_name, task_config)