def make_go_env():
    """Return environment variables for running the pinned Go toolchain.

    Returns a dict with GOROOT, GOPATH and PATH entries suitable for merging
    into os.environ before invoking go tools.
    """
    # TODO Respect args to bzl to use the proper default runtime.
    ws = bazel_utils.find_workspace()
    bazel_ws_root = "bazel-" + os.path.basename(ws)
    # The pinned Go runtime lives under the bazel convenience-symlink tree.
    GOROOT = os.path.join(ws, bazel_ws_root, "external/go1_12_14_linux_amd64_tar_gz/go")
    return {
        "GOROOT": GOROOT,
        # Reuse `ws` computed above instead of a second find_workspace() call.
        "GOPATH": os.path.join(ws, "go"),
        # go-errcheck behaves incorrectly if $GOROOT/bin is not added to $PATH.
        # os.getenv("PATH", "") avoids a TypeError when PATH is unset, and
        # os.pathsep is the portable separator (matches the newer make_go_env).
        "PATH": os.path.join(GOROOT, "bin") + os.pathsep + os.getenv("PATH", ""),
    }
def _get_itest_target_body(bazel_path, target, use_implicit_output):
    """Resolve `target` into an ITestTarget.

    Prefers a cheap filesystem probe of the target's implicit output over a
    (slow) `bazel query`; falls back to the query-based path whenever the
    probe is inconclusive.
    """
    if not use_implicit_output:
        return _get_itest_target_body_by_bazel_query(bazel_path, target)
    workspace = bazel_utils.find_workspace()
    rel_target_path = bazel_utils.executable_for_label(target)
    potential_target_path = os.path.join(workspace, rel_target_path)
    if not os.path.exists(potential_target_path):
        # if the expected binary does not exist, this may be a glob or an alias.
        # use bazel query to be sure.
        return _get_itest_target_body_by_bazel_query(bazel_path, target)
    # check if this target has services
    # NOTE(review): this path hard-codes the svc.bzl runfiles layout
    # (<bin>.runfiles/__main__/<bin rel bazel-bin>.service_defs) — verify it
    # stays in sync with svc.bzl.
    service_defs = os.path.join(
        potential_target_path + ".runfiles/__main__",
        os.path.relpath(rel_target_path, "bazel-bin") + ".service_defs",
    )
    if not os.path.exists(service_defs):
        # most likely doesn't have services. but just to be safe and very correct,
        # use bazel query
        # TODO(naphat) the only thing this bazel query protects against is some internal runfiles
        # structure in svc.bzl changing without this code being updated. do we need it?
        return _get_itest_target_body_by_bazel_query(bazel_path, target)
    # now that we know the given target is of a specific format (no aliases),
    # we can safely normalize the target name
    target = bazel_utils.BazelTarget(target).label
    return ITestTarget(name=target, has_services=True)
def run_build_tool(bazel_path, target, targets, squelch_output=False):
    """Rebuild the bzl tool (`target`) and re-exec into the fresh binary.

    On success this call does NOT return: exec_wrapper.execv replaces the
    current process. On build failure it warns and returns, letting the
    currently-running (stale) version continue.
    """
    workspace = find_workspace()
    if not workspace:
        return
    try:
        # If we can bootstrap a new version, do it once.
        with metrics.create_and_register_timer("bzl_bootstrap_ms") as t:
            bzl_script = build_tool(bazel_path, target, targets, squelch_output=squelch_output)
        bzl_script_path = os.path.join(workspace, bzl_script)
        # Re-run with the same CLI arguments the user originally passed.
        argv = [bzl_script_path] + list(sys.argv[1:])
        # Guard against an exec loop: the re-exec'd process sees this and
        # skips bootstrapping again.
        os.environ["BZL_SKIP_BOOTSTRAP"] = "1"
        # Forward the bootstrap duration so the child can fold it into metrics.
        os.environ["BZL_BOOTSTRAP_MS"] = str(t.get_interval_ms())
        os.environ["BZL_RUNNING_REBUILT_BZL"] = "1"
        exec_wrapper.execv(bzl_script_path, argv)
    except subprocess.CalledProcessError:
        print(
            "WARN: Failed to build %s, continuing without self-update." % target,
            file=sys.stderr,
        )
        # If something goes wrong during rebuild, just run this version.
        pass
def cmd_gen_as_tool(args, bazel_args, mode_args):
    """Build the standalone bzl-gen binary and replace this process with it."""
    ws = bazel_utils.find_workspace()
    tool_rel = bazel_utils.build_tool(args.bazel_path, "@dbx_build_tools//build_tools:bzl-gen")
    tool_abs = os.path.join(ws, tool_rel)
    # argv[0] is conventionally the bare program name; mode_args pass through.
    exec_wrapper.execv(tool_abs, [os.path.basename(tool_abs)] + mode_args)
def targets2packages(bazel_targets):
    """Expand bazel target dirs and return those under //go/src/ as Go import paths."""
    go_prefix = "//go/src/"
    workspace_dir = bazel_utils.find_workspace()
    targets = bazel_utils.expand_bazel_target_dirs(
        workspace_dir, bazel_targets, require_build_file=False)
    # Strip only the leading prefix. The previous str.replace() would also
    # rewrite any later occurrence of "//go/src/" embedded in a path.
    return [x[len(go_prefix):] for x in targets if x.startswith(go_prefix)]
def __init__(self, name, has_services):
    """Describe an itest target: its label, services flag, and launch/test argv.

    name: bazel label of the target.
    has_services: whether the target bundles service definitions.
    """
    self.name = name
    self.has_services = has_services
    # Absolute path to the built executable inside the workspace.
    self.executable_path = os.path.join(
        bazel_utils.find_workspace(), bazel_utils.executable_for_label(self.name))
    if self.has_services:
        self.service_launch_cmd = [self.executable_path]
        # Fixed: the flag previously carried a stray leading space
        # (" --svc.test-only"). That was harmless only because callers shell-join
        # this list; as a real argv element it would not have parsed as a flag.
        self.test_cmd = self.service_launch_cmd + ["--svc.test-only"]
    else:
        # No services: nothing to launch, and the test is the binary itself.
        self.service_launch_cmd = ["/bin/true"]
        self.test_cmd = [self.executable_path]
def make_go_env(ensure_goroot=True):
    # type: (bool) -> Dict[str, str]
    """Return env vars for Go tooling: GOPATH/GOCACHE always, GOROOT/PATH when present."""
    # TODO(msolo) Respect args to bzl to use the proper default runtime.
    workspace = bazel_utils.find_workspace()
    go_env = {
        "GOPATH": os.path.join(bazel_utils.find_workspace(), "go"),
        "GOCACHE": "/tmp/go_build_cache",
    }
    # The pinned runtime sits under the workspace's bazel convenience symlink.
    goroot = os.path.join(
        workspace,
        "bazel-" + os.path.basename(workspace),
        "external/go_1_12_17_linux_amd64_tar_gz/go",
    )
    if ensure_goroot and not os.path.isdir(goroot):
        _ensure_goroot_exists(goroot)
    if os.path.isdir(goroot):
        go_env["GOROOT"] = goroot
        # go-errcheck behaves incorrectly if $GOROOT/bin is not added to $PATH
        go_env["PATH"] = os.path.join(goroot, "bin") + os.pathsep + os.getenv("PATH", "")
    return go_env
def main():
    # type: () -> None
    """CLI entry point for bzl-gen: parse args, expand targets, dispatch to the mode handler."""
    ap = argparse.ArgumentParser(
        "bzl-gen", epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    register_cmd_gen(None, get_generators(), sap=ap)
    args = ap.parse_args()
    workspace = bazel_utils.find_workspace()
    if not workspace:
        sys.exit("Run from a Bazel WORKSPACE.")
    try:
        # Not every mode takes targets; only expand when the parser defined them.
        if hasattr(args, "targets"):
            targets = args.targets
            require_build_file = not getattr(args, "missing_build_file_ok", False)
            targets = bazel_utils.expand_bazel_targets(
                workspace,
                targets,
                require_build_file=require_build_file,
                allow_nonexistent_npm_folders=True,
            )
            if not targets:
                sys.exit("No targets specified.")
            args.targets = targets
        args.func(args, (), ())
    except bazel_utils.BazelError as e:
        # BZL_DEBUG=1 surfaces the full traceback instead of a one-line error.
        if os.environ.get("BZL_DEBUG"):
            raise
        sys.exit("ERROR: " + str(e))
    except subprocess.CalledProcessError as e:
        traceback.print_exc(file=sys.stderr)
        if e.output:
            print(e.output.decode("utf-8"), file=sys.stderr)
        if os.environ.get("BZL_DEBUG"):
            raise
        # Propagate the failed subprocess's exit code.
        sys.exit(e.returncode)
    except KeyboardInterrupt:
        sys.exit("ERROR: interrupted")
def cmd_pkg(args, bazel_args, mode_args):
    """Build each dbx_pkg_* target in `args.targets` and print its output files."""
    workspace_dir = bazel_utils.find_workspace()
    curdir = os.getcwd()
    os.chdir(workspace_dir)
    try:
        # Each target must be of type dbx_pkg_* just for sanity.
        for target_str in args.targets:
            target = bazel_utils.BazelTarget(target_str)
            try:
                bp = build_parser.BuildParser()
                bp.parse_file(os.path.join(workspace_dir, target.build_file))
                rule = bp.get_rule(target.name)
            except (IOError, KeyError) as e:
                sys.exit("No such target: " + target_str + " " + str(e))
            run_rule(args, bazel_args, mode_args, target, rule)
            outputs = bazel_utils.outputs_for_label(args.bazel_path, target.label)
            print("bzl target", target.label, "up-to-date:")
            for f in outputs:
                print(" " + f)
    finally:
        # Restore the caller's cwd even when sys.exit or run_rule raises;
        # previously an error mid-loop left the process chdir'd to the workspace.
        os.chdir(curdir)
def _cmd_itest_reload(args, bazel_args, mode_args):
    """Rebuild `args.target`, then restart its services and re-run its test
    inside the already-running `bzl itest` container."""
    _raise_on_glob_target(args.target)
    _build_target(args, bazel_args, mode_args, args.target)
    itest_target = _get_itest_target(args.bazel_path, args.target, use_implicit_output=True)
    container_name = _get_container_name_for_target(itest_target.name)
    _verify_args(args, itest_target, container_should_be_running=True)
    host_data_dir = os.path.join(HOST_DATA_DIR_PREFIX, container_name)
    on_host_test_binary = os.path.join(host_data_dir, RUN_TEST_BIN_NAME)
    in_container_test_binary = os.path.join(IN_CONTAINER_DATA_DIR, RUN_TEST_BIN_NAME)
    if not os.path.exists(on_host_test_binary):
        # this means that the container was started from before `bzl itest` started creating
        # a run-test script
        # TODO(naphat) remove this after 09/30
        message = """The run-test wrapper does not exist for this target, most likely because the container was creating using an old version of `bzl itest`. Please run the following to recreate the container:

bzl itest-stop {target} && bzl itest-run {target}""".format(
            target=itest_target.name)
        sys.exit(message)
    # Shell-quote each argv element so user-supplied test args survive bash -c.
    test_cmd_str = " ".join(
        pipes.quote(x) for x in [in_container_test_binary] + args.test_arg)
    # Targets without services use /bin/true as a no-op for both steps.
    service_restart_cmd_str = "/bin/true"
    if itest_target.has_services:
        # tee the restart output so the host side can parse which services restarted.
        service_restart_cmd_str = "svcctl auto-restart | tee {}".format(
            os.path.join(IN_CONTAINER_DATA_DIR, SVCCTL_RESTART_OUTPUT_FILE))
    service_version_check_cmd_str = "/bin/true"
    if itest_target.has_services:
        service_version_check_cmd_str = "svcctl version-check"
    docker_exec_args = [args.docker_path, "exec"]
    if sys.stdin.isatty():
        docker_exec_args += ["--interactive", "--tty"]
    docker_exec_args += [container_name]
    workspace = bazel_utils.find_workspace()
    script = """
set -eu
set -o pipefail
if [[ ! -d {workspace} ]]; then
    echo 'Your current workspace ({workspace}) is not mounted into the existing `bzl itest` container. If you have multiple checkouts, are you running from the correct checkout?

If you want to terminate the current container and start a new one, try running:

bzl itest-stop {target} && bzl itest-run {target}' >&2
    exit 1
fi
if ! {service_version_check_cmd_str} >/dev/null 2>&1; then
    echo 'ERROR: Service definitions are stale or the service controller has changed. Please run the following to terminate and recreate your container:' >&2
    echo '' >&2
    echo 'bzl itest-stop {target} && bzl itest-run {target}' >&2
    exit 1
fi
{service_restart_cmd_str}
{test_cmd_str}
""".format(
        workspace=workspace,
        service_restart_cmd_str=service_restart_cmd_str,
        service_version_check_cmd_str=service_version_check_cmd_str,
        target=itest_target.name,
        test_cmd_str=test_cmd_str,
    )
    with metrics.create_and_register_timer("service_restart_ms"):
        return_code = subprocess.call(docker_exec_args + ["/bin/bash", "-c", script])
    if return_code == 0:
        if itest_target.has_services:
            # Parse the tee'd svcctl output to report which services restarted.
            services_restarted = []
            with open(os.path.join(host_data_dir, SVCCTL_RESTART_OUTPUT_FILE), "r") as f:
                for line in f:
                    if line.startswith("restart successful:"):
                        services_restarted.append(line.split()[-1])
            services_restarted.sort()
            metrics.set_extra_attributes("services_restarted", ",".join(services_restarted))
            metrics.set_gauge("services_restarted_count", len(services_restarted))
    sys.exit(return_code)
def cmd_itest_run(args, bazel_args, mode_args):
    """Build `args.target`, launch a docker container for it, start its
    services, run its test command, and drop into an interactive shell
    (or detach)."""
    _raise_on_glob_target(args.target)
    _build_target(args, bazel_args, mode_args, args.target)
    itest_target = _get_itest_target(args.bazel_path, args.target, use_implicit_output=True)
    container_name = _get_container_name_for_target(itest_target.name)
    _verify_args(args, itest_target, container_should_be_running=False)
    tmpdir_name = "test_tmpdir"
    if args.persist_tmpdir:
        tmpdir_name = "persistent_test_tmpdir"
    host_data_dir = os.path.join(HOST_DATA_DIR_PREFIX, container_name)
    host_tmpdir = os.path.join(host_data_dir, tmpdir_name)
    for dirname in [host_tmpdir, HOST_HOME_DIR]:
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    container_tmpdir = os.path.join(IN_CONTAINER_DATA_DIR, tmpdir_name)
    workspace = bazel_utils.find_workspace()
    cwd = workspace
    # order matters here. The last command shows up as the last thing the user ran, i.e.
    # the first command they see when they hit "up"
    history_cmds = []  # type: ignore[var-annotated]
    if itest_target.has_services:
        history_cmds = [
            "svcctl --help",
            "svcctl status",
            'svcctl status -format "{{.CPUTime}} {{.Name}}" | sort -rgb | head',
        ]
    # Write a small wrapper script on the host data dir (bind-mounted into the
    # container) that cd's into the runfiles tree and execs the test command.
    test_bin = os.path.join(host_data_dir, RUN_TEST_BIN_NAME)
    with open(test_bin, "w") as f:
        f.write("""#!/bin/bash -eu
cd {cwd}
exec {test} "$@"
""".format(
            cwd=itest_target.executable_path + ".runfiles/__main__",
            test=" ".join(itest_target.test_cmd),
        ))
    os.chmod(test_bin, 0o755)
    test_cmd_str = " ".join(
        pipes.quote(x)
        for x in [os.path.join(IN_CONTAINER_DATA_DIR, RUN_TEST_BIN_NAME)] + args.test_arg)
    history_cmds.append(test_cmd_str)
    launch_cmd = itest_target.service_launch_cmd
    if args.verbose:
        launch_cmd += ["--svc.verbose"]
    default_paths = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin".split(":")
    # Prepend svcctl's directory and the itest helper dir to PATH inside the container.
    itest_paths = [
        os.path.join(
            workspace, os.path.dirname(bazel_utils.executable_for_label(SVCCTL_TARGET))),
        os.path.join(workspace, "build_tools/itest"),
    ]
    env = {
        "DROPBOX_SERVER_TEST": "1",
        "PATH": ":".join(itest_paths + default_paths),
        "TEST_TMPDIR": container_tmpdir,
        "HOST_TEST_TMPDIR": host_tmpdir,
        # override HOME since we can't readily edit /etc/passwd
        "HOME": IN_CONTAINER_HOME_DIR,
        "LAUNCH_CMD": " ".join(launch_cmd),
        "TEST_CMD": test_cmd_str,
        # Set how much directory to clean up on startup. Pass this into the container so it gets
        # cleaned up as root.
        "CLEANDIR": os.path.join(container_tmpdir, "logs")
        if args.persist_tmpdir else container_tmpdir,
    }
    history_file = os.path.join(HOST_HOME_DIR, ".bash_history")
    bash_history.merge_history([history_file], history_cmds, history_file)
    bashrc_file_src = runfiles.data_path("@dbx_build_tools//build_tools/bzl_lib/itest/bashrc")
    if args.build_image:
        docker_image = args.build_image
    else:
        docker_image = os.path.join(args.docker_registry, DEFAULT_IMAGE)
    init_cmd_args = [
        runfiles.data_path("@dbx_build_tools//build_tools/bzl_lib/itest/bzl-itest-init")
    ]
    # Set a fail-safe limit for an itest container to keep it from detonating the whole
    # machine. RSS limits are a funny thing in docker. Most likely the oom-killer will
    # start killing things inside the container rendering it unstable.
    # FIXME(msolo) It would be nice to teardown the container on out-of-memory and leave
    # some sort of note.
    mem_limit_kb = _guess_mem_limit_kb()
    docker_run_args = [
        args.docker_path,
        "run",
        "--net=host",
        "--name",
        container_name,
        "--workdir",
        cwd,
        "--detach",
        "--memory",
        "%dK" % mem_limit_kb,
        # Swap is disabled anyway, so squelch a spurious warning.
        "--memory-swap",
        "-1",
        # Store target name in config, to be able to reload it later.
        "--label",
        "itest-target=%s" % args.target,
    ]
    if args.privileged:
        docker_run_args += ["--privileged"]
    # set env variables.
    # This will also set it for subsequent `docker exec` commands
    for k, v in env.items():
        docker_run_args += ["-e", "{}={}".format(k, v)]
    with metrics.create_and_register_timer("bazel_info_ms"):
        with open(os.devnull, "w") as dev_null:
            # check_output returns bytes here (no universal_newlines), which is
            # deliberate: mount paths below are assembled as bytes.
            output_base = subprocess.check_output(
                [args.bazel_path, "info", "output_base"], stderr=dev_null).strip()
            install_base = subprocess.check_output(
                [args.bazel_path, "info", "install_base"], stderr=dev_null).strip()
    mounts = [
        (os.fsencode(workspace), b"ro"),
        (b"/sqpkg", b"ro"),
        (output_base, b"ro"),
        (install_base, b"ro"),
        (b"/etc/ssl", b"ro"),
        (b"/usr/share/ca-certificates", b"ro"),
        # We bind mount /run/dropbox/sock-drawer/ as read-write so that services outside
        # itest (ie ULXC jails) can publish sockets here that can be used from the inside
        # (bind mount), and so that services inside itest (ie RivieraFS) can publish
        # sockets here (read-write) that can be used from the outside
        (DEFAULT_SOCKET_DIRECTORY_PATH, b"rw"),
    ]
    for path, perms in mounts:
        # Docker will happily create a mount source that is nonexistent, but it may not have the
        # right permissions. Better to just mount nothing.
        if not os.path.exists(path):
            print("missing mount point:", path, file=sys.stderr)
            continue
        src = os.path.realpath(path)
        docker_run_args += ["-v", b"%s:%s:%s" % (src, path, perms)]
    # Allow bzl itest containers to observe external changes to the mount table.
    if os.path.exists("/mnt/sqpkg"):
        docker_run_args += ["-v", "/mnt/sqpkg:/mnt/sqpkg:rslave"]
    if sys.stdin.isatty():
        # otherwise text wrapping on subsequent shells is messed up
        docker_run_args += ["--tty"]
    docker_run_args += ["-v", "{}:{}:rw".format(host_data_dir, IN_CONTAINER_DATA_DIR)]
    docker_run_args += ["-v", "{}:{}:rw".format(HOST_HOME_DIR, IN_CONTAINER_HOME_DIR)]
    docker_run_args += ["-v", "{}:{}:ro".format(bashrc_file_src, "/etc/bash.bashrc")]
    docker_run_args += [docker_image]
    docker_run_args += init_cmd_args
    with metrics.create_and_register_timer("services_start_ms"):
        with open(os.devnull, "w") as f:
            subprocess.check_call(docker_run_args, stdout=f)
    docker_exec_args = [args.docker_path, "exec"]
    if sys.stdin.isatty():
        docker_exec_args += ["--interactive", "--tty"]
    docker_exec_args += [container_name]
    # Block until bzl-itest-init inside the container reports services are up.
    exit_code = subprocess.call(docker_exec_args + [
        runfiles.data_path("@dbx_build_tools//build_tools/bzl_lib/itest/bzl-itest-wait")
    ])
    if exit_code == 0:
        # run the test command
        with metrics.create_and_register_timer("test_ms"):
            # NOT check_call. Even if this script doesn't exit with 0 (e.g. test fails),
            # we want to keep going
            subprocess.call(docker_exec_args + ["/bin/bash", "-c", test_cmd_str])
        if itest_target.has_services:
            services_started = (subprocess.check_output(
                [
                    args.docker_path,
                    "exec",
                    container_name,
                    "svcctl",
                    "status",
                    "--all",
                    "--format={{.Name}}",
                ],
                universal_newlines=True,
            ).strip().split("\n"))
            metrics.set_extra_attributes("services_started", ",".join(services_started))
            metrics.set_gauge("services_started_count", len(services_started))
    # report metrics now, instead of after the interactive session since
    # we don't want to measure that
    metrics.report_metrics()
    if args.detach:
        # display message of the day then exit
        exec_wrapper.execv(args.docker_path, docker_exec_args + ["cat", "/etc/motd"])
    else:
        exit_code = subprocess.call(docker_exec_args + ["/bin/bash"])
        with open(os.devnull, "w") as devnull:
            subprocess.check_call(
                [args.docker_path, "rm", "-f", container_name],
                stdout=devnull,
                stderr=devnull,
            )
    sys.exit(exit_code)
def main(ap, self_target):
    """Top-level bzl entry point: optionally self-rebuild and re-exec, then
    parse args and dispatch to the selected mode handler."""
    try:
        workspace = bazel_utils.find_workspace()
    except bazel_utils.BazelError as e:
        sys.exit("Bazel Error: {}".format(e))
    test_args = None
    try:
        # Hedge that we might not need to rebuild and exec. If for any
        # reason this fails, fall back to correct behavior.
        # Squelch argparse output during this probe parse.
        stdout, stderr = sys.stdout, sys.stderr
        with open("/dev/null", "w") as devnull:
            sys.stdout, sys.stderr = devnull, devnull
            test_args, unknown_args = ap.parse_known_args()
        # No built-in Bazel mode requires bzl to be up-to-date.
        rebuild_and_exec = test_args.mode not in bazel_modes
    except (SystemExit, AttributeError):
        # SystemExit: argparse rejected the args (possibly a mode only the
        # rebuilt bzl knows about); AttributeError: no mode attr. Rebuild.
        rebuild_and_exec = True
    finally:
        # Always restore real stdout/stderr captured before the probe.
        sys.stdout, sys.stderr = stdout, stderr
    if os.environ.get("BZL_SKIP_BOOTSTRAP"):
        # Set by run_build_tool in the re-exec'd child; prevents an exec loop.
        rebuild_and_exec = False
    # Propagate stats forward so we can sort of track the full metrics of itest.
    bootstrap_ms = int(os.environ.get("BZL_BOOTSTRAP_MS", 0))
    metrics.create_and_register_timer("bzl_bootstrap_ms", interval_ms=bootstrap_ms)
    if rebuild_and_exec:
        metrics.set_mode("_bzl_bootstrap")
        # If the tool requires an update, build it and re-exec. Do this before we parse args in
        # case we have defined a newer mode.
        targets = []
        # Pass in targets that we are going to build. On average this minimizes target flapping
        # within bazel and saves time on small incremental updates without sacrificing correct
        # behavior.
        # do this for some itest modes and if there are no unknown args (as those can be
        # bazel flags that causes worse build flapping)
        if (test_args and test_args.mode in ("itest-run", "itest-start", "itest-reload")
                and not unknown_args):
            targets.append(itest.SVCCTL_TARGET)
            targets.append(test_args.target)
        # also do this for tool modes, so we can avoid an extra bazel build
        if test_args and test_args.mode in ("tool", "fmt"):
            targets.append(test_args.target)
        squelch_output = test_args and test_args.mode in ("tool", "go", "go-env")
        run_build_tool(
            os.environ.get("BAZEL_PATH_FOR_BZL_REBUILD", "bazel"),
            self_target,
            targets,
            squelch_output=squelch_output,
        )
    args, remaining_args = ap.parse_known_args()
    metrics.set_mode(args.mode)
    # NOTE: reaches into argparse internals to map mode name -> subparser.
    subparser_map = ap._subparsers._group_actions[0].choices
    if remaining_args and (args.mode is None or not getattr(
            subparser_map[args.mode], "bzl_allow_unknown_args", False)):
        print(
            f"ERROR: unknown args for mode {args.mode}: {remaining_args}",
            file=sys.stderr,
        )
        sys.exit(2)
    bazel_args, mode_args = parse_bazel_args(remaining_args)
    if args.mode in (None, "help"):
        if not mode_args:
            ap.print_help()
            print()
        elif len(mode_args) == 1 and mode_args[0] not in bazel_modes:
            help_mode_parser = subparser_map[mode_args[0]]
            help_mode_parser.print_help()
        sys.stdout.flush()
        # `bzl` with no mode is a usage error (1); `bzl help ...` is success (0).
        sys.exit(1 if args.mode is None else 0)
    if args.build_image and not args.build_image.startswith(args.docker_registry):
        args.build_image = os.path.join(args.docker_registry, args.build_image)
    try:
        # Not every mode takes targets; only expand when the parser defined them.
        if hasattr(args, "targets"):
            targets = args.targets
            require_build_file = not getattr(args, "missing_build_file_ok", False)
            targets = bazel_utils.expand_bazel_targets(
                workspace, targets, require_build_file=require_build_file)
            if not targets:
                sys.exit("No targets specified.")
            args.targets = targets
        args.func(args, bazel_args, mode_args)
    except bazel_utils.BazelError as e:
        if os.environ.get("BZL_DEBUG"):
            raise
        sys.exit("ERROR: " + str(e))
    except subprocess.CalledProcessError as e:
        print(e, file=sys.stderr)
        if e.output:
            print(e.output, file=sys.stderr)
        if os.environ.get("BZL_DEBUG"):
            raise
        sys.exit(e.returncode)
    except KeyboardInterrupt:
        sys.exit("ERROR: interrupted")
def regenerate_build_files(
    bazel_targets_l: Sequence[str],
    generators: Sequence[Callable[..., Generator]],
    verbose: bool = False,
    skip_deps_generation: bool = False,
    dry_run: bool = False,
    reverse_deps_generation: bool = False,
    use_magic_mirror: bool = False,
) -> None:
    """Run every generator over the target set, expanding until it converges,
    then merge the generated BUILD fragments into BUILD files."""
    workspace_dir = bazel_utils.find_workspace()
    bazel_targets = set(bazel_targets_l)
    if reverse_deps_generation:
        # Also regenerate any package whose BUILD file references one of the
        # requested packages (reverse dependencies).
        targets = bazel_utils.expand_bazel_target_dirs(
            workspace_dir,
            [t for t in bazel_targets if not t.startswith("@")],
            require_build_file=False,
            cwd=".",
        )
        pkgs = [t.partition(":")[0] for t in targets]
        patterns = ['"%s"' % pkg for pkg in pkgs]
        patterns.extend(['"%s:' % pkg for pkg in pkgs])
        for path, dirs, files in os.walk(workspace_dir):
            if "BUILD" not in files:
                continue
            # Use a context manager so the BUILD file handle is closed promptly
            # (the old open(...).read() leaked the handle until GC).
            with open(os.path.join(workspace_dir, path, "BUILD")) as build_file:
                build_content = build_file.read()
            if any(pattern in build_content for pattern in patterns):
                # convert abs path to relative to workspace
                bazel_targets.add("//" + bazel_utils.normalize_os_path_to_target(
                    os.path.relpath(path, workspace_dir)))
    generated_files = DefaultDict[str, List[str]](list)
    generator_instances: List[Generator] = []
    for gen in generators:
        # Most of the time `gen` is a class. Sometimes it's a functools.partial,
        # which has no __name__ — fall back to the wrapped callable's name
        # instead of raising AttributeError.
        generator_name = getattr(
            gen, "__name__",
            getattr(getattr(gen, "func", None), "__name__", "generator"))
        with metrics.Timer("bzl_gen_{}_init_ms".format(generator_name)) as init_timer:
            generator_instances.append(
                gen(
                    workspace_dir,
                    generated_files,
                    verbose,
                    skip_deps_generation,
                    dry_run,
                    use_magic_mirror,
                ))
        metrics.log_cumulative_rate(init_timer.name, init_timer.get_interval_ms())
    # In order to ensure we don't miss generating specific target types,
    # recursively expands the generated set until it converges.
    prev_visited_dirs: Set[str] = set()
    while bazel_targets:
        for generator in generator_instances:
            with metrics.generator_metric_context(generator.__class__.__name__):
                generator.regenerate(bazel_targets)
        visited_dirs = set(generated_files.keys())
        newly_visited_dirs = visited_dirs.difference(prev_visited_dirs)
        if newly_visited_dirs:
            # continue processing
            prev_visited_dirs = visited_dirs
            bazel_targets = set([
                bazel_utils.normalize_os_path_to_target(d.replace(workspace_dir, "/"))
                for d in newly_visited_dirs
            ])
        else:
            break
    with metrics.Timer("bzl_gen_merge_build_files_ms") as merge_timer:
        merge_generated_build_files(generated_files)
    metrics.log_cumulative_rate(merge_timer.name, merge_timer.get_interval_ms())
def regenerate_build_files(
    bazel_targets,
    generators,
    verbose=False,
    skip_deps_generation=False,
    dry_run=False,
    reverse_deps_generation=False,
    use_magic_mirror=False,
):
    """Run every generator over the target set until it converges, merge the
    generated BUILD fragments, and return the set of updated packages."""
    workspace_dir = bazel_utils.find_workspace()
    if reverse_deps_generation:
        # Also regenerate any package whose BUILD file references one of the
        # requested packages (reverse dependencies).
        targets = bazel_utils.expand_bazel_target_dirs(
            workspace_dir,
            [t for t in bazel_targets if not t.startswith("@")],
            require_build_file=False,
            cwd=".",
        )
        pkgs = [t.partition(":")[0] for t in targets]
        patterns = ['"%s"' % pkg for pkg in pkgs]
        patterns.extend(['"%s:' % pkg for pkg in pkgs])
        bazel_targets = set(bazel_targets)
        for path, dirs, files in os.walk(workspace_dir):
            if "BUILD" not in files:
                continue
            # Use a context manager so the BUILD file handle is closed promptly
            # (the old open(...).read() leaked the handle until GC).
            with open(os.path.join(workspace_dir, path, "BUILD")) as build_file:
                build_content = build_file.read()
            if any(pattern in build_content for pattern in patterns):
                # convert abs path to relative to workspace
                bazel_targets.add("//" + os.path.relpath(path, workspace_dir))
    generated_files = defaultdict(list)  # type: ignore[var-annotated]
    generator_instances = [
        generator(
            workspace_dir,
            generated_files,
            verbose,
            skip_deps_generation,
            dry_run,
            use_magic_mirror,
        ) for generator in generators
    ]
    # In order to ensure we don't miss generating specific target types,
    # recursively expands the generated set until it converges.
    prev_visited_dirs = set()  # type: ignore[var-annotated]
    updated_pkgs = set()  # type: ignore[var-annotated]
    while bazel_targets:
        for generator in generator_instances:
            with metrics.generator_metric_context(generator.__class__.__name__):
                res = generator.regenerate(bazel_targets)
            # Generators are expected to do one/both of
            # - return a list of packages/directores where it could have modified BUILD files
            # - Update self.generated_files mapping for BUILD path -> BUILD file fragments
            if res:
                updated_pkgs.update(res)
        visited_dirs = set(generated_files.keys())
        newly_visited_dirs = visited_dirs.difference(prev_visited_dirs)
        if newly_visited_dirs:
            # continue processing
            prev_visited_dirs = visited_dirs
            bazel_targets = [d.replace(workspace_dir, "/") for d in newly_visited_dirs]
        else:
            break
    merge_generated_build_files(generated_files)
    updated_pkgs.update(generated_files.keys())
    return updated_pkgs
def _get_bzl_gen_path(bazel_path):
    # type: (str) -> str
    """Build the bzl-gen tool and return its absolute path in the workspace."""
    ws = bazel_utils.find_workspace()
    tool_rel = bazel_utils.build_tool(bazel_path, "@dbx_build_tools//build_tools:bzl-gen")
    return os.path.join(ws, tool_rel)