def _get_itest_target_body(bazel_path, target, use_implicit_output):
    if not use_implicit_output:
        return _get_itest_target_body_by_bazel_query(bazel_path, target)
    workspace = bazel_utils.find_workspace()
    rel_target_path = bazel_utils.executable_for_label(target)
    potential_target_path = os.path.join(workspace, rel_target_path)
    if not os.path.exists(potential_target_path):
        # if the expected binary does not exist, this may be a glob or an alias.
        # use bazel query to be sure.
        return _get_itest_target_body_by_bazel_query(bazel_path, target)
    # check if this target has services
    service_defs = os.path.join(
        potential_target_path + ".runfiles/__main__",
        os.path.relpath(rel_target_path, "bazel-bin") + ".service_defs",
    )
    if not os.path.exists(service_defs):
        # most likely doesn't have services. but just to be safe and very correct,
        # use bazel query.
        # TODO(naphat) the only thing this bazel query protects against is some internal
        # runfiles structure in svc.bzl changing without this code being updated. do we need it?
        return _get_itest_target_body_by_bazel_query(bazel_path, target)
    # now that we know the given target is of a specific format (no aliases),
    # we can safely normalize the target name
    target = bazel_utils.BazelTarget(target).label
    return ITestTarget(name=target, has_services=True)
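
# For illustration, assuming a hypothetical label //services/foo:foo_itest whose binary
# lands in bazel-bin/services/foo/foo_itest, the implicit-output probing above resolves
# to roughly:
#   potential_target_path -> <workspace>/bazel-bin/services/foo/foo_itest
#   service_defs          -> <workspace>/bazel-bin/services/foo/foo_itest.runfiles/
#                            __main__/services/foo/foo_itest.service_defs
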
class ITestTarget(object):
    """A resolved itest target: its executable plus the commands used to launch and test it."""

    def __init__(self, name, has_services):
        self.name = name
        self.has_services = has_services
        self.executable_path = os.path.join(
            bazel_utils.find_workspace(), bazel_utils.executable_for_label(self.name)
        )
        if self.has_services:
            self.service_launch_cmd = [self.executable_path]
            self.test_cmd = self.service_launch_cmd + ["--svc.test-only"]
        else:
            self.service_launch_cmd = ["/bin/true"]
            self.test_cmd = [self.executable_path]
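
# A minimal sketch of the fields constructed above for a services target, again assuming
# the hypothetical label //services/foo:foo_itest (exact paths come from bazel_utils):
#   executable_path    -> <workspace>/bazel-bin/services/foo/foo_itest
#   service_launch_cmd -> [executable_path]
#   test_cmd           -> [executable_path, "--svc.test-only"]
# For a target without services, service_launch_cmd is ["/bin/true"] and test_cmd is
# just [executable_path].
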
def cmd_itest_run(args, bazel_args, mode_args):
    """Build the target, bring up an itest container for it, run its test command, then
    drop into an interactive shell inside the container (or detach)."""
    _raise_on_glob_target(args.target)
    _build_target(args, bazel_args, mode_args, args.target)
    itest_target = _get_itest_target(args.bazel_path, args.target, use_implicit_output=True)
    container_name = _get_container_name_for_target(itest_target.name)
    _verify_args(args, itest_target, container_should_be_running=False)

    tmpdir_name = "test_tmpdir"
    if args.persist_tmpdir:
        tmpdir_name = "persistent_test_tmpdir"
    host_data_dir = os.path.join(HOST_DATA_DIR_PREFIX, container_name)
    host_tmpdir = os.path.join(host_data_dir, tmpdir_name)
    for dirname in [host_tmpdir, HOST_HOME_DIR]:
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    container_tmpdir = os.path.join(IN_CONTAINER_DATA_DIR, tmpdir_name)

    workspace = bazel_utils.find_workspace()
    cwd = workspace

    # order matters here. The last command shows up as the last thing the user ran, i.e.
    # the first command they see when they hit "up".
    history_cmds = []  # type: ignore[var-annotated]
    if itest_target.has_services:
        history_cmds = [
            "svcctl --help",
            "svcctl status",
            'svcctl status -format "{{.CPUTime}} {{.Name}}" | sort -rgb | head',
        ]

    test_bin = os.path.join(host_data_dir, RUN_TEST_BIN_NAME)
    with open(test_bin, "w") as f:
        f.write(
            """#!/bin/bash -eu
cd {cwd}
exec {test} "$@"
""".format(
                cwd=itest_target.executable_path + ".runfiles/__main__",
                test=" ".join(itest_target.test_cmd),
            )
        )
    os.chmod(test_bin, 0o755)
    test_cmd_str = " ".join(
        pipes.quote(x)
        for x in [os.path.join(IN_CONTAINER_DATA_DIR, RUN_TEST_BIN_NAME)] + args.test_arg
    )
    history_cmds.append(test_cmd_str)

    launch_cmd = itest_target.service_launch_cmd
    if args.verbose:
        launch_cmd += ["--svc.verbose"]

    default_paths = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin".split(":")
    itest_paths = [
        os.path.join(
            workspace, os.path.dirname(bazel_utils.executable_for_label(SVCCTL_TARGET))
        ),
        os.path.join(workspace, "build_tools/itest"),
    ]
    env = {
        "DROPBOX_SERVER_TEST": "1",
        "PATH": ":".join(itest_paths + default_paths),
        "TEST_TMPDIR": container_tmpdir,
        "HOST_TEST_TMPDIR": host_tmpdir,
        "HOME": IN_CONTAINER_HOME_DIR,  # override HOME since we can't readily edit /etc/passwd
        "LAUNCH_CMD": " ".join(launch_cmd),
        "TEST_CMD": test_cmd_str,
        # Set how much directory to clean up on startup. Pass this into the container so it
        # gets cleaned up as root.
        "CLEANDIR": os.path.join(container_tmpdir, "logs")
        if args.persist_tmpdir
        else container_tmpdir,
    }

    history_file = os.path.join(HOST_HOME_DIR, ".bash_history")
    bash_history.merge_history([history_file], history_cmds, history_file)

    bashrc_file_src = runfiles.data_path("@dbx_build_tools//build_tools/bzl_lib/itest/bashrc")

    if args.build_image:
        docker_image = args.build_image
    else:
        docker_image = os.path.join(args.docker_registry, DEFAULT_IMAGE)

    init_cmd_args = [
        runfiles.data_path("@dbx_build_tools//build_tools/bzl_lib/itest/bzl-itest-init")
    ]

    # Set a fail-safe limit for an itest container to keep it from detonating the whole
    # machine. RSS limits are a funny thing in docker. Most likely the oom-killer will
    # start killing things inside the container, rendering it unstable.
    # FIXME(msolo) It would be nice to tear down the container on out-of-memory and leave
    # some sort of note.
    mem_limit_kb = _guess_mem_limit_kb()

    docker_run_args = [
        args.docker_path,
        "run",
        "--net=host",
        "--name",
        container_name,
        "--workdir",
        cwd,
        "--detach",
        "--memory",
        "%dK" % mem_limit_kb,
        # Swap is disabled anyway, so squelch a spurious warning.
        "--memory-swap",
        "-1",
        # Store target name in config, to be able to reload it later.
        "--label",
        "itest-target=%s" % args.target,
    ]
    if args.privileged:
        docker_run_args += ["--privileged"]

    # set env variables. This will also set them for subsequent `docker exec` commands.
    for k, v in env.items():
        docker_run_args += ["-e", "{}={}".format(k, v)]

    with metrics.create_and_register_timer("bazel_info_ms"):
        with open(os.devnull, "w") as dev_null:
            output_base = subprocess.check_output(
                [args.bazel_path, "info", "output_base"], stderr=dev_null
            ).strip()
            install_base = subprocess.check_output(
                [args.bazel_path, "info", "install_base"], stderr=dev_null
            ).strip()

    mounts = [
        (os.fsencode(workspace), b"ro"),
        (b"/sqpkg", b"ro"),
        (output_base, b"ro"),
        (install_base, b"ro"),
        (b"/etc/ssl", b"ro"),
        (b"/usr/share/ca-certificates", b"ro"),
        # We bind mount /run/dropbox/sock-drawer/ as read-write so that services outside
        # itest (i.e. ULXC jails) can publish sockets here that can be used from the inside
        # (bind mount), and so that services inside itest (i.e. RivieraFS) can publish
        # sockets here (read-write) that can be used from the outside.
        (DEFAULT_SOCKET_DIRECTORY_PATH, b"rw"),
    ]
    for path, perms in mounts:
        # Docker will happily create a mount source that is nonexistent, but it may not have
        # the right permissions. Better to just mount nothing.
        if not os.path.exists(path):
            print("missing mount point:", path, file=sys.stderr)
            continue
        src = os.path.realpath(path)
        docker_run_args += ["-v", b"%s:%s:%s" % (src, path, perms)]

    # Allow bzl itest containers to observe external changes to the mount table.
    if os.path.exists("/mnt/sqpkg"):
        docker_run_args += ["-v", "/mnt/sqpkg:/mnt/sqpkg:rslave"]

    if sys.stdin.isatty():
        # otherwise text wrapping on subsequent shells is messed up
        docker_run_args += ["--tty"]

    docker_run_args += ["-v", "{}:{}:rw".format(host_data_dir, IN_CONTAINER_DATA_DIR)]
    docker_run_args += ["-v", "{}:{}:rw".format(HOST_HOME_DIR, IN_CONTAINER_HOME_DIR)]
    docker_run_args += ["-v", "{}:{}:ro".format(bashrc_file_src, "/etc/bash.bashrc")]
    docker_run_args += [docker_image]
    docker_run_args += init_cmd_args

    with metrics.create_and_register_timer("services_start_ms"):
        with open(os.devnull, "w") as f:
            subprocess.check_call(docker_run_args, stdout=f)

    docker_exec_args = [args.docker_path, "exec"]
    if sys.stdin.isatty():
        docker_exec_args += ["--interactive", "--tty"]
    docker_exec_args += [container_name]

    exit_code = subprocess.call(
        docker_exec_args
        + [runfiles.data_path("@dbx_build_tools//build_tools/bzl_lib/itest/bzl-itest-wait")]
    )

    if exit_code == 0:
        # run the test command
        with metrics.create_and_register_timer("test_ms"):
            # NOT check_call. Even if this script doesn't exit with 0 (e.g. the test fails),
            # we want to keep going.
            subprocess.call(docker_exec_args + ["/bin/bash", "-c", test_cmd_str])

    if itest_target.has_services:
        services_started = (
            subprocess.check_output(
                [
                    args.docker_path,
                    "exec",
                    container_name,
                    "svcctl",
                    "status",
                    "--all",
                    "--format={{.Name}}",
                ],
                universal_newlines=True,
            )
            .strip()
            .split("\n")
        )
        metrics.set_extra_attributes("services_started", ",".join(services_started))
        metrics.set_gauge("services_started_count", len(services_started))

    # report metrics now, instead of after the interactive session, since
    # we don't want to measure that.
    metrics.report_metrics()

    if args.detach:
        # display the message of the day, then exit
        exec_wrapper.execv(args.docker_path, docker_exec_args + ["cat", "/etc/motd"])
    else:
        exit_code = subprocess.call(docker_exec_args + ["/bin/bash"])
        with open(os.devnull, "w") as devnull:
            subprocess.check_call(
                [args.docker_path, "rm", "-f", container_name],
                stdout=devnull,
                stderr=devnull,
            )
        sys.exit(exit_code)
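
# Roughly, the container lifecycle driven by cmd_itest_run (names and paths illustrative):
#   docker run --net=host --name <container> --workdir <workspace> --detach \
#       --memory <N>K --memory-swap -1 --label itest-target=<target> \
#       -e DROPBOX_SERVER_TEST=1 -e TEST_TMPDIR=... -v <src>:<dst>:<perm> ... \
#       <image> <runfiles>/bzl-itest-init
# followed by `docker exec` of bzl-itest-wait and of the generated test script, and finally
# either an interactive /bin/bash plus `docker rm -f` of the container, or, when detaching,
# `cat /etc/motd`.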