def __init__(self, submit=False, machine=None, root_dir=None, work_dir=None, dry_run=False):
###########################################################################
    self._submit   = submit
    self._machine  = machine
    self._root_dir = root_dir
    self._work_dir = work_dir
    self._dry_run  = dry_run

    # Probe machine if none was specified
    if self._machine is None:
        # We could potentially integrate more with CIME here to do actual
        # nodename probing.
        if "SCREAM_MACHINE" in os.environ and is_machine_supported(os.environ["SCREAM_MACHINE"]):
            self._machine = os.environ["SCREAM_MACHINE"]
        else:
            expect(False,
                   "scripts-ctest-driver requires either the machine arg or SCREAM_MACHINE in env")

    # Compute root dir (where the repo is) and work dir (where build/test will happen)
    if not self._root_dir:
        self._root_dir = Path(__file__).resolve().parent.parent
    else:
        self._root_dir = Path(self._root_dir).resolve()
        expect(self._root_dir.is_dir() and self._root_dir.parts[-2:] == ("components", "scream"),
               "Bad root-dir '{}', should be: $scream_repo/components/scream".format(self._root_dir))

    if self._work_dir is None:
        self._work_dir = self._root_dir.absolute().joinpath("ctest-build-scripts")
    else:
        self._work_dir = Path(self._work_dir).absolute()

    if self._work_dir.exists():
        expect(self._work_dir.is_dir(),
               "Work dir {} exists but is not a directory".format(self._work_dir))
        shutil.rmtree(str(self._work_dir))

    self._work_dir.mkdir(parents=True)

    # Load env, but do not set CTEST_PARALLEL_LEVEL. This code runs on login
    # nodes, so resource probing will not always be accurate.
    setup_mach_env(self._machine, ctest_j=-1)
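
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the driver above): the machine-probe
# fallback from __init__, reduced to a standalone, runnable function. The
# helper `is_machine_supported` mirrors the one used above; here it is a
# parameter with a permissive stub default so the snippet is self-contained.
# ---------------------------------------------------------------------------
import os  # already imported in this file; repeated so the sketch stands alone

def _probe_machine_sketch(machine=None, is_machine_supported=lambda m: True):
    """Return an explicit machine name, else fall back to $SCREAM_MACHINE."""
    if machine is not None:
        return machine
    env_mach = os.environ.get("SCREAM_MACHINE")
    if env_mach and is_machine_supported(env_mach):
        return env_mach
    raise RuntimeError("requires either the machine arg or SCREAM_MACHINE in env")

# Example (hypothetical machine name): with SCREAM_MACHINE=mappy in the env,
# _probe_machine_sketch() returns "mappy".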
def __init__(self, cxx_compiler=None, f90_compiler=None, c_compiler=None,
             submit=False, parallel=False, fast_fail=False,
             baseline_ref=None, baseline_dir=None, machine=None, no_tests=False,
             config_only=False, keep_tree=False, custom_cmake_opts=(), custom_env_vars=(),
             preserve_env=False, tests=(),
             integration_test=False, local=False, root_dir=None, work_dir=None,
             quick_rerun=False, quick_rerun_failed=False, dry_run=False,
             make_parallel_level=0, ctest_parallel_level=0,
             update_expired_baselines=False, extra_verbose=False, limit_test_regex=None):
###########################################################################

    # When using scripts-tests, we can't pass "-l" to test-all-scream,
    # but we can pass "-m local". So if machine="local", reset things
    # as if local=True and machine=None
    if machine == "local":
        local = True
        machine = None

    self._cxx_compiler             = cxx_compiler
    self._f90_compiler             = f90_compiler
    self._c_compiler               = c_compiler
    self._submit                   = submit
    self._parallel                 = parallel
    self._fast_fail                = fast_fail
    self._baseline_ref             = baseline_ref
    self._machine                  = machine
    self._local                    = local
    self._perform_tests            = not no_tests
    self._config_only              = config_only
    self._keep_tree                = keep_tree
    self._baseline_dir             = baseline_dir
    self._custom_cmake_opts        = custom_cmake_opts
    self._custom_env_vars          = custom_env_vars
    self._preserve_env             = preserve_env
    self._tests                    = tests
    self._root_dir                 = root_dir
    self._work_dir                 = None if work_dir is None else Path(work_dir)
    self._integration_test         = integration_test
    self._quick_rerun              = quick_rerun
    self._quick_rerun_failed       = quick_rerun_failed
    self._dry_run                  = dry_run
    self._tests_needing_baselines  = []
    self._update_expired_baselines = update_expired_baselines
    self._extra_verbose            = extra_verbose
    self._limit_test_regex         = limit_test_regex

    self._test_full_names = OrderedDict([
        ("dbg"  , "full_debug"),
        ("sp"   , "full_sp_debug"),
        ("fpe"  , "debug_nopack_fpe"),
        ("opt"  , "release"),
        ("valg" , "valgrind"),
        ("cmc"  , "cuda_mem_check"),
        ("cov"  , "coverage"),
    ])

    # Not all builds are meant to perform comparisons against pre-built baselines
    self._test_uses_baselines = OrderedDict([
        ("dbg"  , True),
        ("sp"   , True),
        ("fpe"  , False),
        ("opt"  , True),
        ("valg" , False),
        ("cmc"  , False),
        ("cov"  , False),
    ])

    self._tests_cmake_args = {
        "dbg"  : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("EKAT_DEFAULT_BFB", "True")],
        "sp"   : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("SCREAM_DOUBLE_PRECISION", "False"),
                  ("EKAT_DEFAULT_BFB", "True")],
        "fpe"  : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("SCREAM_PACK_SIZE", "1"),
                  ("SCREAM_SMALL_PACK_SIZE", "1"),
                  ("SCREAM_ENABLE_BASELINE_TESTS", "False"),
                  ("EKAT_DEFAULT_BFB", "True")],
        "opt"  : [("CMAKE_BUILD_TYPE", "Release")],
        "valg" : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("SCREAM_TEST_PROFILE", "SHORT"),
                  ("SCREAM_ENABLE_BASELINE_TESTS", "False"),
                  ("EKAT_ENABLE_VALGRIND", "True")],
        "cmc"  : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("SCREAM_TEST_PROFILE", "SHORT"),
                  ("SCREAM_ENABLE_BASELINE_TESTS", "False"),
                  ("EKAT_ENABLE_CUDA_MEMCHECK", "True")],
        "cov"  : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("SCREAM_ENABLE_BASELINE_TESTS", "False"),
                  ("EKAT_ENABLE_COVERAGE", "True")],
    }
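
    # For illustration (an assumption about downstream use, since the flag
    # assembly happens elsewhere in this script): each (name, value) pair above
    # is expected to end up as a CMake cache entry on the configure line, e.g.
    # ("CMAKE_BUILD_TYPE", "Debug") -> -DCMAKE_BUILD_TYPE=Debug.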

    if self._quick_rerun_failed:
        self._quick_rerun = True

    ############################################
    #  Sanity checks and helper structs setup  #
    ############################################

    # Quick rerun skips the config phase, and config-only runs only the config
    # phase. You can't ask for both...
    expect(not (self._quick_rerun and self._config_only),
           "Makes no sense to ask for --quick-rerun and --config-only at the same time")

    # Probe machine if none was specified
    if self._machine is None:
        # We could potentially integrate more with CIME here to do actual
        # nodename probing.
        if "SCREAM_MACHINE" in os.environ and is_machine_supported(os.environ["SCREAM_MACHINE"]):
            self._machine = os.environ["SCREAM_MACHINE"]
        else:
            expect(self._local,
                   "test-all-scream requires either the machine arg (-m $machine) or the -l flag, "
                   "which makes it look for machine specs in '~/.cime/scream_mach_specs.py'.")
            self._machine = "local"
    else:
        expect(not self._local,
               "Specifying a machine while passing '-l,--local' is ambiguous.")

    if not self._tests:
        # Default to all test types, except do not do fpe on CUDA
        self._tests = list(self._test_full_names.keys())
        self._tests.remove("valg")  # don't want this on by default
        self._tests.remove("cov")   # don't want this on by default
        self._tests.remove("cmc")   # don't want this on by default
        if is_cuda_machine(self._machine):
            self._tests.remove("fpe")
    else:
        for t in self._tests:
            expect(t in self._test_full_names,
                   "Requested test '{}' is not supported by test-all-scream, please choose from: {}".
                   format(t, ", ".join(self._test_full_names.keys())))

    # Compute root dir (where the repo is) and work dir (where build/test will happen)
    if not self._root_dir:
        self._root_dir = Path(__file__).resolve().parent.parent
    else:
        self._root_dir = Path(self._root_dir).resolve()
        expect(self._root_dir.is_dir() and self._root_dir.parts[-2:] == ("components", "scream"),
               "Bad root-dir '{}', should be: $scream_repo/components/scream".format(self._root_dir))

    if self._work_dir is not None:
        self._work_dir = Path(self._work_dir).absolute()
        expect(self._work_dir.is_dir(),
               "Error! Work directory '{}' does not exist.".format(self._work_dir))
    else:
        self._work_dir = self._root_dir.absolute().joinpath("ctest-build")
        self._work_dir.mkdir(exist_ok=True)

    os.chdir(str(self._root_dir))  # needed, or else every git command will need repo=root_dir
    expect(get_current_commit(),
           "Root dir: {}, does not appear to be a git repo".format(self._root_dir))

    # Print some info on the branch
    self._original_branch = get_current_branch()
    self._original_commit = get_current_commit()

    print_last_commit(git_ref=self._original_branch, dry_run=self._dry_run)

    ###################################
    #  Compilation/testing resources  #
    ###################################

    # Deduce how many compilation resources per test
    make_max_jobs = get_mach_compilation_resources()
    if make_parallel_level > 0:
        expect(make_parallel_level <= make_max_jobs,
               "Requested make_parallel_level {} is more than max available {}".format(make_parallel_level, make_max_jobs))
        make_max_jobs = make_parallel_level
        print("Note: honoring requested value for make parallel level: {}".format(make_max_jobs))
    else:
        print("Note: no value passed for --make-parallel-level. Using the default for this machine: {}".format(make_max_jobs))

    ctest_max_jobs = get_mach_testing_resources(self._machine)
    if ctest_parallel_level > 0:
        expect(ctest_parallel_level <= ctest_max_jobs,
               "Requested ctest_parallel_level {} is more than max available {}".format(ctest_parallel_level, ctest_max_jobs))
        ctest_max_jobs = ctest_parallel_level
        print("Note: honoring requested value for ctest parallel level: {}".format(ctest_max_jobs))
    elif "CTEST_PARALLEL_LEVEL" in os.environ:
        env_val = int(os.environ["CTEST_PARALLEL_LEVEL"])
        expect(env_val <= ctest_max_jobs,
               "CTEST_PARALLEL_LEVEL env {} is more than max available {}".format(env_val, ctest_max_jobs))
        ctest_max_jobs = env_val
        print("Note: honoring environment value for ctest parallel level: {}".format(ctest_max_jobs))
    else:
        print("Note: no value passed for --ctest-parallel-level. Using the default for this machine: {}".format(ctest_max_jobs))

    self._ctest_max_jobs = ctest_max_jobs

    self._testing_res_count = dict(zip(self._tests, [ctest_max_jobs] * len(self._tests)))
    self._compile_res_count = dict(zip(self._tests, [make_max_jobs]  * len(self._tests)))

    if self._parallel:
        # We need to be aware that other builds may be running too.
        # (Do not oversubscribe the machine)
        log_per_phys = logical_cores_per_physical_core()

        # Avoid splitting physical cores across test types
        make_jobs_per_test = ((make_max_jobs // len(self._tests)) // log_per_phys) * log_per_phys
        if is_cuda_machine(self._machine):
            ctest_jobs_per_test = ctest_max_jobs // len(self._tests)
        else:
            ctest_jobs_per_test = ((ctest_max_jobs // len(self._tests)) // log_per_phys) * log_per_phys

        # The current system of selecting cores explicitly with taskset will not work
        # if we try to oversubscribe. We would need to implement some kind of wrap-around
        # mechanism.
        if make_jobs_per_test == 0 or ctest_jobs_per_test == 0:
            expect(False,
                   "test-all-scream does not currently support oversubscription. "
                   "Either run fewer test types or turn off parallel testing")

        self._testing_res_count = dict(zip(self._tests, [ctest_jobs_per_test] * len(self._tests)))
        self._compile_res_count = dict(zip(self._tests, [make_jobs_per_test]  * len(self._tests)))

    for test in self._tests:
        print("Test {} can use {} jobs to compile, and {} jobs for test".
              format(test, self._compile_res_count[test], self._testing_res_count[test]))

    # Unless the user claims to know what he/she is doing, we setup the env.
    # Need to happen before compiler probing
    if not self._preserve_env:
        # Setup the env on this machine
        setup_mach_env(self._machine, ctest_j=ctest_max_jobs)

    ###################################
    #      Compute baseline info      #
    ###################################

    expect(not self._baseline_dir or self._work_dir != self._baseline_dir,
           "Error! For your safety, do NOT use '{}' to store baselines. Move them to a different directory (even a subdirectory if that works).".format(self._work_dir))

    # If no baseline ref/dir was provided, use default master baseline dir for this machine
    # NOTE: if user specifies baseline ref, baseline dir will be set later to a path within work dir
    if self._baseline_dir is None and self._baseline_ref is None:
        self._baseline_dir = "AUTO"
        print("No '--baseline-dir XYZ' nor '-b XYZ' provided. Testing against default baselines dir for this machine.")

    # If -k was used, make sure it's allowed
    if self._keep_tree:
        expect(not self._integration_test,
               "Should not be doing keep-tree with integration testing")
        print("WARNING! You have uncommitted changes in your repo.",
              "  The PASS/FAIL status may depend on these changes",
              "  so if you want to keep them, don't forget to create a commit.",
              sep="\n")
        if self._baseline_dir is None:
            # Make sure the baseline ref is HEAD
            expect(self._baseline_ref == "HEAD",
                   "The option --keep-tree is only available when testing against pre-built baselines "
                   "(--baseline-dir) or HEAD (-b HEAD)")
        else:
            # Make sure the baseline ref is unset (or HEAD)
            expect(self._baseline_ref is None or self._baseline_ref == "HEAD",
                   "The option --keep-tree is only available when testing against pre-built baselines "
                   "(--baseline-dir) or HEAD (-b HEAD)")
    else:
        expect(self._dry_run or is_repo_clean(),
               "Repo must be clean before running. If testing against HEAD or pre-built baselines, "
               "you can pass `--keep-tree` to allow non-clean repo.")

    # For integration tests, enforce baseline_ref==origin/master, and proceed to merge origin/master
    if self._integration_test:
        expect(self._baseline_ref is None or self._baseline_ref == "origin/master",
               "Error! Integration tests cannot be done against an arbitrary baseline ref.")

        # Set baseline ref and merge it
        self._baseline_ref = "origin/master"
        merge_git_ref(git_ref=self._baseline_ref, verbose=True, dry_run=self._dry_run)

        # Always update expired baselines if this is an integration test
        self._update_expired_baselines = True

    # By now, we should have at least one of baseline_dir and baseline_ref set (possibly both)
    default_baselines_root_dir = self._work_dir / "baselines"
    if self._baseline_dir is None:
        # Use the default baseline dir, and create it if necessary
        self._baseline_dir = Path(default_baselines_root_dir).absolute()
        self.create_tests_dirs(self._baseline_dir, True)  # Wipe out previous baselines
    else:
        if self._baseline_dir == "AUTO":
            expect(self._baseline_ref is None or self._baseline_ref == "origin/master",
                   "Do not specify `-b XYZ` when using `--baseline-dir AUTO`. The AUTO baseline dir should be used for the master baselines only.\n"
                   "  `-b XYZ` probably needs to build baselines for ref XYZ. However, no baselines will be built if the dir already contains baselines.\n")
            # We treat the "AUTO" string as a request for the automatic baseline dir.
            auto_dir = get_mach_baseline_root_dir(self._machine)
            self._baseline_dir = Path(auto_dir) if auto_dir else default_baselines_root_dir
            if "SCREAM_FAKE_AUTO" in os.environ:
                self._baseline_dir = self._baseline_dir / "fake"
        else:
            self._baseline_dir = Path(self._baseline_dir).absolute()

        # Make sure the baseline folders exist (but do not purge content if they exist)
        self.create_tests_dirs(self._baseline_dir, False)

    print("Checking baselines directory: {}".format(self._baseline_dir))
    self.baselines_are_present()
    if self._update_expired_baselines:
        self.baselines_are_expired()

    ############################################
    #    Deduce compilers if needed/possible   #
    ############################################

    if self._cxx_compiler is None:
        self._cxx_compiler = get_mach_cxx_compiler(self._machine)
    if self._f90_compiler is None:
        self._f90_compiler = get_mach_f90_compiler(self._machine)
    if self._c_compiler is None:
        self._c_compiler = get_mach_c_compiler(self._machine)

    if not self._dry_run:
        self._f90_compiler = run_cmd_no_fail("which {}".format(self._f90_compiler))
        self._cxx_compiler = run_cmd_no_fail("which {}".format(self._cxx_compiler))
        self._c_compiler   = run_cmd_no_fail("which {}".format(self._c_compiler))
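
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the "avoid splitting
# physical cores across test types" arithmetic from the parallel branch, as a
# standalone function. Rounding down to a multiple of the logical-cores-per-
# physical-core count keeps each test type on whole physical cores. All names
# here are local to the sketch.
# ---------------------------------------------------------------------------
def _jobs_per_test_sketch(max_jobs, num_tests, log_per_phys):
    """Split max_jobs evenly across num_tests, rounded down to whole physical cores."""
    return ((max_jobs // num_tests) // log_per_phys) * log_per_phys

# E.g. 48 logical cores, 4 test types, 2 logical cores per physical core:
# _jobs_per_test_sketch(48, 4, 2) == 12, i.e. 6 physical cores per test type.
# With 7 test types the result is 6, leaving 48 - 7*6 = 6 logical cores idle
# rather than splitting a physical core between two test types.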
def __init__(self, cxx_compiler=None, f90_compiler=None, c_compiler=None,
             submit=False, parallel=False, fast_fail=False,
             baseline_ref=None, baseline_dir=None, machine=None, no_tests=False,
             keep_tree=False, custom_cmake_opts=(), custom_env_vars=(),
             preserve_env=False, tests=(),
             integration_test="JENKINS_HOME" in os.environ, local=False,
             root_dir=None, work_dir=None,
             quick_rerun=False, quick_rerun_failed=False, dry_run=False,
             make_parallel_level=0, ctest_parallel_level=0):
###########################################################################
    self._cxx_compiler       = cxx_compiler
    self._f90_compiler       = f90_compiler
    self._c_compiler         = c_compiler
    self._submit             = submit
    self._parallel           = parallel
    self._fast_fail          = fast_fail
    self._baseline_ref       = baseline_ref
    self._machine            = machine
    self._local              = local
    self._perform_tests      = not no_tests
    self._keep_tree          = keep_tree
    self._baseline_dir       = baseline_dir
    self._custom_cmake_opts  = custom_cmake_opts
    self._custom_env_vars    = custom_env_vars
    self._preserve_env       = preserve_env
    self._tests              = tests
    self._root_dir           = root_dir
    self._work_dir           = work_dir
    self._integration_test   = integration_test
    self._quick_rerun        = quick_rerun
    self._quick_rerun_failed = quick_rerun_failed
    self._dry_run            = dry_run

    self._must_generate_baselines = False

    if self._quick_rerun_failed:
        self._quick_rerun = True

    ############################################
    #  Sanity checks and helper structs setup  #
    ############################################

    # Probe machine if none was specified
    if self._machine is None:
        # We could potentially integrate more with CIME here to do actual
        # nodename probing.
        if "CIME_MACHINE" in os.environ and is_machine_supported(os.environ["CIME_MACHINE"]):
            self._machine = os.environ["CIME_MACHINE"]
        else:
            expect(self._local,
                   "test-all-scream requires either the machine arg (-m $machine) or the -l flag, "
                   "which makes it look for machine specs in '~/.cime/scream_mach_specs.py'.")
            self._machine = "local"
    else:
        expect(not self._local,
               "Specifying a machine while passing '-l,--local' is ambiguous.")

    ##################################################
    #    Deduce how many testing resources per test  #
    ##################################################

    if ctest_parallel_level > 0:
        ctest_max_jobs = ctest_parallel_level
        print("Note: honoring requested value for ctest parallel level: {}".format(ctest_max_jobs))
    elif "CTEST_PARALLEL_LEVEL" in os.environ:
        ctest_max_jobs = int(os.environ["CTEST_PARALLEL_LEVEL"])
        print("Note: honoring environment value for ctest parallel level: {}".format(ctest_max_jobs))
    else:
        ctest_max_jobs = get_mach_testing_resources(self._machine)
        print("Note: no value passed for --ctest-parallel-level. Using the default for this machine: {}".format(ctest_max_jobs))

    # Unless the user claims to know what he/she is doing, we setup the env.
    if not self._preserve_env:
        # Setup the env on this machine
        setup_mach_env(self._machine, ctest_j=ctest_max_jobs)

    # Compute root dir
    if not self._root_dir:
        self._root_dir = pathlib.Path(__file__).resolve().parent.parent
    else:
        self._root_dir = pathlib.Path(self._root_dir).resolve()
        expect(self._root_dir.is_dir() and self._root_dir.parts[-2:] == ("components", "scream"),
               "Bad root-dir '{}', should be: $scream_repo/components/scream".format(self._root_dir))

    if self._work_dir is not None:
        expect(pathlib.Path(self._work_dir).absolute().is_dir(),
               "Error! Work directory '{}' does not exist.".format(self._work_dir))
    else:
        self._work_dir = self._root_dir.absolute().joinpath("ctest-build")

    expect(not self._baseline_dir or self._work_dir != self._baseline_dir,
           "Error! For your safety, do NOT use '{}' to store baselines. Move them to a different directory (even a subdirectory if that works).".format(self._work_dir))

    expect(not (self._baseline_ref and self._baseline_dir),
           "Makes no sense to specify a baseline generation commit if using pre-existing baselines")

    self._tests_cmake_args = {
        "dbg"  : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("EKAT_DEFAULT_BFB", "True")],
        "sp"   : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("SCREAM_DOUBLE_PRECISION", "False"),
                  ("EKAT_DEFAULT_BFB", "True")],
        "fpe"  : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("SCREAM_PACK_SIZE", "1"),
                  ("SCREAM_SMALL_PACK_SIZE", "1"),
                  ("EKAT_DEFAULT_BFB", "True")],
        "opt"  : [("CMAKE_BUILD_TYPE", "Release")],
        "valg" : [("CMAKE_BUILD_TYPE", "Debug"),
                  ("EKAT_ENABLE_VALGRIND", "True")],
    }

    self._test_full_names = OrderedDict([
        ("dbg"  , "full_debug"),
        ("sp"   , "full_sp_debug"),
        ("fpe"  , "debug_nopack_fpe"),
        ("opt"  , "release"),
        ("valg" , "valgrind"),
    ])

    if not self._tests:
        # Default to all test types, except do not do fpe on CUDA
        self._tests = list(self._test_full_names.keys())
        self._tests.remove("valg")  # don't want this on by default
        if is_cuda_machine(self._machine):
            self._tests.remove("fpe")
    else:
        for t in self._tests:
            expect(t in self._test_full_names,
                   "Requested test '{}' is not supported by test-all-scream, please choose from: {}".
                   format(t, ", ".join(self._test_full_names.keys())))

    os.chdir(str(self._root_dir))  # needed, or else every git command will need repo=root_dir
    expect(get_current_commit(),
           "Root dir: {}, does not appear to be a git repo".format(self._root_dir))

    self._original_branch = get_current_branch()
    self._original_commit = get_current_commit()

    print_last_commit(git_ref=self._original_branch, dry_run=self._dry_run)

    ############################################
    #    Deduce compilers if needed/possible   #
    ############################################

    if self._cxx_compiler is None:
        self._cxx_compiler = get_mach_cxx_compiler(self._machine)
    if self._f90_compiler is None:
        self._f90_compiler = get_mach_f90_compiler(self._machine)
    if self._c_compiler is None:
        self._c_compiler = get_mach_c_compiler(self._machine)

    if not self._dry_run:
        self._f90_compiler = run_cmd_no_fail("which {}".format(self._f90_compiler))
        self._cxx_compiler = run_cmd_no_fail("which {}".format(self._cxx_compiler))
        self._c_compiler   = run_cmd_no_fail("which {}".format(self._c_compiler))

    ###################################
    #      Compute baseline info      #
    ###################################

    default_baselines_root_dir = pathlib.Path(self._work_dir, "baselines")
    if self._baseline_dir is None:
        if self._baseline_ref is None:
            # Compute baseline ref
            if self._keep_tree:
                self._baseline_ref = "HEAD"
            elif self._integration_test:
                # Make sure our copy of origin/master is up-to-date (at least at the time of this script's execution)
                git_fetch_remote("origin")
                self._baseline_ref = "origin/master"
                merge_git_ref(git_ref="origin/master", verbose=True, dry_run=self._dry_run)
            else:
                self._baseline_ref = get_common_ancestor("origin/master")
                # Prefer a symbolic ref if possible
                if self._baseline_ref is None or self._baseline_ref == get_current_commit(commit="origin/master"):
                    self._baseline_ref = "origin/master"

        self._must_generate_baselines = True
        self._baseline_dir = pathlib.Path(default_baselines_root_dir).absolute()
    else:
        # We treat the "AUTO" string as a request for the automatic baseline dir.
        if self._baseline_dir == "AUTO":
            self._baseline_dir = get_mach_baseline_root_dir(self._machine)

        self._baseline_dir = pathlib.Path(self._baseline_dir).absolute()

        # Make sure the baseline root directory exists
        expect(self._baseline_dir.is_dir(),
               "Baseline_dir {} is not a dir".format(self._baseline_dir))

        if self._integration_test:
            self._baseline_ref = "origin/master"
            merge_git_ref(git_ref=self._baseline_ref, verbose=True, dry_run=self._dry_run)
        else:
            for test in self._tests:
                test_baseline_dir = self.get_preexisting_baseline(test)
                expect(test_baseline_dir.is_dir(),
                       "Missing baseline {}".format(test_baseline_dir))

    # Name of the file used to store/check the git sha of the repo used to generate baselines,
    # and name of the file used to store/check the builds for which baselines are available.
    # Store them once, to avoid typo-like bugs.
    self._baseline_sha_file   = pathlib.Path(self._baseline_dir, "baseline_git_sha")
    self._baseline_names_file = pathlib.Path(self._baseline_dir, "baseline_names")

    if self._integration_test:
        master_sha = get_current_commit(commit=self._baseline_ref)
        if not self.baselines_are_present():
            print("Some baselines were not found. Rebuilding them.")
            self._must_generate_baselines = True
        elif self.baselines_are_expired(expected_baseline_sha=master_sha):
            print("Baselines expired. Rebuilding them.")
            self._must_generate_baselines = True
        else:
            print("Baselines found and not expired. Skipping baselines generation.")

    if self._must_generate_baselines:
        print("Using commit {} to generate baselines".format(self._baseline_ref))

    self._testing_res_count = {
        "dbg"  : ctest_max_jobs,
        "sp"   : ctest_max_jobs,
        "fpe"  : ctest_max_jobs,
        "opt"  : ctest_max_jobs,
        "valg" : ctest_max_jobs,
    }

    # Deduce how many compilation resources per test
    if make_parallel_level > 0:
        make_max_jobs = make_parallel_level
        print("Note: honoring requested value for make parallel level: {}".format(make_max_jobs))
    else:
        make_max_jobs = get_mach_compilation_resources(self._machine)
        print("Note: no value passed for --make-parallel-level. Using the default for this machine: {}".format(make_max_jobs))

    self._compile_res_count = {
        "dbg"  : make_max_jobs,
        "sp"   : make_max_jobs,
        "fpe"  : make_max_jobs,
        "opt"  : make_max_jobs,
        "valg" : make_max_jobs,
    }

    if self._parallel:
        # We need to be aware that other builds may be running too.
        # (Do not oversubscribe the machine)
        make_remainder  = make_max_jobs  % len(self._tests)
        make_count      = make_max_jobs  // len(self._tests)
        ctest_remainder = ctest_max_jobs % len(self._tests)
        ctest_count     = ctest_max_jobs // len(self._tests)

        # In case we have more items in self._tests than cores/gpus (unlikely)
        if make_count == 0:
            make_count = 1
        if ctest_count == 0:
            ctest_count = 1

        for test in self._tests:
            self._compile_res_count[test] = make_count
            if self._tests.index(test) < make_remainder:
                self._compile_res_count[test] = make_count + 1

            self._testing_res_count[test] = ctest_count
            if self._tests.index(test) < ctest_remainder:
                self._testing_res_count[test] = ctest_count + 1

            print("Test {} can use {} jobs to compile, and {} jobs for testing".
                  format(test, self._compile_res_count[test], self._testing_res_count[test]))

    if self._keep_tree:
        expect(not self._integration_test,
               "Should not be doing keep-tree with integration testing")
        print("WARNING! You have uncommitted changes in your repo.",
              "  The PASS/FAIL status may depend on these changes",
              "  so if you want to keep them, don't forget to create a commit.",
              sep="\n")
        if self._baseline_dir is None:
            # Make sure the baseline ref is HEAD
            expect(self._baseline_ref == "HEAD",
                   "The option --keep-tree is only available when testing against pre-built baselines "
                   "(--baseline-dir) or HEAD (-b HEAD)")
    else:
        expect(self._dry_run or is_repo_clean(),
               "Repo must be clean before running. If testing against HEAD or pre-built baselines, "
               "you can pass `--keep-tree` to allow non-clean repo.")
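
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the remainder
# distribution used in the parallel branch. Each test gets jobs // n jobs,
# and the first jobs % n tests absorb one extra each, so all jobs are used
# without oversubscribing. All names here are local to the sketch.
# ---------------------------------------------------------------------------
def _split_jobs_sketch(total_jobs, tests):
    """Round-robin split of total_jobs across tests (first ones get the remainder)."""
    count, remainder = divmod(total_jobs, len(tests))
    count = max(count, 1)  # in case there are more tests than cores/gpus (unlikely)
    return {t: count + (1 if i < remainder else 0) for i, t in enumerate(tests)}

# E.g. _split_jobs_sketch(10, ["dbg", "sp", "fpe"]) == {"dbg": 4, "sp": 3, "fpe": 3}.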