def cleanup(): """Make sure BOUT++ directory is clean and submodules correct """ shell("make distclean") shell_safe(r'find src -type f -name "*\.o" -delete') shutil.rmtree("googletest", ignore_errors=True) shutil.rmtree("externalpackages/googletest", ignore_errors=True) shutil.rmtree("externalpackages/mpark.variant", ignore_errors=True) shell_safe("git submodule update --init --recursive")
def test_equilib_1d(retest=False, np=1):
    global tests, testcommand, retestOption

    start_time = time.monotonic()

    testcommand = testcommand + " " + str(np)

    numTests = 0
    numFailures = 0
    failedTests = []

    currentDir = os.getcwd()

    if retest:
        print("Rechecking results - NOT running test examples")
        testcommand = testcommand + retestOption

    for testdir in tests:
        os.chdir(currentDir)
        os.chdir(testdir)
        s, out = shell(testcommand, pipe=False)
        numTests = numTests + 1
        if s != 0:
            numFailures = numFailures + 1
            failedTests.append(testdir)

    print("")
    if numFailures == 0:
        print("All tests passed")
    else:
        print(str(numFailures) + "/" + str(numTests) + " failed.")
        print("Failed tests:")
        for name in failedTests:
            print("    " + name)

    end_time = time.monotonic()
    print("Tests took " + str(timedelta(seconds=end_time - start_time)))
def run_test():
    print("Running simulation")

    # Delete all output files before starting
    for filename in equilibFiles + outputFiles:
        if os.path.isfile(filename):
            os.remove(filename)

    s, out = shell(createbgscript + " > createbg.log")
    # Note: compare with !=, not "is not": identity comparison with an int
    # literal is fragile and raises a SyntaxWarning on modern Python
    if s != 0:
        raise ValueError("Failed to run " + createbgscript)
def test_git_hash(self):
    # Try to get the git hash directly instead of through versioneer's
    # get_versions()
    from boututils.run_wrapper import shell, shell_safe

    # Check whether git is available
    retval, git_version = shell("git --version")
    if retval == 0:
        # git exists
        from pathlib import Path
        from hypnotoad.__init__ import __file__ as hypnotoad_init_file

        hypnotoad_path = Path(hypnotoad_init_file).parent

        # Check if hypnotoad is in its own git repo. hypnotoad/__init__.py
        # should be in the hypnotoad/ subdirectory of the git repo if it is,
        # so the parent directory of hypnotoad_path should contain a '.git'
        # directory if hypnotoad is in a git repo.
        if hypnotoad_path.parent.joinpath(".git").is_dir():
            retval, git_hash = shell_safe(
                "cd "
                + str(hypnotoad_path)
                + ' && git describe --always --abbrev=0 --dirty --match "NOT A TAG"',
                pipe=True,
            )
            git_hash = git_hash.strip()

            # Found a git hash; check it is consistent with the one from
            # versioneer
            dirty_pos = git_hash.find("-dirty")
            if dirty_pos != -1:
                # Check versioneer says the repo is dirty
                assert get_versions()["dirty"]
                # Remove "-dirty" from git_hash
                git_hash = git_hash[:dirty_pos]
            else:
                # Repo is clean
                assert not get_versions()["dirty"]

            assert git_hash == get_versions()["full-revisionid"]
    elif retval == 127:
        # git not installed
        pass
    else:
        # Needs the f-string prefix, otherwise the placeholders are printed
        # literally
        raise RuntimeError(
            f"'git --version' failed with {retval}. Output was {git_version}"
        )
def __init__(self, path=None, verbose=False):
    selflocation = os.path.realpath(__file__)
    selflocation = selflocation.rsplit("/", 1)[0]

    if path is None:
        path = selflocation

    # Get list of files in subdirectory, excluding common temporaries,
    # hidden files, and python .py and .pyc files
    requirements_list = [
        x
        for x in os.listdir(path)
        if not (("#" in x) or ("~" in x) or (x[0] == ".") or (".py" in x))
    ]

    if verbose:
        print("======= Requirement checks ========")

    self._verbose = verbose
    self._requirements = {}
    for requirement in requirements_list:
        status, out = shell(os.path.join(path, requirement), pipe=True)
        self.add(requirement, (status == 0))

    with open(selflocation + "/../../bin/bout-config") as configfile:
        config = configfile.read()

    matches = re.findall("^has_(.*)=(.*)", config, re.MULTILINE)
    for key, value in matches:
        yesno = {'"yes"': True, '"no"': False}
        try:
            value = yesno[value]
        except KeyError:
            # Format both fields explicitly; concatenating the match tuple
            # onto a string (as previously written) raises a TypeError
            print('Error parsing %s - %s is not "yes"/"no"' % (key, value))
        else:
            self.add(key, value)
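# A quick illustration of the bout-config parsing above. The two has_* lines
# here are made up for the example; a real bout-config contains one such line
# per optional dependency.
import re

config = 'has_netcdf="yes"\nhas_petsc="no"\n'
matches = re.findall("^has_(.*)=(.*)", config, re.MULTILINE)
assert matches == [("netcdf", '"yes"'), ("petsc", '"no"')]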
# Import the grid file
grid = file_import("uedge.grd_std.cdl")

code = 0  # Return code
for zeff in zlist:
    # Create the input file, setting Zeff. If we were passed "staggered"
    # (or similar) on the command line, use the staggered config file.
    inp = "BOUT_stag.inp" if "stag" in [i.lower()[:4] for i in argv] else "BOUT.inp"
    shell_safe(
        "sed 's/Zeff = 128.0/Zeff = " + str(zeff) + "/g' " + inp + " > data/BOUT.inp"
    )

    timestep = 5e3
    if zeff < 128:
        # Reduce the time-step: at large times these cases produce noise
        timestep = 1e3

    # Delete old output files
    shell("rm data/BOUT.dmp.*.nc")

    print("Running drift instability test, zeff = ", zeff)

    # Run the case
    s, out = launch_safe(
        "./2fluid timestep=" + str(timestep),
        runcmd=MPIRUN,
        nproc=nproc,
        mthread=nthreads,
        pipe=True,
    )
    with open("run.log." + str(zeff), "w") as f:
        f.write(out)

    # Collect data
    Ni = collect("Ni", path="data", xind=2, yind=20, info=False)
    phi = collect("phi", path="data", xind=2, yind=20, info=False)
    zmax = collect("ZMAX", path="data", info=False)
    rho_s = collect("rho_s", path="data", info=False)
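# launch_safe() also comes from boututils.run_wrapper: it prepends the MPI
# launcher to the command and fails loudly on a non-zero exit status. This
# reduced sketch is an assumption for illustration only, reusing the
# shell_safe() sketched earlier; the real helper has more options.
def launch_safe(command, runcmd="mpirun -np", nproc=2, mthread=None, pipe=False):
    """Run an MPI command via `runcmd`, raising if it fails."""
    cmd = "%s %d %s" % (runcmd, nproc, command)
    if mthread is not None:
        # Limit OpenMP threads per MPI process
        cmd = "OMP_NUM_THREADS=%d %s" % (mthread, cmd)
    return shell_safe(cmd, pipe=pipe)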
def test_filament_3d(numProcs, retest=False):
    global tests, testcommand, retestOption

    start_time = time.monotonic()

    numTests = 0
    numFailures = 0
    failedTests = []
    currentDir = os.getcwd()

    testcommand = testcommand + " " + str(numProcs)

    if retest:
        print("Rechecking results - NOT running test examples")
        testcommand = testcommand + retestOption

    warnings = []
    for test in tests:
        testdir = test.name
        os.chdir(currentDir)
        os.chdir(testdir)

        test_start = time.monotonic()
        s, out = shell(testcommand, pipe=False)
        test_time = timedelta(seconds=time.monotonic() - test_start)

        numTests = numTests + 1
        if s != 0:
            numFailures = numFailures + 1
            failedTests.append(testdir)

        this_warning = None
        if test_time - test.runtime > time_tolerance:
            this_warning = (
                testdir + " took " + str(test_time)
                + ". This is longer than the expected " + str(test.runtime) + "."
            )
        elif test.runtime - test_time > time_tolerance:
            this_warning = (
                testdir + " took " + str(test_time)
                + ". This is faster than the expected " + str(test.runtime) + "."
            )
        if this_warning is not None:
            print(this_warning, flush=True)
            warnings.append(this_warning)

    print("", flush=True)
    if numFailures == 0:
        print("All " + str(numTests) + " tests passed")
    else:
        print(str(numFailures) + "/" + str(numTests) + " failed.")
        print("Failed tests:")
        for name in failedTests:
            print("    " + name)

    if warnings:
        for warning in warnings:
            print(warning)
        print(
            "Expected times are from running on 1 node (48 cores) on the A3 (SKL) "
            "partition of Marconi, with an optimized configuration of BOUT++. If a "
            "test is slower for the same case, check for a performance regression. "
            "If it is faster, the expected time may need updating to account for "
            "improved performance."
        )

    end_time = time.monotonic()
    print("Tests took " + str(timedelta(seconds=end_time - start_time)))
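# The runtime check above compares timedelta objects against a tolerance.
# For example, with an assumed tolerance of 30 seconds (the actual
# time_tolerance is defined elsewhere in this script):
from datetime import timedelta

time_tolerance = timedelta(seconds=30)
expected = timedelta(minutes=10)
measured = timedelta(minutes=11)
# One minute over the expected time exceeds the tolerance, so this run
# would trigger the "longer than expected" warning
assert measured - expected > time_tolerance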
# Normalisation
Lbar = 1.0
Bbar = 1.0
J0 = -J0 * shape.Bxy / (MU0 * Lbar)  # Turn into A/m^2
P0 = P0 * Bbar**2 / (2.0 * MU0)  # Pascals

shape.add(P0, "pressure")
shape.add(J0, "Jpar0")
shape.add(bxcvz, "bxcvz")

for nx in nxlist:
    # Generate a new mesh file
    filename = "grid%d.nc" % nx

    if isfile(filename):
        print("Grid file '%s' already exists" % filename)
    else:
        print("Creating grid file '%s'" % filename)
        f = DataFile(filename, create=True)
        shape.write(nx, nx, f)
        f.close()

    # Generate BOUT.inp file
    directory = "grid%d" % nx
    shell("mkdir " + directory)
    shell("cp data/BOUT.inp " + directory)
    shell("sed -i 's/MZ = 17/MZ = %d/g' %s/BOUT.inp" % (nx, directory))
    shell(
        "sed -i 's/grid = \"grid16.nc\"/grid = \"%s\"/g' %s/BOUT.inp"
        % (filename, directory)
    )
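# The sed calls above can also be done portably in Python. A minimal
# equivalent of the two substitutions, as a hypothetical helper (the function
# name and signature are illustrative, not part of the script):
from pathlib import Path


def patch_inp(directory, nx, filename):
    """Rewrite the MZ and grid entries in an existing BOUT.inp."""
    inp = Path(directory) / "BOUT.inp"
    text = inp.read_text()
    text = text.replace("MZ = 17", "MZ = %d" % nx)
    text = text.replace('grid = "grid16.nc"', 'grid = "%s"' % filename)
    inp.write_text(text)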
from boututils.run_wrapper import shell
from glob import glob
from pathlib import Path
from sys import exit

tests_dir = str(Path(__file__).parent)
test_dirs = list(glob(tests_dir + "/*/"))

# No tests in the grid_files or __pycache__ directories
skip_dirs = ["grid_files", "__pycache__"]
test_dirs = [d for d in test_dirs if not any(x in d for x in skip_dirs)]

results = []
for d in test_dirs:
    name = str(Path(d).name)
    print("\nrunning", name, "...\n", flush=True)
    retcode, _ = shell(d + "/runtest.py")
    results.append((name, retcode))

for name, retcode in results:
    if retcode == 0:
        print(name, "passed")
    else:
        print(name, "failed")

if all(retcode == 0 for _, retcode in results):
    exit(0)
else:
    exit(1)
def main():
    parser = argparse.ArgumentParser(
        description="git bisect script for performance regression"
    )
    parser.add_argument("--nout", type=int, default=100, help="Number of timesteps")
    parser.add_argument(
        "--no-clean", action="store_false", dest="clean", help="Don't clean library"
    )
    parser.add_argument(
        "--no-configure",
        action="store_false",
        dest="configure",
        help="Don't configure library",
    )
    parser.add_argument(
        "--no-make", action="store_false", dest="make", help="Don't build library"
    )
    parser.add_argument(
        "--no-write", action="store_false", dest="write", help="Don't write to file"
    )
    parser.add_argument(
        "--just-run", action="store_true", help="Don't cleanup/configure/build/write"
    )
    parser.add_argument("--repeat", type=int, default=5, help="Number of repeat runs")
    parser.add_argument("--good", default=None, help="Time for 'good' run")
    parser.add_argument("--bad", default=None, help="Time for 'bad' run")
    parser.add_argument("--path", default=DEFAULT_MODEL_PATH, help="Path to model")
    parser.add_argument("--model", default=DEFAULT_MODEL_EXE, help="Model executable")
    parser.add_argument("--log-dir", default="logs", help="Backup log file directory")
    parser.add_argument(
        "--script", default=None, help="Other script to run to determine good/bad"
    )

    # How to keep in sync with dict `metrics` below?
    metric_choices = ["runtime-low", "runtime-mean", "inv_per_rhs", "time_per_rhs"]
    parser.add_argument(
        "--metric",
        choices=metric_choices,
        default="runtime-low",
        help="What metric to use",
    )

    args = parser.parse_args()

    if (args.good is None) ^ (args.bad is None):
        raise RuntimeError("You must supply either both of good and bad, or neither")

    if args.just_run:
        args.clean = args.configure = args.make = args.write = False

    git = git_info()
    log_dir = os.path.join(args.log_dir, git["commit"])

    try:
        if args.clean:
            cleanup()
        if args.configure:
            configure_bout()
        if args.make:
            build_bout()
        runtime = runtest(
            args.nout,
            repeat=args.repeat,
            log_dir=log_dir,
            path=args.path,
            model=args.model,
        )
    except RuntimeError:
        exit(GIT_SKIP_COMMIT_EXIT_CODE)

    if args.script is not None:
        # If we're running an external script, use that instead of
        # computing our own metric
        status, result = shell(args.script, pipe=True)
        log_line = '{commit}, {date}, "{result}", {dir}\n'.format(
            result=result.rstrip("\n"), dir=log_dir, **git
        )
        print(log_line)
        if args.write:
            with open("bisect_script_log", "a") as f:
                f.write(log_line)
        exit(status)

    timings = "{commit}, {date}, {mean}, {std}, {low}, {dir}\n".format(
        **git, **runtime, dir=log_dir
    )
    print(timings)
    if args.write:
        with open("bisect_timings", "a") as f:
            f.write(timings)

    if args.good is not None:
        invs_per_rhs = 0.0
        times_per_rhs = 0.0
        if not args.metric.startswith("runtime"):
            runs = [
                os.path.join(log_dir, "run{:02d}".format(run))
                for run in range(args.repeat)
            ]
            dfs = {
                run: read_timings_from_logfile(args.nout, directory=run)
                for run in runs
            }
            invs_per_rhs = [
                average_per_rhs(df, "Inv (absolute)") for df in dfs.values()
            ]
            times_per_rhs = [time_per_rhs(df) for df in dfs.values()]

        metrics = {
            "runtime-low": {"metric": runtime["low"], "std": runtime["std"]},
            "runtime-mean": {"metric": runtime["mean"], "std": runtime["std"]},
            "inv_per_rhs": {
                "metric": np.min(invs_per_rhs),
                "std": np.std(invs_per_rhs),
            },
            "time_per_rhs": {
                "metric": np.min(times_per_rhs),
                "std": np.std(times_per_rhs),
            },
        }

        if metric_is_good(
            good=float(args.good),
            bad=float(args.bad),
            metric=metrics[args.metric]["metric"],
            metric_std=metrics[args.metric]["std"],
        ):
            exit(0)
        else:
            exit(1)
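# metric_is_good() is defined elsewhere in this script. A plausible minimal
# sketch of the decision it makes - "is the measured metric closer to the
# known-good value than to the known-bad one?" - is given below. This is an
# assumption for illustration, not the actual implementation, which may also
# use metric_std to require a statistically significant difference.
def metric_is_good(good, bad, metric, metric_std):
    """Return True if `metric` is nearer the good reference than the bad one."""
    return abs(metric - good) <= abs(metric - bad)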