def reload_test_suite(status_list):
    """
    Rebuild an AbinitTestSuite from the `.prev_run.pickle` dump produced by a
    previous run, keeping only the tests whose status is in `status_list`.
    """
    cprint("Reading previous tests from pickle file", "yellow")
    with open(".prev_run.pickle", "rb") as fh:
        previous_suite = pickle.load(fh)

    print("Selecting tests with status in %s" % str(status_list))
    selected = []
    for test in previous_suite:
        if test.status in status_list:
            selected.append(test)

    return AbinitTestSuite(previous_suite.abenv, test_list=selected)
def _run(ctx, input_name, exec_name, run_make):
    """Execute `exec_name input_name` using the binary found in the build tree.

    Args:
        ctx: invoke context used to spawn the command.
        input_name: input file passed to the executable.
        exec_name: name of the executable located in src/98_main.
        run_make: True to (re)compile the code before running.
    """
    # Fixed: the original docstring opened with four quotes (""""Execute...),
    # which left a stray `"` at the start of the documented text.
    if run_make:
        make(ctx)

    top = find_top_build_tree(".", with_abinit=True)
    binpath = os.path.join(top, "src", "98_main", exec_name)
    cprint(f"Using binpath: {binpath}", "green")
    cmd = f"{binpath} {input_name}"
    cprint(f"Executing {cmd}", color="green")
    ctx.run(cmd, pty=True)
def links(ctx):
    """
    Create symbolic links to Abinit executables in current working directory.
    """
    top = find_top_build_tree(".", with_abinit=True)
    main98 = os.path.join(top, "src", "98_main")
    for dest in ALL_BINARIES:
        # Skip binaries already linked in the cwd.
        if os.path.islink(os.path.join(os.getcwd(), dest)):
            continue
        source = os.path.join(main98, dest)
        if os.path.isfile(source):
            os.symlink(source, dest)
        else:
            # Fixed: the original message lacked the closing backtick after the
            # second %s ("in dir `%s").
            cprint("Cannot find `%s` in dir `%s`" % (source, main98), "yellow")
def on_modified(event):
    """Rebuild the project (via `make`) whenever a watched file is modified.

    Relies on `jobs`, `top`, `ctx`, `cd` and `cprint` from the enclosing scope.
    """
    print(f"hey buddy, {event.src_path} has been modified")
    cmd = "make -j%d > >(tee -a make.log) 2> >(tee -a make.stderr >&2)" % jobs
    cprint("Executing: %s" % cmd, color="yellow")
    with cd(top):
        try:
            result = ctx.run(cmd, pty=True)
            if result.ok:
                cprint("Make completed successfully", color="green")
                cprint("Watching for changes ...", color="green")
        except Exception:
            # Fixed: these were f-strings without any placeholder (ruff F541).
            cprint("Make returned non-zero exit status", color="red")
            cprint("Keep on watching for changes hoping you get it right ...",
                   color="red")
def robodoc(ctx):
    """Build the ROBODOC documentation and open the master index in a browser."""
    with cd(ABINIT_ROOTDIR):
        outcome = ctx.run("./mkrobodoc.sh", pty=True)
        if not outcome.ok:
            cprint("ROBODOC BUILD FAILED", "red")
            return outcome.ok

        cprint("ROBODOC BUILD OK", "green")
        # https://stackoverflow.com/questions/44447469/cannot-open-an-html-file-from-python-in-a-web-browser-notepad-opens-instead
        html_path = os.path.join(
            ABINIT_ROOTDIR, "./tmp-robodoc/www/robodoc/masterindex.html")
        print("Trying to open %s in browser ..." % html_path)
        return webbrowser.open_new_tab(html_path)
def panel(options):
    """
    Open GUI in web browser, requires panel package
    """
    try:
        import panel  # noqa: F401
    except ImportError as exc:
        cprint("Use `conda install panel` or `pip install panel` to install the python package.", "red")
        raise exc

    # Non-interactive backend: rendering is delegated to panel.
    import matplotlib
    matplotlib.use("Agg")

    for mem_file in options.memfiles:
        mem_file.get_panel().show()

    return 0
def get_database(self, regenerate=False, with_pickle=False):
    """
    Return an instance of TestsDatabase initialized from an external pickle file.

    Args:
        regenerate: True to force the regeneration of the database
            and the writing of a new pickle file.
        with_pickle: Save the generated database in pickle format.
    """
    # NOTE(review): `database_path` is not defined in this method or its
    # arguments — presumably a module-level constant or closure variable;
    # verify where it comes from.
    if regenerate or not os.path.exists(database_path):
        cprint("Regenerating database...", "yellow")
        database = self.build_database()

        # Save the database in the cpickle file.
        # Use file locking mechanism to prevent IO from other processes.
        # NOTE(review): the lock is not released if open/pickle.dump raises —
        # consider try/finally.
        if with_pickle:
            print("Saving database to %s" % database_path)
            lock = FileLock(database_path)
            lock.acquire()
            with open(database_path, "wb") as fh:
                # protocol=-1: highest available pickle protocol.
                pickle.dump(database, fh, protocol=-1)
            lock.release()
    else:
        cprint("Loading database from: %s" % database_path, "yellow")
        # Read the database from the cpickle file.
        # Use file locking mechanism to prevent IO from other processes.
        lock = FileLock(database_path)
        lock.acquire()
        with open(database_path, "rb") as fh:
            database = pickle.load(fh)
        lock.release()

    return database
def make(ctx, jobs="auto", touch=False, clean=False, binary=""):
    """
    Touch all modified files and recompile the code

    Args:
        jobs: Use `jobs` threads for make -jNUM
        touch: Touch all changed files
        clean: Issue `make clean` before `make`.
        binary: Binary to recompile, default: all
    """
    if touch:
        with cd(ABINIT_ROOTDIR):
            cmd = "./abisrc.py touch"
            cprint("Executing: %s" % cmd, "yellow")
            result = ctx.run(cmd, pty=True)
            if not result.ok:
                cprint("`%s` failed. Aborting now!" % cmd, "red")
                return 1

    top = find_top_build_tree(".", with_abinit=False)
    jobs = max(1, number_of_cpus() // 2) if jobs == "auto" else int(jobs)

    with cd(top):
        if clean:
            ctx.run("cd src && make clean && cd ..", pty=True)
            ctx.run("cd shared && make clean && cd ..", pty=True)

        cmd = "make -j%d %s > >(tee -a make.log) 2> >(tee -a make.stderr >&2)" % (
            jobs, binary)
        cprint("Executing: %s" % cmd, "yellow")
        # Fixed: the run result was bound to an unused `results` variable.
        ctx.run(cmd, pty=True)
def lldb(ctx, input_name, exec_name="abinit", run_make=False):
    """
    Execute `lldb` debugger with the given `input_name`.
    """
    if run_make:
        make(ctx)

    build_top = find_top_build_tree(".", with_abinit=True)
    binpath = os.path.join(build_top, "src", "98_main", exec_name)
    cprint(f"Using binpath: {binpath}", "green")

    cmd = f"lldb {binpath} --one-line 'settings set target.run-args {input_name}'"
    for message in (f"Executing lldb command: {cmd}",
                    "Type run to start lldb debugger",
                    "Then use `bt` to get the backtrace\n\n"):
        cprint(message, color="green")

    ctx.run(cmd, pty=True)
def abichecks(ctx):
    """Execute (some of the) abichecks scripts."""
    import time
    exclude = ["check-libpaw.py", "warningschk.py", "abirules_tools.py", "__init__.py"]
    retcode = 0
    with cd(ABINIT_ROOTDIR):
        script_dir = os.path.join("abichecks", "scripts")
        # Run every python script in the directory, except the excluded ones.
        for name in [f for f in os.listdir(script_dir) if f.endswith(".py")]:
            if name in exclude:
                continue
            script_path = os.path.join(script_dir, name)
            print("Running", script_path, "... ")
            start = time.time()
            outcome = ctx.run(script_path, warn=True, pty=True)
            msg, color = ("[OK]", "green") if outcome.ok else ("[FAILED]", "red")
            cprint("%s (%.2f s)" % (msg, time.time() - start), color=color)
            if not outcome.ok:
                retcode += 1

    if retcode != 0:
        cprint("%d FAILED TESTS" % retcode, "red")
    else:
        cprint("ALL TESTS OK", "green")

    return retcode
def runemall(ctx, make=True, jobs="auto", touch=False, clean=False, keywords=None):
    """Run all tests (sequential and parallel). Exit immediately if errors

    Args:
        make: True to recompile the code before running the tests.
        jobs: "auto" to use half of the detected CPUs, else an integer.
        touch: Touch all changed files before compiling.
        clean: Issue `make clean` before `make`.
        keywords: Run only the tests containing these keywords.
    """
    # BUGFIX: the boolean parameter `make` shadows the module-level `make`
    # task, so the original code called `True(ctx, ...)` -> TypeError.
    # Fetch the task from the module namespace and honor the flag.
    if make:
        globals()["make"](ctx, jobs=jobs, touch=touch, clean=clean)

    top = find_top_build_tree(".", with_abinit=True)
    jobs = max(1, number_of_cpus() // 2) if jobs == "auto" else int(jobs)
    kws = "" if keywords is None else "-k %s" % keywords

    with cd(os.path.join(top, "tests")):
        cmd = "./runtests.py -j%d %s" % (jobs, kws)
        cprint("Executing: %s" % cmd, "yellow")
        ctx.run(cmd, pty=True)

        # Now run the parallel tests.
        for n in [2, 4, 10]:
            j = jobs // n
            if j == 0:
                continue
            cmd = "./runtests.py paral mpiio -j%d -n%d %s" % (j, n, kws)
            cprint("Executing: %s" % cmd, "yellow")
            ctx.run(cmd, pty=True)
def make(ctx, jobs="auto", clean=False):
    """
    Touch all modified files and recompile the code with -jNUM.
    """
    with cd(ABINIT_SRCDIR):
        touch_cmd = "./abisrc.py touch"
        cprint("Executing: %s" % touch_cmd, "yellow")
        outcome = ctx.run(touch_cmd, pty=True)
        if not outcome.ok:
            cprint("`%s` failed. Aborting now!" % touch_cmd, "red")
            return 1

    build_top = find_top_build_tree(".", with_abinit=False)
    nthreads = max(1, number_of_cpus() // 2) if jobs == "auto" else int(jobs)

    with cd(build_top):
        if clean:
            ctx.run("make clean", pty=True)

        #cmd = "make -j%d > make.log 2> make.stderr" % jobs
        make_cmd = "make -j%d > >(tee -a make.log) 2> >(tee -a make.stderr >&2)" % nthreads
        cprint("Executing: %s" % make_cmd, "yellow")
        ctx.run(make_cmd, pty=True)
def main():
    """
    Entry point of runtests.py: parse the command-line options, build the test
    suite, run it (possibly in a loop-on-fail mode), post-process the results
    and return the number of failed tests (99 on fatal errors).
    """
    usage = "usage: %prog [suite_args] [options]. Use [-h|--help] for help."
    version = "%prog " + str(__version__)

    class MyOptionParser(OptionParser):
        # Append usage examples to the standard optparse help output.
        def print_help(self):
            OptionParser.print_help(self)
            print("\n" + str_examples())

    parser = MyOptionParser(usage=usage, version=version)
    #parser.add_argument('-v', '--version', action='version', version="%(prog)s version " + __version__)
    parser.add_option('--no-colors', default=False, action="store_true",
                      help='Disable ASCII colors')
    parser.add_option('--no-logo', default=False, action="store_true",
                      help='Disable Abinit logo')
    parser.add_option("-c", "--cfg_file", dest="cfg_fname", type="string",
                      help="Read options from configuration FILE.", metavar="FILE")
    parser.add_option("--force-mpirun", default=False, action="store_true",
                      help="Force execution via mpiruner even for sequential jobs, i.e. np==1, defaults to False")
    parser.add_option("--mpi-args", type="string", help="Options passed to mpirun.", default="")
    parser.add_option("--use-mpiexec", default=False, action="store_true",
                      help="Replace mpirun with mpiexec (ignored if `-c` option is provided)")
    parser.add_option("--use-srun", default=False, action="store_true",
                      help="Use Slurm `srun` to run parallel jobs (ignored if -c is provided)")
    parser.add_option("-n", "--num-mpi-processors", dest="mpi_nprocs", type="int", default=1,
                      help="Maximum number of MPI processes used for tests.")
    parser.add_option("-i", "--input-vars", dest="input_vars", type="string", default="",
                      help=("String with the variables (and values) that should be present in the input file. "
                            "Format: 'name1 value1, name2 value2, name3' "
                            "If value is not given, a wild card is assumed. "
                            "Example: -i 'optdriver 3, getden' will execute only those tests where the "
                            "input file contains optdriver with value 3, and the variable getden "
                            "(irrespectively of its value)."))
    parser.add_option("-j", "--jobs", dest="py_nprocs", type="int", default=1,
                      help="Number of python processes.")
    parser.add_option("--use-cache", default=False, action="store_true",
                      help=("Load database from pickle file."
                            "WARNING: This could lead to unexpected behaviour if the pickle database "
                            "is non up-to-date with the tests available in the active git branch."))
    parser.add_option("-k", "--keywords", dest="keys", default=[], action="callback",
                      callback=vararg_callback,
                      help="Run the tests containing these keywords.")
    parser.add_option("-a", "--authors", dest="authors", default=[], action="callback",
                      callback=vararg_callback,
                      help="Run the tests contributed by these developers.")
    parser.add_option("-t", "--timeout", dest="timeout_time", type="int", default=900,
                      help="Timeout value for Fortran executables (in seconds). -t 0 disables the timeout")
    parser.add_option("-b", "--build-tree", dest="build_dir_path", default="",
                      help="Path to the top level directory of the build tree.")
    parser.add_option("-d", "--dry-run", default=False, action="store_true",
                      help="Print list of tests and exit")
    parser.add_option("--gdb", action="store_true",
                      help=("Run the test(s) under the control of the GNU gdb debugger. "
                            "Support both sequential and MPI executions. In the case of MPI runs, "
                            "the script will open multiple instances of xterm "
                            "(it may not work depending of your architecture)."))
    parser.add_option("--nag", action="store_true",
                      help="Activate NAG mode. Option used by developers")
    parser.add_option("--perf", default="",
                      help="Use `perf` command to profile the test")
    parser.add_option("--abimem", action="store_true", default=False,
                      help=("Inspect abimem.mocc files produced by the tests. "
                            "Requires HAVE_MEM_PROFILE and call abimem_init(2) in main."))
    parser.add_option("--etsf", action="store_true", default=False,
                      help="Validate netcdf files produced by the tests. Requires netcdf4")
    parser.add_option("--touch", default="",
                      help=("Used in conjunction with `-m`."
                            "Touch the source files containing the given expression(s) before recompiling the code. "
                            "Use comma-separated strings *without* empty spaces to specify more than one pattern."))
    parser.add_option("-s", "--show-info", dest="show_info", default=False, action="store_true",
                      help="Show information on the test suite (keywords, authors ...) and exit")
    parser.add_option("-l", "--list-tests-info", dest="list_info", default=False, action="store_true",
                      help="List the tests in test suite (echo description section in ListOfFile files) and exit")
    parser.add_option("-m", "--make", dest="make", type="int", default=0,
                      help="Find the abinit build tree, and compile to code with 'make -j#NUM' before running the tests.")
    parser.add_option("-w", "--workdir", dest="workdir", type="string", default="",
                      help="Directory where the test suite results will be produced.")
    parser.add_option("-o", "--omp_num-threads", dest="omp_nthreads", type="int", default=0,
                      help="Number of OMP threads to use (set the value of the env variable OMP_NUM_THREADS.\n" +
                      "Not compatible with -c. Use the cfg file to specify the OpenMP runtime variables.\n")
    parser.add_option("-p", "--patch", dest="patch", type="str", default="",
                      help=("Patch the reference files of the tests with the status specified by -p."
                            "Diff tool can be specified via $PATCHER e.g. export PATCHER=kdiff3. default: vimdiff."
                            "Examples: `-p failed` to patch the reference files of the failed tests. "
                            "`-p all` to patch all files."
                            "`-p failed+passed` to patch both failed and passed tests or, equivalently, `-p not_succeed`"))
    parser.add_option("--rerun", dest="rerun", type="str", default="",
                      help="Rerun previous tests. Example: `--rerun failed`. Same syntax as patch option.")
    parser.add_option("--looponfail", default=False, action="store_true",
                      help=("Execute the tests and enter a busy loop that will "
                            "recompile the code upon change in the source files and rerun "
                            "the failing tests. Exit when all tests are OK."))
    parser.add_option("-e", "--edit", dest="edit", type="str", default="",
                      help=("Edit the input files of the tests with the specified status. Use $EDITOR as editor."
                            "Examples: -i failed to edit the input files of the the failed tests. "
                            "Status can be concatenated by '+' e.g. failed+passed"))
    parser.add_option("--stderr", type="str", default="",
                      help=("Edit the stderr files of the tests with the specified status. Use $EDITOR as editor. "
                            "Examples: --stderr failed will edit the error files of the the failed tests. "
                            "Status can be concatenated by '+' e.g. failed+passed"))
    parser.add_option("-v", "--verbose", dest="verbose", action="count", default=0,  # -vv --> verbose=2
                      help='Verbose, can be supplied multiple times to increase verbosity')
    parser.add_option("-V", "--valgrind_cmdline", type="str", default="",
                      help=("Run test(s) under the control of valgrind."
                            "Examples: runtests.py -V memcheck or "
                            "runtests.py -V 'memcheck -v' to pass options to valgrind"))
    parser.add_option("--Vmem", action="store_true",
                      help="Shortcut to run test(s) under the control of valgrind memcheck:\n" +
                      "Use --leak-check=full --show-reachable=yes --track-origins=yes")
    parser.add_option("--pedantic", action="store_true",
                      help="Mark test(s) as failed if stderr is not empty.")
    parser.add_option("--erase-files", dest="erase_files", type="int", default=2,
                      help=("0 => Keep all files produced by the test\n" +
                            "1 => Remove files but only if the test passed or succeeded.\n" +
                            "2 => Remove files even if the test failed.\n" +
                            "default=2\n"))
    parser.add_option("--make-html-diff", dest="make_html_diff", type="int", default=0,
                      help=("0 => Do not produce diff files in HTML format\n" +
                            "1 => Produce HTML diff but only if the test failed\n" +
                            "2 => Produce HTML diff independently of the final status of the test.\n" +
                            "default=0\n"))
    parser.add_option("--sub-timeout", dest="sub_timeout", type="int", default=30,
                      help="Timeout (s) for small subprocesses (fldiff.pl, python functions)")
    parser.add_option("--with-pickle", type="int", default=1,
                      help="Save test database in pickle format (default: True).")
    parser.add_option('--loglevel', default="ERROR", type="str",
                      help="set the loglevel. Possible values: CRITICAL, ERROR (default), WARNING, INFO, DEBUG")

    # Parse command line.
    options, suite_args = parser.parse_args()

    if options.show_info:
        abitests.show_info()
        return 0

    # loglevel is bound to the string value obtained from the command line argument.
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, options.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.loglevel)
    logging.basicConfig(level=numeric_level)

    if options.no_colors:
        # Disable colors
        termcolor.enable(False)

    if not options.no_logo:
        nrows, ncols = get_terminal_size()
        if ncols > 100:
            cprint(ascii_abinit(), "green")

    ncpus_detected = max(1, number_of_cpus())
    # NOTE(review): this rebinds `version` defined above (the optparse version string).
    system, node, release, version, machine, processor = platform.uname()

    mpi_nprocs = options.mpi_nprocs
    omp_nthreads = options.omp_nthreads
    py_nprocs = options.py_nprocs

    cprint("Running on %s -- system %s -- ncpus %s -- Python %s -- %s" %
           (gethostname(), system, ncpus_detected, platform.python_version(), _my_name),
           'green', attrs=['underline'])

    # Compile the code before running the tests.
    if options.make:
        retcode = make_abinit(options.make, touch_patterns=options.touch)
        if retcode:
            return retcode

    # Initialize info on the build. User's option has the precedence.
    build_dir_path = os.path.curdir
    if options.build_dir_path:
        build_dir_path = os.path.abspath(options.build_dir_path)

    build_env = BuildEnvironment(build_dir_path)

    timeout_time = options.timeout_time
    if timeout_time > 0 and build_env.has_bin("timeout"):
        # Run executables under the control of timeout.
        timeout_path = build_env.path_of_bin("timeout")
        #timeout_signal = ""
        timebomb = TimeBomb(timeout_time, exec_path=timeout_path)
    else:
        #print("Cannot find timeout executable at: %s" % build_env.path_of_bin("timeout"))
        timebomb = TimeBomb(timeout_time)

    # ------------------------------------------------
    # Initialize the jobrunner for the (MPI|seq) mode
    # ------------------------------------------------
    if options.cfg_fname:
        # read the [mpi] and the [openmp] sections from the external cfg file.
        assert omp_nthreads == 0
        cfg_fname = options.cfg_fname
        logger.info("Initalizing JobRunner from cnf file: %s" % cfg_fname)
        runner = JobRunner.fromfile(cfg_fname, timebomb=timebomb)
    else:
        if mpi_nprocs == 1 and not (options.force_mpirun or options.use_srun):
            logger.info("Initalizing JobRunner for sequential runs.")
            runner = JobRunner.sequential(timebomb=timebomb)
        else:
            logger.info("Initalizing JobRunner assuming generic_mpi. [-c option not provided]")
            # Decide whether we should use mpirun or mpiexec
            # If `use_mpiexec` is specified on the command line args, use it (user is always right)
            # else test for the presence of (mpirun, mpiexec) in $PATH, in this order
            # mpiexec is a MPI standard but we continue to prefer mpirun to
            # maintain the previous behavior.
            if options.use_srun:
                print("initializing srun jobrunner")
                if options.use_mpiexec:
                    raise ValueError("use_srun and use_mpiexec are mutually exclusive")
                if which("srun") is None:
                    raise RuntimeError("Cannot locate srun in $PATH. "
                                       "Please check your environment")
                runner = JobRunner.srun(timebomb=timebomb, mpi_args=options.mpi_args)
            else:
                if options.use_mpiexec:
                    use_mpiexec = options.use_mpiexec
                else:
                    # Use which to select mpirun/mpiexec.
                    use_mpiexec = True
                    if which("mpirun") is not None:
                        use_mpiexec = False
                    elif which("mpiexec") is None:
                        raise RuntimeError("Cannot locate neither mpirun nor mpiexec in $PATH. "
                                           "Please check your environment")
                runner = JobRunner.generic_mpi(use_mpiexec=use_mpiexec, timebomb=timebomb,
                                               mpi_args=options.mpi_args)

    if omp_nthreads > 0:
        omp_env = OMPEnvironment(OMP_NUM_THREADS=omp_nthreads)
        runner.set_ompenv(omp_env)

    # Valgrind support.
    if options.valgrind_cmdline:
        runner.set_valgrind_cmdline(options.valgrind_cmdline)

    # Valgrind shortcuts.
    if options.Vmem:
        runner.set_valgrind_cmdline("memcheck --leak-check=full --show-reachable=yes --track-origins=yes")

    if runner.has_valgrind:
        cmd = "valgrind --tool=%s " % runner.valgrind_cmdline
        cprint("Will invoke valgrind with cmd:\n %s" % cmd, "yellow")

    # Debugging with GNU gdb
    if options.gdb:
        runner.set_debugger("gdb")

    # Profiling with perf.
    if options.perf:
        runner.set_perf_command(options.perf)

    # Select tests according to the input variables.
    # Note that the parser is very primitive and it does not
    # have acces to the default values used by the codes.
    ivars = None
    if options.input_vars:
        ivars = {}
        string = options.input_vars
        if "," in string:
            tokens = string.split(",")
        else:
            tokens = [string, ]
        for tok in tokens:
            keyval = tok.split()
            if len(keyval) == 1:
                # Bare variable name -> wildcard match on the value.
                ivars[keyval[0]] = None
            elif len(keyval) == 2:
                k, v = keyval[0], int(keyval[1])
                ivars[k] = v
            else:
                raise ValueError("Don't know how to interpret string: %s" % tok)

    if options.rerun:
        # Rerun tests with status given by rerun.
        test_suite = reload_test_suite(status_list=parse_stats(options.rerun))
    else:
        regenerate = not options.use_cache
        try:
            test_suite = abitests.select_tests(suite_args, regenerate=regenerate,
                                               keys=options.keys, authors=options.authors,
                                               ivars=ivars, with_pickle=options.with_pickle)
        # NOTE(review): raising the return value of show_examples_and_exit looks
        # suspicious — presumably that helper exits the process; verify.
        except Exception as exc:
            raise show_examples_and_exit(str(exc))

    if not test_suite:
        cprint("No test fulfills the requirements specified by the user!", "red")
        return 99

    workdir = options.workdir
    if not workdir:
        workdir = "Test_suite"

    # Create workdir.
    if not os.path.exists(workdir):
        os.mkdir(workdir)
    else:
        cprint("%s directory already exists. Files will be removed" % workdir, "yellow")

    # Run the tested selected by the user.
    if omp_nthreads == 0:
        ncpus_used = mpi_nprocs * py_nprocs
        msg = ("Running %s test(s) with MPI_procs: %s, py_nprocs: %s" %
               (test_suite.full_length, mpi_nprocs, py_nprocs))
    else:
        ncpus_used = mpi_nprocs * omp_nthreads * py_nprocs
        msg = ("Running %s test(s) with MPI_nprocs: %s, OMP_nthreads: %s, py_nprocs: %s" %
               (test_suite.full_length, mpi_nprocs, omp_nthreads, py_nprocs))
    cprint(msg, "yellow")

    # Give the user a hint if the machine is under- or over-subscribed.
    if ncpus_used < 0.3 * ncpus_detected:
        msg = ("[TIP] runtests.py is using %s CPUs but your architecture has %s CPUs (including Hyper-Threading if Intel)\n"
               "You may want to use python processes to speed up the execution\n"
               "Use `runtests -jNUM` to run with NUM processes" % (ncpus_used, ncpus_detected))
        cprint(msg, "blue")
    elif ncpus_used > 1.5 * ncpus_detected:
        msg = ("[OVERLOAD] runtests.py is using %s CPUs but your architecture has only %s CPUs!!\n" %
               (ncpus_used, ncpus_detected))
        cprint(msg, "magenta")

    if options.list_info:
        with open("ListOfTests.html", "w") as fh:
            fh.write(test_suite.make_listoftests(width=160, html=True))
        with open("ListOfTests.txt", "w") as fh:
            fh.write(test_suite.make_listoftests(width=100, html=False))
        sys.exit(0)

    if options.dry_run:
        print("Dry-run mode, print list of tests and exit(0)")
        for i, test in enumerate(test_suite):
            print("%d) %s" % (i, test))
        sys.exit(0)

    # If np > 1, use dynamic runmode.
    runmode = "static"
    if mpi_nprocs > 1:
        runmode = "dynamic"

    results = test_suite.run_tests(build_env, workdir, runner,
                                   nprocs=mpi_nprocs,
                                   py_nprocs=py_nprocs,
                                   runmode=runmode,
                                   erase_files=options.erase_files,
                                   make_html_diff=options.make_html_diff,
                                   sub_timeout=options.sub_timeout,
                                   pedantic=options.pedantic,
                                   abimem_check=options.abimem,
                                   etsf_check=options.etsf)
    if results is None:
        return 99

    if options.looponfail:
        # Keep re-running the failing subset until everything passes (or we give up).
        count, max_iterations = 0, 100
        cprint("\n\nEntering looponfail loop with max_iterations %d" % max_iterations, "yellow")
        abenv.start_watching_sources()
        while count < max_iterations:
            count += 1
            test_list = [t for t in test_suite if t.status == "failed"]
            if not test_list:
                cprint("All tests ok. Exiting looponfail", "green")
                break
            else:
                cprint("%d test(s) are still failing" % len(test_list), "red")

            changed = abenv.changed_sources()
            if not changed:
                sleep_time = 10
                cprint("No change in source files detected. Will sleep for %s seconds..." % sleep_time, "yellow")
                time.sleep(sleep_time)
                continue
            else:
                print("Invoking `make` because the following files have been changed:")
                for i, path in enumerate(changed):
                    print("[%d] %s" % (i, os.path.relpath(path)))
                rc = make_abinit(ncpus_detected)
                if rc != 0:
                    cprint("make_abinit returned %s, tests are postponed" % rc, "red")
                    continue

            test_suite = AbinitTestSuite(test_suite.abenv, test_list=test_list)
            results = test_suite.run_tests(build_env, workdir, runner,
                                           nprocs=mpi_nprocs,
                                           py_nprocs=py_nprocs,
                                           runmode=runmode,
                                           erase_files=options.erase_files,
                                           make_html_diff=options.make_html_diff,
                                           sub_timeout=options.sub_timeout,
                                           pedantic=options.pedantic,
                                           abimem_check=options.abimem,
                                           etsf_check=options.etsf)
            if results is None:
                return 99

        if count == max_iterations:
            cprint("Reached max_iterations", "red")

    # Threads do not play well with KeyBoardInterrupt
    #except KeyboardInterrupt:
    #    all_programs = ["abinit", "anaddb", "mrgscr", "mrgddb", "mrgdv", "mpirun", "mpiexec"]
    #    cprint("Interrupt sent by user. Will try to `killall executables` where:")
    #    print("executables:", str(all_programs))
    #    answer = prompt("Do you want to kill'em all? [Y/n]")
    #    if not answer.lower().strip() in ["n", "no"]:
    #        for prog in all_programs:
    #            os.system("killall %s" % prog)
    #    return 66

    # Edit input files.
    if options.edit:
        for status in parse_stats(options.edit):
            print("Editing input files of tests with status %s" % status)
            results.edit_inputs(status=status)

    # Edit error files.
    if options.stderr:
        for status in parse_stats(options.stderr):
            print("Opening stderror files of tests with status %s" % status)
            results.inspect_stderrs(status=status)

    # Patch reference files.
    if options.patch:
        for status in parse_stats(options.patch):
            cprint("Patching tests with status %s" % status, "yellow")
            results.patch_refs(status=status)

    if options.nag:
        for test in results.failed_tests:
            for trace in test.get_backtraces():
                cprint(trace, "red")
                trace.edit_source()

    # Save test_suite after execution so that we can reread it.
    with open(".prev_run.pickle", "wb") as fh:
        pickle.dump(test_suite, fh)

    print("")
    print("Execution completed.")
    print("Results in HTML format are available in %s" % (os.path.join(workdir, "suite_report.html")))

    try:
        return results.nfailed
    except AttributeError:
        return 99
def watchdog(ctx, jobs="auto", sleep_time=5):
    """
    Start watchdog service to watch F90 files and execute `make` when changes are detected.

    Args:
        jobs: "auto" to use half of the detected CPUs for make, else an integer.
        sleep_time: Seconds to sleep between iterations of the main loop.
    """
    cprint("Starting watchdog service to watch F90 files and execute `make` when changes are detected",
           color="green")
    cprint("Enter <CTRL + C> in the terminal to kill the service.", color="green")
    cprint(f"Start watching F90 files with sleep_time {sleep_time} s ....", color="green")
    top = find_top_build_tree(".", with_abinit=True)
    jobs = max(1, number_of_cpus() // 2) if jobs == "auto" else int(jobs)

    # http://thepythoncorner.com/dev/how-to-create-a-watchdog-in-python-to-look-for-filesystem-changes/
    # https://stackoverflow.com/questions/19991033/generating-multiple-observers-with-python-watchdog
    import time
    from watchdog.observers import Observer
    from watchdog.events import PatternMatchingEventHandler

    # BUGFIX: `patterns` and `ignore_patterns` must be *lists* of glob patterns.
    # The original passed the bare string "*.F90", which watchdog iterates
    # character by character, so no file ever matched "*.F90" as a whole.
    event_handler = PatternMatchingEventHandler(patterns=["*.F90"],
                                                ignore_patterns=[],
                                                ignore_directories=False,
                                                case_sensitive=True)

    def on_created(event):
        print(f"hey, {event.src_path} has been created!")

    def on_deleted(event):
        print(f"what the f**k! Someone deleted {event.src_path}!")

    def on_modified(event):
        # Recompile the project whenever a watched F90 file changes.
        print(f"hey buddy, {event.src_path} has been modified")
        cmd = "make -j%d > >(tee -a make.log) 2> >(tee -a make.stderr >&2)" % jobs
        cprint("Executing: %s" % cmd, color="yellow")
        with cd(top):
            try:
                result = ctx.run(cmd, pty=True)
                if result.ok:
                    cprint("Make completed successfully", color="green")
                    cprint("Watching for changes ...", color="green")
            except Exception:
                # Fixed: these were f-strings without any placeholder (ruff F541).
                cprint("Make returned non-zero exit status", color="red")
                cprint("Keep on watching for changes hoping you get it right ...",
                       color="red")

    def on_moved(event):
        print(f"ok ok ok, someone moved {event.src_path} to {event.dest_path}")

    event_handler.on_created = on_created
    event_handler.on_deleted = on_deleted
    event_handler.on_modified = on_modified
    event_handler.on_moved = on_moved

    observer = Observer()
    path = ABINIT_SRCDIR
    observer.schedule(event_handler, path, recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(sleep_time)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def run(cmd):
    # Echo the command in green, then execute it through the invoke context
    # captured from the enclosing scope.
    message = f"Executing: `{cmd}`"
    cprint(message, color="green")
    ctx.run(cmd)
def main(warno, home_dir=""):
    """
    Scan make.log for occurrences of the gfortran warning pattern selected by
    `warno` (an index into `gnu_warnings`), print one line per warning found in
    a non-excluded source directory, and return the warning count.

    Args:
        warno: index into the `gnu_warnings` table (pattern, excluded dirs).
        home_dir: top-level abinit directory; guessed from sys.argv[0]/cwd if empty.
    """
    debug = 0

    if not home_dir:
        cwd_dir = os.getcwd()
        if os.path.isabs(sys.argv[0]):
            home_dir = os.path.normpath(
                os.path.join(os.path.dirname(sys.argv[0]), "../.."))
            inp_dir = os.path.join(home_dir, "abichecks/abirules/Input")
        else:
            inp_dir = os.path.join("..", "Input")
            home_dir = os.path.join(cwd_dir, "../../..")
    else:
        inp_dir = os.path.join(home_dir, "abichecks", "abirules", "Input")

    assert os.path.isdir(inp_dir)
    warno = int(warno)
    Warning = gnu_warnings[warno][0]
    Warning_len = len(Warning.split(" "))
    src_excluded = gnu_warnings[warno][1]

    # header
    print(
        "**********************************************************************"
    )
    print("Warning pattern : '" + Warning + "'")
    print(
        "**********************************************************************"
    )

    makelog = os.path.join(home_dir, "make.log")
    if not os.path.exists(makelog):
        raise RuntimeError(
            "Cannot find `make.log` file in `%s`.\nUse `make -O multi -j8 > make.log 2>&1`"
            % home_dir)

    # make.log contains utf-8 characters
    #import io
    #logfile = io.open(makelog, "r", encoding="utf-8")
    logfile = open(makelog)

    words = []
    # Sliding window over the last 5 lines of make.log: compiler warnings span
    # several consecutive lines (location / code / caret / message).
    Buffer = []
    linec = 0
    warning_count = 0
    start = False
    for line in logfile:
        linec = linec + 1
        if linec > 5:
            Buffer.pop(0)
        Buffer.append(line)
        if start == False:
            # Examine the make.log file, starting with the section where the directory 10_defs was treated.
            if line.find("Making all in 10_defs") == -1:
                continue
            else:
                start = True
        if line.find(Warning) != -1:
            if debug:
                print("[DEBUG] Buffer[0]:", Buffer[0])  # source.F90:line.pos:
                print("[DEBUG] Buffer[2]:", Buffer[2])  # instruction
                print("[DEBUG] Buffer[1]:", Buffer[1])  # position
                print("[DEBUG] Buffer[4]:", Buffer[4])  # Warning: msg
            # NOTE(review): `if True:` leaves the trailing `else:` branch below
            # permanently dead — it looks like a disabled alternative parser.
            if True:
                if debug:
                    print("[DEBUG] len of Buffer[0]:", len(Buffer[0].strip()))
                if len(Buffer[0].strip()) != 0:
                    source = Buffer[0].split(":")[0]
                    if source.find('Included at'):
                        source = source.split(" ")[-1]
                    sourceline = Buffer[0].split(":")[1]
                    try:
                        sourceline = sourceline.split(".")[0]
                    except IndexError:
                        pass
                    # Locate the source file inside src/*/ to recover its directory.
                    pattern = os.path.join(home_dir, "src") + "/*/" + source
                    path = glob.glob(pattern)
                    assert len(path) < 2
                    try:
                        source_dir = path[0].split('/')
                        if debug:
                            print("[DEBUG] source_dir :" + source_dir[-2])
                        # .index() raises ValueError when the directory is NOT
                        # excluded — that exception branch does the reporting.
                        if src_excluded.index(source_dir[-2]):
                            pass
                    except IndexError:
                        pass
                    except ValueError:
                        warning_count += 1
                        try:
                            if warno in [3, 4]:
                                warn_msg = Buffer[4].split(" ")[Warning_len + 1]
                                print(source + ' = line: ' + sourceline +
                                      ', var: ' + warn_msg + ' [' +
                                      source_dir[-2] + ']')
                            elif warno in [6, 10]:
                                warn_msg = Buffer[4].split(":")[1].rstrip()
                                warn_code = Buffer[2].rstrip()
                                warn_pos = Buffer[3].rstrip()
                                print("%s = line: %s, " % (source, sourceline), end='')
                                cprint("warn: %s" % (warn_msg), "red")
                                cprint(
                                    " ->%s\n ->%s" % (warn_code, warn_pos), "red")
                            elif warno in [7]:
                                warn_code = Buffer[2].rstrip().lstrip()
                                print("%s = line: %s, " % (source, sourceline), end='')
                                cprint("code: %s" % (warn_code), "red")
                            elif warno in [20]:
                                a = Buffer[4].split(":")[1].split(
                                    " declared")[0]
                                print(source + ' = line: ' + sourceline +
                                      ', warn:' + a + ' [' + source_dir[-2] + ']')
                            else:
                                print(source + ' = line: ' + sourceline +
                                      ' [' + source_dir[-2] + ']')
                        except IndexError:
                            print(source + ' = line: ' + sourceline + ' [' +
                                  source_dir[-2] + ']')
                else:
                    print(
                        " ***** Can't determine source but warning exists...")
                    if debug:
                        break
            else:
                # Dead code (see `if True:` above): alternative parsing that
                # reads the location from Buffer[4] instead of Buffer[0].
                source = Buffer[4].split(":")[0]
                sourceline = Buffer[4].split(":")[1]
                pattern = os.path.join(home_dir, "src") + "/*/" + source
                path = glob.glob(pattern)
                source_dir = path[0].split('/')
                if debug:
                    print("[DEBUG] source_dir :" + source_dir[-2])
                try:
                    if src_excluded.index(source_dir[-2]):
                        warning_count += 1
                        print(Buffer[4].strip(), ' [' + source_dir[-2] + ']')
                except ValueError:
                    pass

    logfile.close()

    # footer
    print(
        "**********************************************************************"
    )
    print("Warning count = " + str(warning_count))
    print(
        "**********************************************************************"
    )
    return warning_count