def run_programs(bench):
    "Run generated programs."

    # This matches argument parsing in the generated main files
    bench = 'b' if bench else ''

    # Get a list of all files
    test_programs = [f for f in os.listdir(".") if f.endswith(".bin")]
    test_programs.sort()
    begin("Running generated programs (%d programs found)" % len(test_programs))

    # Iterate over all files
    for f in test_programs:

        # Run test program
        prefix = f.split(".bin")[0]
        ok = run_command(".%s%s.bin %s" % (os.path.sep, prefix, bench))

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s failed" % f)

    end()
def generate_code(args, only_forms):
    "Generate code for all test cases."

    # Get a list of all files
    form_files = [f for f in os.listdir(".") if f.endswith(".ufl")]
    if only_forms:
        form_files = [f for f in form_files if f in only_forms]
    form_files.sort()
    begin("Generating code (%d form files found)" % len(form_files))

    # TODO: Parse additional options from .ufl file? I.e. grep for
    # some sort of tag like '#ffc: <flags>'.
    special = {"AdaptivePoisson.ufl": "-e"}

    # Iterate over all files
    for f in form_files:
        options = special.get(f, "")
        cmd = ("ffc %s %s -f precision=8 -fconvert_exceptions_to_warnings %s"
               % (options, " ".join(args), f))

        # Generate code
        ok = run_command(cmd)

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s failed" % f)

    end()
def run_programs(bench):
    "Run generated programs."

    # This matches argument parsing in the generated main files
    bench = 'b' if bench else ''

    # Get a list of all files
    test_programs = [f for f in os.listdir(".") if f.endswith(".bin")]
    test_programs.sort()
    begin("Running generated programs (%d programs found)" % len(test_programs))

    # Iterate over all files
    for f in test_programs:

        # Run test program, removing any stale output first
        prefix = f.split(".bin")[0]
        try:
            os.remove(prefix + ".out")
        except OSError:
            pass
        ok = run_command(".%s%s.bin %s > %s.out"
                         % (os.path.sep, prefix, bench, prefix))

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s failed" % f)

    end()
def compile_element(ufl_element, ffc_fail, log_file):
    "Create UFL form file with a single element in it and compile it with FFC"
    with open("test.ufl", "w") as f:
        f.write("element = " + repr(ufl_element))
    error, output = get_status_output("ffc test.ufl")
    if error:
        info_red("FFC compilation failed.")
        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
        ffc_fail.append(str(ufl_element))
    return error
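
# Hedged usage sketch for compile_element() (not part of the original suite);
# the element below is a made-up example and assumes the legacy UFL API
# (ufl.FiniteElement, ufl.triangle) is importable.
def _demo_compile_element():
    import ufl
    ffc_fail = []
    element = ufl.FiniteElement("Lagrange", ufl.triangle, 2)
    error = compile_element(element, ffc_fail, "error.log")
    # Zero status and an empty failure list mean FFC accepted the element
    return error == 0 and not ffc_fail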
def generate_code(args, only_forms, skip_forms, debug):
    "Generate code for all test cases."
    global _command_timings

    # Get a list of all files
    form_files = [f for f in os.listdir(".")
                  if f.endswith(".ufl") and f not in skip_forms]
    if only_forms:
        form_files = [f for f in form_files if f in only_forms]
    form_files.sort()
    begin("Generating code (%d form files found)" % len(form_files))

    # TODO: Parse additional options from .ufl file? I.e. grep for
    # some sort of tag like '#ffc: <flags>'.
    special = {"AdaptivePoisson.ufl": "-e"}

    failures = []

    # Iterate over all files
    for f in form_files:
        options = [special.get(f, "")]
        options.extend(args)
        options.extend(["-f", "precision=8", "-f", "epsilon=1e-7",
                        "-fconvert_exceptions_to_warnings"])
        options.append(f)
        options = list(filter(None, options))
        cmd = sys.executable + " -m ffc " + " ".join(options)

        # Generate code
        t1 = time.time()
        try:
            ok = ffc.main(options)
        except Exception as e:
            if debug:
                raise e
            msg = traceback.format_exc()
            log_error(cmd)
            log_error(msg)
            ok = 1
        finally:
            t2 = time.time()
            _command_timings.append((cmd, t2 - t1))

        # Check status
        if ok == 0:
            info_green("%s OK" % f)
        else:
            info_red("%s failed" % f)
            failures.append(f)

    end()
    return failures
def run_code(ufl_element, deriv_order, run_fail, log_file):
    "Compute values of basis functions for given element."

    # Run compiled code and get values
    error, output = get_status_output(".%sevaluate_basis %d"
                                      % (os.path.sep, deriv_order))
    if error:
        info_red("Runtime error (segmentation fault?).")
        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
        run_fail.append(str(ufl_element))
        return None

    # Parse whitespace-separated values, one line per evaluation point
    values = [[float(value) for value in line.strip().split(" ") if value]
              for line in output.strip().split("\n")]
    return numpy.array(values)
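
# Hedged sketch (not from the original suite): how the parsing step in
# run_code() turns the program's stdout into a numpy array. The sample
# output string below is made up for illustration.
def _demo_parse_values():
    import numpy
    output = "0.0 1.0 2.0\n0.5 0.25 0.125\n"
    values = [[float(value) for value in line.strip().split(" ") if value]
              for line in output.strip().split("\n")]
    return numpy.array(values)  # shape (2, 3): two lines, three values each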
def find_boost_cflags():
    # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py)
    # Set a default directory for the boost installation
    if sys.platform == "darwin":
        # Use Brew as default
        default = os.path.join(os.path.sep, "usr", "local")
    else:
        default = os.path.join(os.path.sep, "usr")

    # If BOOST_DIR is not set use default directory
    boost_inc_dir = ""
    boost_lib_dir = ""
    boost_math_tr1_lib = "boost_math_tr1"
    boost_dir = os.getenv("BOOST_DIR", default)
    boost_is_found = False
    for inc_dir in ["", "include"]:
        if os.path.isfile(os.path.join(boost_dir, inc_dir,
                                       "boost", "version.hpp")):
            boost_inc_dir = os.path.join(boost_dir, inc_dir)
            break
    libdir_multiarch = "lib/" + sysconfig.get_config_vars().get("MULTIARCH", "")
    for lib_dir in ["", "lib", libdir_multiarch, "lib64"]:
        for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]:
            _lib = os.path.join(boost_dir, lib_dir,
                                "lib" + boost_math_tr1_lib + ext)
            if os.path.isfile(_lib):
                if "-mt" in _lib:
                    boost_math_tr1_lib += "-mt"
                boost_lib_dir = os.path.join(boost_dir, lib_dir)
                break
    if boost_inc_dir != "" and boost_lib_dir != "":
        boost_is_found = True

    if boost_is_found:
        boost_cflags = " -I%s -L%s" % (boost_inc_dir, boost_lib_dir)
        boost_linkflags = "-l%s" % boost_math_tr1_lib
    else:
        boost_cflags = ""
        boost_linkflags = ""
        info_red("""The Boost library was not found.
If Boost is installed in a nonstandard location,
set the environment variable BOOST_DIR.
Forms using bessel functions will fail to build.
""")
    return boost_cflags, boost_linkflags
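
# Hedged usage sketch: how the Boost flags found above might be folded into
# a g++ command line. File names are illustrative; note that boost_cflags
# starts with a leading space when Boost is found, and both strings are
# empty otherwise, so the command degrades gracefully.
def _demo_boost_compile_command():
    boost_cflags, boost_linkflags = find_boost_cflags()
    return "g++ -Wall%s -o demo.bin demo.cpp %s" % (boost_cflags,
                                                    boost_linkflags)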
def validate_code(reference_dir):
    "Validate generated code against references."

    # Get a list of all files
    header_files = sorted([f for f in os.listdir(".") if f.endswith(".h")])
    begin("Validating generated code (%d header files found)"
          % len(header_files))

    failures = []

    # Iterate over all files
    for f in header_files:

        # Get generated code
        generated_code = open(f).read()

        # Get reference code
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            reference_code = open(reference_file).read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        if generated_code == reference_code:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)
            difflines = difflib.unified_diff(reference_code.split("\n"),
                                             generated_code.split("\n"))
            diff = "\n".join(difflines)
            s = ("Code differs for %s, diff follows (reference first, generated second)"
                 % os.path.join(*reference_file.split(os.path.sep)[-3:]))
            log_error("\n" + s + "\n" + len(s) * "-")
            log_error(diff)
            failures.append(f)

    end()
    return failures
def validate_code(reference_dir):
    "Validate generated code against references."

    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Validating generated code (%d header files found)"
          % len(header_files))

    # Iterate over all files
    for f in header_files:

        # Get generated code
        generated_code = open(f).read()

        # Get reference code
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            reference_code = open(reference_file).read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        if generated_code == reference_code:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)
            diff = "\n".join([line for line in difflib.unified_diff(
                reference_code.split("\n"),
                generated_code.split("\n"))])
            s = ("Code differs for %s, diff follows (reference first, generated second)"
                 % os.path.join(*reference_file.split(os.path.sep)[-3:]))
            log_error("\n" + s + "\n" + len(s) * "-")
            log_error(diff)

    end()
def validate_programs(reference_dir):
    "Validate generated programs against references."

    # Get a list of all files
    output_files = sorted(f for f in os.listdir(".") if f.endswith(".json"))
    begin("Validating generated programs (%d .json program output files found)"
          % len(output_files))

    failures = []

    # Iterate over all files
    for fj in output_files:

        # Get generated json output
        if os.path.exists(fj):
            generated_json_output = open(fj).read()
            if "nan" in generated_json_output:
                info_red("Found nan in generated json output, replacing with 999 to be able to parse as python dict.")
                generated_json_output = generated_json_output.replace("nan", "999")
        else:
            generated_json_output = "{}"

        # Get reference json output
        reference_json_file = os.path.join(reference_dir, fj)
        if os.path.isfile(reference_json_file):
            reference_json_output = open(reference_json_file).read()
        else:
            info_blue("Missing reference for %s" % reference_json_file)
            reference_json_output = "{}"

        # Compare json with reference using recursive diff algorithm
        # TODO: Write to different error file?
        from recdiff import recdiff, print_recdiff, DiffEqual

        # Assuming reference is well formed
        reference_json_output = eval(reference_json_output)
        try:
            generated_json_output = eval(generated_json_output)
        except Exception as e:
            info_red("Failed to evaluate json output for %s" % fj)
            log_error(str(e))
            generated_json_output = None
        json_diff = (None if generated_json_output is None else
                     recdiff(generated_json_output, reference_json_output,
                             tolerance=output_tolerance))
        json_ok = json_diff == DiffEqual

        # Check status
        if json_ok:
            info_green("%s OK" % fj)
        else:
            info_red("%s differs" % fj)
            log_error("Json output differs for %s, diff follows (generated first, reference second)"
                      % os.path.join(*reference_json_file.split(os.path.sep)[-3:]))
            print_recdiff(json_diff, printer=log_error)
            failures.append(fj)

    end()
    return failures
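
# Hedged sketch of the recdiff comparison used above, on two made-up dicts.
# recdiff is the local helper module already imported inside
# validate_programs(); the call signature and DiffEqual comparison mirror
# that usage.
def _demo_recdiff():
    from recdiff import recdiff, DiffEqual
    reference = {"norm": 1.0, "values": [0.1, 0.2]}
    generated = {"norm": 1.0 + 1e-9, "values": [0.1, 0.2]}
    # Equal up to the given tolerance, so the diff compares equal
    return recdiff(generated, reference, tolerance=1e-6) == DiffEqual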
def build_pyop2_programs(bench, permissive, debug=False):

    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Building test programs (%d header files found)" % len(header_files))

    # Set compiler options (default to no extra flags so the variable is
    # always defined, even with --permissive)
    compiler_options = ""
    if not permissive:
        compiler_options = "-Werror"
    if bench > 0:
        info("Benchmarking activated")
        compiler_options = "-Wall -Werror"
    if debug:
        info("Debugging activated")
        compiler_options = "-Wall -Werror -g"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:

        # Generate test code
        filename = _generate_test_code(f, bench)

        # Compile test code
        prefix = f.split(".h")[0]
        command = "g++ %s -o %s.bin %s.cpp -lboost_math_tr1" % \
                  (compiler_options, prefix, prefix)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def verify_values(ufl_element, ref_values, ffc_values, dif_cri, dif_acc,
                  correct, log_file):
    "Check the values from evaluate_basis*() against some reference values."

    num_tests = len(ffc_values)
    if num_tests != len(ref_values):
        raise RuntimeError("The number of computed values is not equal to the number of reference values.")

    errors = [str(ufl_element)]
    for deriv_order in range(num_tests):
        if deriv_order == 0:
            s = " evaluate_basis"
        else:
            s = " evaluate_basis_derivatives, order = %d" % deriv_order
        e = abs(ffc_values[deriv_order] - ref_values[deriv_order])
        error = e.max()
        if error > tol:
            if error > crit_tol:
                m = "%s failed: error = %s (crit_tol: %s)" % (s, str(error), str(crit_tol))
                info_red(m)
                dif_cri.append(str(ufl_element))
                s = s + "\n" + m
            else:
                m = "%s ok: error = %s (tol: %s)" % (s, str(error), str(tol))
                info_blue(m)
                dif_acc.append(str(ufl_element))
                s = s + "\n" + m
            errors.append(s)
        else:
            info_green("%s OK" % s)
            correct.append(str(ufl_element))

    # Log errors if any
    if len(errors) > 1:
        log_error("\n".join(errors), log_file)

    return num_tests
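
# Hedged numeric illustration (not from the original suite) of the error
# measure used in verify_values(): the maximum absolute entry-wise
# difference, which is then classified against the module-level tol and
# crit_tol thresholds.
def _demo_error_measure():
    import numpy
    ref = numpy.array([1.0, 2.0, 3.0])
    ffc = numpy.array([1.0, 2.0, 3.0 + 5e-9])
    error = abs(ffc - ref).max()
    # Classified OK if error <= tol, acceptable if tol < error <= crit_tol,
    # and a critical failure beyond crit_tol
    return error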
def compile_gcc_code(ufl_element, code, gcc_fail, log_file):

    # Write code.
    with open("evaluate_basis.cpp", "w") as f:
        f.write(code)

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Compile g++ code
    c = "g++ %s -Wall -Werror -o evaluate_basis evaluate_basis.cpp" % ufc_cflags
    with open("compile.sh", "w") as f:
        f.write(c + "\n")

    error, output = get_status_output(c)
    if error:
        info_red("GCC compilation failed.")
        log_error("element: %s,\n%s\n" % (str(ufl_element), output), log_file)
        gcc_fail.append(str(ufl_element))
    if error and ("-f" in sys.argv or "--failfast" in sys.argv):
        print("FAIL")
        exit(1)
    return error
def build_programs(bench, permissive):
    "Build test programs for all test cases."

    # NOTE: assumes a module-level 'debug' flag set elsewhere in the script.

    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Building test programs (%d header files found)" % len(header_files))

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py)
    # Set a default directory for the boost installation
    if sys.platform == "darwin":
        # Use Brew as default
        default = os.path.join(os.path.sep, "usr", "local")
    else:
        default = os.path.join(os.path.sep, "usr")

    # If BOOST_DIR is not set use default directory
    boost_inc_dir = ""
    boost_lib_dir = ""
    boost_math_tr1_lib = "boost_math_tr1"
    boost_dir = os.getenv("BOOST_DIR", default)
    boost_is_found = False
    for inc_dir in ["", "include"]:
        if os.path.isfile(os.path.join(boost_dir, inc_dir,
                                       "boost", "version.hpp")):
            boost_inc_dir = os.path.join(boost_dir, inc_dir)
            break
    libdir_multiarch = "lib/" + sysconfig.get_config_vars().get("MULTIARCH", "")
    for lib_dir in ["", "lib", libdir_multiarch, "lib64"]:
        for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]:
            _lib = os.path.join(boost_dir, lib_dir,
                                "lib" + boost_math_tr1_lib + ext)
            if os.path.isfile(_lib):
                if "-mt" in _lib:
                    boost_math_tr1_lib += "-mt"
                boost_lib_dir = os.path.join(boost_dir, lib_dir)
                break
    if boost_inc_dir != "" and boost_lib_dir != "":
        boost_is_found = True

    if not boost_is_found:
        raise OSError("""The Boost library was not found.
If Boost is installed in a nonstandard location,
set the environment variable BOOST_DIR.
""")

    ufc_cflags += " -I%s -L%s" % (boost_inc_dir, boost_lib_dir)

    # Set compiler options
    compiler_options = "%s -Wall " % ufc_cflags
    if not permissive:
        compiler_options += " -Werror -pedantic"
    if bench:
        info("Benchmarking activated")
        # Takes too long to build with -O2
        #compiler_options += " -O2"
        compiler_options += " -O3"
        #compiler_options += " -O3 -fno-math-errno -march=native"
    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:

        # Generate test code
        filename = generate_test_code(f)

        # Compile test code
        prefix = f.split(".h")[0]
        command = "g++ %s -o %s.bin %s.cpp -l%s" % \
                  (compiler_options, prefix, prefix, boost_math_tr1_lib)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def build_programs(bench, permissive, debug, verbose):
    "Build test programs for all test cases."

    # Get a list of all files
    header_files = sorted([f for f in os.listdir(".") if f.endswith(".h")])
    begin("Building test programs (%d header files found)" % len(header_files))

    # Get UFC flags
    ufc_cflags = "-I" + get_ufc_include() + " " + " ".join(get_ufc_cxx_flags())

    # Get boost flags
    boost_cflags, boost_linkflags = find_boost_cflags()

    # Get compiler
    compiler = os.getenv("CXX", "g++")

    # Set compiler options
    compiler_options = " -Wall"
    if not permissive:
        compiler_options += " -Werror -pedantic"

    # Always need ufc
    compiler_options += " " + ufc_cflags

    if bench:
        info("Benchmarking activated")
        compiler_options += " -O3 -march=native"
        # Workaround for gcc bug: gcc is too eager to report array-bounds
        # warning with -O3
        compiler_options += " -Wno-array-bounds"

    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"

    info("Compiler options: %s" % compiler_options)

    failures = []

    # Iterate over all files
    for f in header_files:
        prefix = f.split(".h")[0]

        # Options for all files
        cpp_flags = compiler_options
        ld_flags = ""

        # Only add boost flags if necessary
        needs_boost = prefix == "MathFunctions"
        if needs_boost:
            info("Additional compiler options for %s: %s" % (prefix, boost_cflags))
            info("Additional linker options for %s: %s" % (prefix, boost_linkflags))
            cpp_flags += " " + boost_cflags
            ld_flags += " " + boost_linkflags

        # Generate test code
        filename = generate_test_code(f)

        # Compile test code
        command = "%s %s -o %s.bin %s.cpp %s" % \
                  (compiler, cpp_flags, prefix, prefix, ld_flags)
        ok = run_command(command, verbose)

        # Store compile command for easy reproduction
        with open("%s.build" % (prefix,), "w") as fbuild:
            fbuild.write(command + "\n")

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)
            failures.append(prefix)

    end()
    return failures
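
# Hedged sketch (not from the original suite): the "<prefix>.build" file
# written above stores the exact compile command, so a failed build can be
# reproduced by hand. The prefix below is one of the test header names.
def _demo_rebuild(prefix="MathFunctions"):
    import subprocess
    with open("%s.build" % prefix) as f:
        command = f.read().strip()
    # Re-run the stored command; returns the compiler's exit status
    return subprocess.call(command, shell=True)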
def main(args):
    "Run all regression tests."

    # Check command-line arguments TODO: Use getargs or something
    generate_only = "--generate-only" in args
    fast = "--fast" in args
    bench = "--bench" in args
    use_quad = "--skip-quad" not in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    permissive = "--permissive" in args
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    skip_download = "--skip-download" in args

    flags = (
        "--generate-only",
        "--fast",
        "--bench",
        "--skip-quad",
        "--ext-quad",
        "--ext-uflacs",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--skip-download",
    )
    args = [arg for arg in args if arg not in flags]

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        failure, output = get_status_output("./scripts/download")
        print(output)
        if failure:
            info_red("Download reference data failed")
        else:
            info_green("Download reference data ok")

    if tolerant:
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to
    # run here
    test_cases = ["-r auto"]
    if use_quad and (not bench and not fast):
        test_cases += ["-r quadrature", "-r quadrature -O"]
    if use_ext_quad:
        test_cases += ext_quad
    if use_ext_uflacs:
        test_cases = ext_uflacs
        test_cases += ["-r quadrature"]
        #test_cases += ["-r quadrature -O"]

    for argument in test_cases:
        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Generate test cases
        generate_test_cases(bench, only_forms)

        # Generate code
        generate_code(args + [argument], only_forms)

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continuously test that the codes generated
        # by all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if not bench and argument not in ext_quad:
            validate_code(code_reference_dir)

        # Build and run programs and validate output to common
        # reference
        if fast or generate_only:
            info("Skipping program validation")
        elif bench:
            build_programs(bench, permissive)
            run_programs(bench)
        else:
            build_programs(bench, permissive)
            run_programs(bench)
            validate_programs(output_reference_dir)

        # Go back up
        os.chdir(os.path.pardir)

        end()

    # Print results
    if print_timing:
        timings = '\n'.join("%10.2e s %s" % (t, name)
                            for (name, t) in _command_timings)
        info_green("Timing of all commands executed:")
        info(timings)
    if logfile is None:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info("Error messages stored in error.log")
        return 1
def build_programs(bench, permissive):
    "Build test programs for all test cases."

    # NOTE: assumes a module-level 'debug' flag set elsewhere in the script.

    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Building test programs (%d header files found)" % len(header_files))

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py)
    # Set a default directory for the boost installation
    if sys.platform == "darwin":
        # Use Brew as default
        default = os.path.join(os.path.sep, "usr", "local")
    else:
        default = os.path.join(os.path.sep, "usr")

    # If BOOST_DIR is not set use default directory
    boost_inc_dir = ""
    boost_lib_dir = ""
    boost_math_tr1_lib = "boost_math_tr1"
    boost_dir = os.getenv("BOOST_DIR", default)
    boost_is_found = False
    for inc_dir in ["", "include"]:
        if os.path.isfile(os.path.join(boost_dir, inc_dir,
                                       "boost", "version.hpp")):
            boost_inc_dir = os.path.join(boost_dir, inc_dir)
            break
    for lib_dir in ["", "lib", "lib/x86_64-linux-gnu"]:
        for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]:
            _lib = os.path.join(boost_dir, lib_dir,
                                "lib" + boost_math_tr1_lib + ext)
            if os.path.isfile(_lib):
                if "-mt" in _lib:
                    boost_math_tr1_lib += "-mt"
                boost_lib_dir = os.path.join(boost_dir, lib_dir)
                break
    if boost_inc_dir != "" and boost_lib_dir != "":
        boost_is_found = True

    if not boost_is_found:
        raise OSError("""The Boost library was not found.
If Boost is installed in a nonstandard location,
set the environment variable BOOST_DIR.
""")

    ufc_cflags += " -I%s -L%s" % (boost_inc_dir, boost_lib_dir)

    # Set compiler options
    compiler_options = "%s -Wall" % ufc_cflags
    if not permissive:
        compiler_options += " -Werror -pedantic"
    if bench:
        info("Benchmarking activated")
        # Takes too long to build with -O2
        #compiler_options += " -O2"
        compiler_options += " -O3"
        #compiler_options += " -O3 -fno-math-errno -march=native"
    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:

        # Generate test code
        filename = generate_test_code(f)

        # Compile test code
        prefix = f.split(".h")[0]
        command = "g++ %s -o %s.bin %s.cpp -l%s" % \
                  (compiler_options, prefix, prefix, boost_math_tr1_lib)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def main(args):
    "Run all regression tests."

    # Check command-line arguments TODO: Use argparse
    only_auto = "--only-auto" in args
    use_auto = "--skip-auto" not in args
    use_uflacs = "--skip-uflacs" not in args
    use_quad = "--skip-quad" not in args
    use_tsfc = "--use-tsfc" in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    skip_download = "--skip-download" in args
    skip_run = "--skip-run" in args
    skip_code_diff = "--skip-code-diff" in args
    skip_validate = "--skip-validate" in args
    bench = "--bench" in args
    debug = "--debug" in args
    verbose = ("--verbose" in args) or debug  # debug implies verbose
    permissive = "--permissive" in args or bench
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    show_help = "--help" in args

    flags = (
        "--only-auto",
        "--skip-auto",
        "--skip-uflacs",
        "--skip-quad",
        "--use-tsfc",
        "--ext-quad",
        "--ext-uflacs",
        "--skip-download",
        "--skip-run",
        "--skip-code-diff",
        "--skip-validate",
        "--bench",
        "--debug",
        "--verbose",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--help",
    )
    args = [arg for arg in args if arg not in flags]

    # Hack: add back --verbose for ffc.main to see
    if verbose:
        args = args + ["--verbose"]

    if show_help:
        info("Valid arguments:\n" + "\n".join(flags))
        return 0

    if bench or not skip_validate:
        skip_run = False
    if bench:
        skip_code_diff = True
        skip_validate = True
    if use_ext_quad or use_ext_uflacs:
        skip_code_diff = True

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        try:
            cmd = "./scripts/download"
            output = as_native_str(subprocess.check_output(cmd, shell=True))
            print(output)
            info_green("Download reference data ok")
        except subprocess.CalledProcessError as e:
            print(e.output)
            info_red("Download reference data failed")

    if tolerant:
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to run here
    test_cases = []
    if only_auto:
        test_cases += ["-r auto"]
    else:
        if use_auto:
            test_cases += ["-r auto"]
        if use_uflacs:
            test_cases += ["-r uflacs -O0", "-r uflacs -O"]
        if use_quad:
            test_cases += ["-r quadrature -O0", "-r quadrature -O"]
            import warnings
            from ffc.quadrature.deprecation import QuadratureRepresentationDeprecationWarning
            warnings.simplefilter("once", QuadratureRepresentationDeprecationWarning)
        if use_tsfc:
            test_cases += ["-r tsfc -O0", "-r tsfc -O"]
            # Silence good-performance messages by COFFEE
            import coffee
            coffee.set_log_level(coffee.logger.PERF_WARN)
        if use_ext_quad:
            test_cases += ext_quad
        if use_ext_uflacs:
            test_cases += ext_uflacs

    test_case_timings = {}

    fails = OrderedDict()

    for argument in test_cases:
        test_case_timings[argument] = time.time()
        fails[argument] = OrderedDict()

        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Workarounds for feature lack in representation
        if "quadrature" in argument and not only_forms:
            skip_forms = known_quad_failures
            info_blue("Skipping forms known to fail with quadrature:\n" +
                      "\n".join(sorted(skip_forms)))
        elif "uflacs" in argument and not only_forms:
            skip_forms = known_uflacs_failures
            info_blue("Skipping forms known to fail with uflacs:\n" +
                      "\n".join(sorted(skip_forms)))
        elif "tsfc" in argument and not only_forms:
            skip_forms = known_tsfc_failures
            info_blue("Skipping forms known to fail with tsfc:\n" +
                      "\n".join(sorted(skip_forms)))
        else:
            skip_forms = set()

        # Generate test cases
        generate_test_cases(bench, only_forms, skip_forms)

        # Generate code
        failures = generate_code(args + argument.split(), only_forms,
                                 skip_forms, debug)
        if failures:
            fails[argument]["generate_code"] = failures

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continuously test that the codes generated
        # by all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if skip_code_diff:
            info_blue("Skipping code diff validation")
        else:
            failures = validate_code(code_reference_dir)
            if failures:
                fails[argument]["validate_code"] = failures

        # Build and run programs and validate output to common
        # reference
        if skip_run:
            info_blue("Skipping program execution")
        else:
            failures = build_programs(bench, permissive, debug, verbose)
            if failures:
                fails[argument]["build_programs"] = failures
            failures = run_programs(bench, debug, verbose)
            if failures:
                fails[argument]["run_programs"] = failures

        # Validate output to common reference results
        if skip_validate:
            info_blue("Skipping program output validation")
        else:
            failures = validate_programs(output_reference_dir)
            if failures:
                fails[argument]["validate_programs"] = failures

        # Go back up
        os.chdir(os.path.pardir)

        end()

        test_case_timings[argument] = time.time() - test_case_timings[argument]

    # Go back up
    os.chdir(os.path.pardir)

    # Print results
    if print_timing:
        info_green("Timing of all commands executed:")
        timings = '\n'.join("%10.2e s %s" % (t, name)
                            for (name, t) in _command_timings)
        info_blue(timings)

    for argument in test_cases:
        info_blue("Total time for %s: %.1f s" % (argument, test_case_timings[argument]))

    num_failures = sum(len(failures_phase)
                       for failures_args in fails.values()
                       for failures_phase in failures_args.values())

    if num_failures == 0:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info_red("")
        info_red("Long summary:")
        for argument in test_cases:
            if not fails[argument]:
                info_green("  No failures with args '%s'" % argument)
            else:
                info_red("  Failures with args '%s':" % argument)
                for phase, failures in fails[argument].items():
                    info_red("    %d failures in %s:" % (len(failures), phase))
                    for f in failures:
                        info_red("      %s" % (f,))
        info_red("")
        info_red("Short summary:")
        phase_fails = defaultdict(int)
        for argument in test_cases:
            if not fails[argument]:
                info_green("  No failures with args '%s'" % argument)
            else:
                info_red("  Number of failures with args '%s':" % argument)
                for phase, failures in fails[argument].items():
                    info_red("    %d failures in %s." % (len(failures), phase))
                    phase_fails[phase] += len(failures)
        info_red("")
        info_red("Total failures for all args:")
        for phase, count in phase_fails.items():
            info_red("  %s: %d failed" % (phase, count))
        info_red("")
        info_red("Error messages stored in %s" % logfile)
        return 1
def main(args):
    "Run all regression tests."

    # Check command-line arguments TODO: Use argparse
    generate_only = "--generate-only" in args
    fast = "--fast" in args
    bench = "--bench" in args
    use_auto = "--skip-auto" not in args
    use_quad = "--skip-quad" not in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    permissive = "--permissive" in args
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    skip_download = "--skip-download" in args
    ignore_code_diff = "--ignore-code-diff" in args
    pyop2 = "--pyop2" in args

    flags = (
        "--generate-only",
        "--fast",
        "--bench",
        "--skip-auto",
        "--skip-quad",
        "--ext-quad",
        "--ext-uflacs",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--skip-download",
        "--ignore-code-diff",
        "--pyop2",
    )
    args = [arg for arg in args if arg not in flags]

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        failure, output = get_status_output("./scripts/download")
        print(output)
        if failure:
            info_red("Download reference data failed")
        else:
            info_green("Download reference data ok")

    if tolerant:
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to
    # run here
    test_cases = []
    if use_auto:
        test_cases += ["-r auto"]
    if use_quad and (not bench and not fast):
        test_cases += ["-r quadrature", "-r quadrature -O"]
    if use_ext_quad:
        test_cases += ext_quad
    if use_ext_uflacs:
        test_cases = ext_uflacs
        test_cases += ["-r quadrature"]
        #test_cases += ["-r quadrature -O"]
    if pyop2:
        test_cases += ext_pyop2

    for argument in test_cases:
        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Generate test cases
        generate_test_cases(bench, only_forms)

        # Generate code
        generate_code(args + [argument], only_forms)

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continuously test that the codes generated
        # by all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if not bench and (argument not in ext_quad) and not ignore_code_diff:
            validate_code(code_reference_dir)

        # Build and run programs and validate output to common
        # reference
        if fast or generate_only:
            info("Skipping program validation")
        elif bench:
            if argument in ext_pyop2:
                build_pyop2_programs(bench, permissive, debug=debug)
            else:
                build_ufc_programs(bench, permissive, debug=debug)
            run_programs(bench)
        else:
            if argument in ext_pyop2:
                build_pyop2_programs(bench, permissive, debug=debug)
            else:
                build_ufc_programs(bench, permissive, debug=debug)
            run_programs(bench)
            validate_programs(output_reference_dir)

        # Go back up
        os.chdir(os.path.pardir)

        end()

    # Print results
    if print_timing:
        timings = '\n'.join("%10.2e s %s" % (t, name)
                            for (name, t) in _command_timings)
        info_green("Timing of all commands executed:")
        info(timings)
    if logfile is None:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info("Error messages stored in error.log")
        return 1
def validate_programs(reference_dir):
    "Validate generated programs against references."

    # Get a list of all files (the .out files written by run_programs;
    # the matching .json files are checked further below)
    output_files = [f for f in os.listdir(".") if f.endswith(".out")]
    output_files.sort()
    begin("Validating generated programs (%d programs found)" % len(output_files))

    # Iterate over all files
    for f in output_files:

        # Get generated output
        generated_output = open(f).read()

        # Get reference output
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            reference_output = open(reference_file).read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        ok = True
        old = [line.split(" = ")
               for line in reference_output.split("\n") if " = " in line]
        new = dict([line.split(" = ")
                    for line in generated_output.split("\n") if " = " in line])
        header = ("Output differs for %s, diff follows (reference first, generated second)"
                  % os.path.join(*reference_file.split(os.path.sep)[-3:]))
        for (key, value) in old:

            # Check if value is present
            if key not in new:
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: missing value in generated code" % key)
                ok = False
                continue

            # Extract float values
            old_values = array([float(v) for v in value.split(" ")])
            new_values = array([float(v) for v in new[key].split(" ")])

            # Check that shape is correct
            if not shape(old_values) == shape(new_values):
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: shape mismatch" % key)
                ok = False
                continue

            # Check that values match to within tolerance set by
            # 'output_tolerance'
            diff = max(abs(old_values - new_values))
            if diff > output_tolerance or isnan(diff):
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: values differ, error = %g (tolerance = %g)"
                          % (key, diff, output_tolerance))
                log_error("  old = " + " ".join("%.16g" % v for v in old_values))
                log_error("  new = " + " ".join("%.16g" % v for v in new_values))
                ok = False

        # Add debugging output to log file
        debug = "\n".join([line for line in generated_output.split("\n")
                           if "debug" in line])
        if debug:
            log_error(debug)

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)

        # Now check json references
        fj = f.replace(".out", ".json")

        # Get generated json output
        if os.path.exists(fj):
            generated_json_output = open(fj).read()
            if "nan" in generated_json_output:
                info_red("Found nan in generated json output, replacing with 999 to be able to parse as python dict.")
                generated_json_output = generated_json_output.replace("nan", "999")
        else:
            generated_json_output = "{}"

        # Get reference json output
        reference_json_file = os.path.join(reference_dir, fj)
        if os.path.isfile(reference_json_file):
            reference_json_output = open(reference_json_file).read()
        else:
            info_blue("Missing reference for %s" % reference_json_file)
            reference_json_output = "{}"

        # Compare json with reference using recursive diff algorithm
        # TODO: Write to different error file?
        from recdiff import recdiff, print_recdiff, DiffEqual

        # Assuming reference is well formed
        reference_json_output = eval(reference_json_output)
        try:
            generated_json_output = eval(generated_json_output)
        except Exception as e:
            info_red("Failed to evaluate json output for %s" % fj)
            log_error(str(e))
            generated_json_output = None
        json_diff = (None if generated_json_output is None else
                     recdiff(generated_json_output, reference_json_output,
                             tolerance=output_tolerance))
        json_ok = json_diff == DiffEqual

        # Check status
        if json_ok:
            info_green("%s OK" % fj)
        else:
            info_red("%s differs" % fj)
            log_error("Json output differs for %s, diff follows (generated first, reference second)"
                      % os.path.join(*reference_json_file.split(os.path.sep)[-3:]))
            print_recdiff(json_diff, printer=log_error)

    end()
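
# Hedged entry-point sketch: these scripts are typically invoked with command
# line flags forwarded to main(); the exact wiring in the original files may
# differ.
if __name__ == "__main__":
    import sys
    sys.exit(main(sys.argv[1:]))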