def run_programs(bench):
    """Run all generated test programs (*.bin) in the current directory.

    A stale <prefix>.out file is removed before each run so the output
    captured by the shell redirect is known to be fresh.
    """
    # This matches argument parsing in the generated main files
    bench = 'b' if bench else ''

    # Get a list of all files
    test_programs = [f for f in os.listdir(".") if f.endswith(".bin")]
    test_programs.sort()
    begin("Running generated programs (%d programs found)" % len(test_programs))

    # Iterate over all files
    for f in test_programs:
        prefix = f.split(".bin")[0]
        # Remove stale output; only ignore filesystem errors. The original
        # bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
        try:
            os.remove(prefix + ".out")
        except OSError:
            pass
        ok = run_command(".%s%s.bin %s > %s.out" % (os.path.sep, prefix, bench, prefix))

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s failed" % f)

    end()
def run_programs(bench):
    """Execute every generated *.bin test program in the current directory."""
    # The generated main files parse a single 'b' argument to enable benchmarking.
    mode_arg = 'b' if bench else ''

    binaries = sorted(name for name in os.listdir(".") if name.endswith(".bin"))
    begin("Running generated programs (%d programs found)" % len(binaries))

    for name in binaries:
        stem = name.split(".bin")[0]
        if run_command(".%s%s.bin %s" % (os.path.sep, stem, mode_arg)):
            info_green("%s OK" % name)
        else:
            info_red("%s failed" % name)

    end()
def generate_test_cases(bench, only_forms):
    """Generate form files for all test cases.

    Copies the .ufl form files from the benchmark or demo directory into
    the current directory and generates extra .ufl files for the element
    test cases (unless benchmarking).
    """
    begin("Generating test cases")

    # Copy form files
    if bench:
        form_directory = bench_directory
    else:
        form_directory = demo_directory

    # Make list of form files
    form_files = [f for f in os.listdir(form_directory) if f.endswith(".ufl")]
    if only_forms:
        form_files = [f for f in form_files if f in only_forms]
    form_files.sort()
    for f in form_files:
        # os.path.join instead of "%s/%s" so the path is portable
        # (and consistent with the sibling variant of this function)
        shutil.copy(os.path.join(form_directory, f), ".")
    info_green("Found %d form files" % len(form_files))

    # Generate form files for forms
    info("Generating form files for extra forms: Not implemented")

    # Generate form files for elements
    if not bench:
        from elements import elements
        info("Generating form files for extra elements (%d elements)" % len(elements))
        for (i, element) in enumerate(elements):
            # with-statement closes the handle promptly; the original
            # open(...).write(...) leaked it until garbage collection
            with open("X_Element%d.ufl" % i, "w") as fh:
                fh.write("element = %s" % element)

    end()
def generate_test_cases(bench, only_forms):
    """Generate form files for all test cases.

    Copies the .ufl form files from the benchmark or demo directory into
    the current directory and generates extra .ufl files for the element
    test cases (unless benchmarking).
    """
    begin("Generating test cases")

    # Copy form files
    if bench:
        form_directory = bench_directory
    else:
        form_directory = demo_directory

    # Make list of form files
    form_files = [f for f in os.listdir(form_directory) if f.endswith(".ufl")]
    if only_forms:
        form_files = [f for f in form_files if f in only_forms]
    form_files.sort()
    for f in form_files:
        shutil.copy(os.path.join(form_directory, f), ".")
    info_green("Found %d form files" % len(form_files))

    # Generate form files for forms
    info("Generating form files for extra forms: Not implemented")

    # Generate form files for elements
    if not bench:
        from elements import elements
        info("Generating form files for extra elements (%d elements)" % len(elements))
        for (i, element) in enumerate(elements):
            # with-statement closes the handle promptly; the original
            # open(...).write(...) leaked it until garbage collection
            with open("X_Element%d.ufl" % i, "w") as fh:
                fh.write("element = %s" % element)

    end()
def generate_code(args, only_forms):
    """Generate code for all test cases by invoking the ffc command."""
    # Collect the .ufl files to process, optionally restricted to only_forms.
    ufl_files = sorted(name for name in os.listdir(".") if name.endswith(".ufl"))
    if only_forms:
        ufl_files = [name for name in ufl_files if name in only_forms]
    begin("Generating code (%d form files found)" % len(ufl_files))

    # TODO: Parse additional options from .ufl file? I.e. grep for
    # some sort of tag like '#ffc: <flags>'.
    special = {"AdaptivePoisson.ufl": "-e"}

    for ufl_file in ufl_files:
        extra = special.get(ufl_file, "")
        cmd = ("ffc %s %s -f precision=8 -fconvert_exceptions_to_warnings %s"
               % (extra, " ".join(args), ufl_file))

        # Generate code and report the outcome.
        if run_command(cmd):
            info_green("%s OK" % ufl_file)
        else:
            info_red("%s failed" % ufl_file)

    end()
def validate_programs(reference_dir):
    """Validate generated program output (.json files) against references.

    Returns a list of .json file names whose output differs from (or could
    not be compared against) the reference output in reference_dir.
    """
    # Get a list of all files
    output_files = sorted(f for f in os.listdir(".") if f.endswith(".json"))
    begin("Validating generated programs (%d .json program output files found)" % len(output_files))

    failures = []
    # Iterate over all files
    for fj in output_files:
        # Get generated json output (with-statement closes the handle promptly)
        if os.path.exists(fj):
            with open(fj) as fh:
                generated_json_output = fh.read()
            if "nan" in generated_json_output:
                info_red("Found nan in generated json output, replacing with 999 to be able to parse as python dict.")
                generated_json_output = generated_json_output.replace("nan", "999")
        else:
            generated_json_output = "{}"

        # Get reference json output
        reference_json_file = os.path.join(reference_dir, fj)
        if os.path.isfile(reference_json_file):
            with open(reference_json_file) as fh:
                reference_json_output = fh.read()
        else:
            info_blue("Missing reference for %s" % reference_json_file)
            reference_json_output = "{}"

        # Compare json with reference using recursive diff algorithm
        # TODO: Write to different error file?
        from recdiff import recdiff, print_recdiff, DiffEqual

        # NOTE(review): eval() on file contents is dangerous in general; it is
        # tolerated here because both files are produced locally by this test
        # suite, but json.loads would be safer -- consider switching.
        # Assuming reference is well formed
        reference_json_output = eval(reference_json_output)
        try:
            generated_json_output = eval(generated_json_output)
        except Exception as e:
            info_red("Failed to evaluate json output for %s" % fj)
            log_error(str(e))
            generated_json_output = None

        json_diff = (None if generated_json_output is None
                     else recdiff(generated_json_output, reference_json_output, tolerance=output_tolerance))
        json_ok = json_diff == DiffEqual

        # Check status
        if json_ok:
            info_green("%s OK" % fj)
        else:
            info_red("%s differs" % fj)
            log_error("Json output differs for %s, diff follows (generated first, reference second)"
                      % os.path.join(*reference_json_file.split(os.path.sep)[-3:]))
            print_recdiff(json_diff, printer=log_error)
            failures.append(fj)

    end()
    return failures
def generate_code(args, only_forms, skip_forms, debug):
    """Generate code for all test cases via ffc.main; return the failing files."""
    global _command_timings

    # Candidate .ufl files, minus known-bad ones, optionally restricted.
    candidates = sorted(
        name for name in os.listdir(".")
        if name.endswith(".ufl") and name not in skip_forms)
    if only_forms:
        candidates = [name for name in candidates if name in only_forms]
    begin("Generating code (%d form files found)" % len(candidates))

    # TODO: Parse additional options from .ufl file? I.e. grep for
    # some sort of tag like '#ffc: <flags>'.
    special = {"AdaptivePoisson.ufl": "-e", }

    failures = []
    for name in candidates:
        # Assemble the ffc option list for this file, dropping empty entries.
        options = [special.get(name, "")]
        options.extend(args)
        options.extend(["-f", "precision=8", "-f", "epsilon=1e-7",
                        "-fconvert_exceptions_to_warnings"])
        options.append(name)
        options = [opt for opt in options if opt]
        cmd = sys.executable + " -m ffc " + " ".join(options)

        # Run code generation, always recording how long it took.
        start = time.time()
        try:
            status = ffc.main(options)
        except Exception as e:
            if debug:
                raise e
            log_error(cmd)
            log_error(traceback.format_exc())
            status = 1
        finally:
            _command_timings.append((cmd, time.time() - start))

        # Check status (ffc.main returns 0 on success)
        if status == 0:
            info_green("%s OK" % name)
        else:
            info_red("%s failed" % name)
            failures.append(name)

    end()
    return failures
def compile_element(elements, prefix="Element", parameters=None):
    """This function generates UFC code for a given UFL element or list
    of UFL elements.

    Note: the parameters default was previously ``default_parameters()``
    evaluated once at import time, i.e. a single shared (mutable) dict
    reused by every call; it is now evaluated per call. Callers passing
    parameters explicitly are unaffected.
    """
    info("Compiling element %s\n" % prefix)

    # Reset timing
    cpu_time_0 = time()

    # Evaluate the default per call (avoids the shared-mutable-default bug)
    if parameters is None:
        parameters = default_parameters()

    # Check input arguments
    elements = _check_elements(elements)
    parameters = _check_parameters(parameters)
    if not elements:
        return

    # Stage 1: analysis
    cpu_time = time()
    analysis = analyze_elements(elements, parameters)
    _print_timing(1, time() - cpu_time)

    # Stage 2: intermediate representation
    cpu_time = time()
    ir = compute_ir(analysis, parameters)
    _print_timing(2, time() - cpu_time)

    # Stage 3: optimization
    cpu_time = time()
    oir = optimize_ir(ir, parameters)
    _print_timing(3, time() - cpu_time)

    # Stage 4: code generation
    cpu_time = time()
    code = generate_code(oir, prefix, parameters)
    _print_timing(4, time() - cpu_time)

    # Stage 4.1: generate wrappers
    cpu_time = time()
    object_names = {}
    wrapper_code = generate_wrapper_code(analysis, prefix, object_names, parameters)
    _print_timing(4.1, time() - cpu_time)

    # Stage 5: format code
    cpu_time = time()
    format_code(code, wrapper_code, prefix, parameters)
    _print_timing(5, time() - cpu_time)

    info_green("FFC finished in %g seconds.", time() - cpu_time_0)
def compile_form(forms, object_names=None, prefix="Form", parameters=None):
    """This function generates UFC code for a given UFL form or list
    of UFL forms.

    Bug fix: the original signature used ``object_names={}`` (one dict
    shared by every call, so mutations leaked between calls) and
    ``parameters=default_parameters()`` (evaluated once at import time).
    Both are now None sentinels evaluated per call; callers passing the
    arguments explicitly see identical behavior.
    """
    info("Compiling form %s\n" % prefix)

    # Reset timing
    cpu_time_0 = time()

    # Evaluate defaults per call (avoids shared mutable defaults)
    if object_names is None:
        object_names = {}
    if parameters is None:
        parameters = default_parameters()

    # Check input arguments
    forms = _check_forms(forms)
    parameters = _check_parameters(parameters)
    if not forms:
        return

    # Stage 1: analysis
    cpu_time = time()
    analysis = analyze_forms(forms, object_names, parameters)
    _print_timing(1, time() - cpu_time)

    # Stage 2: intermediate representation
    cpu_time = time()
    ir = compute_ir(analysis, parameters)
    _print_timing(2, time() - cpu_time)

    # Stage 3: optimization
    cpu_time = time()
    oir = optimize_ir(ir, parameters)
    _print_timing(3, time() - cpu_time)

    # Stage 4: code generation
    cpu_time = time()
    code = generate_code(oir, prefix, parameters)
    _print_timing(4, time() - cpu_time)

    # Stage 4.1: generate wrappers
    cpu_time = time()
    wrapper_code = generate_wrapper_code(analysis, prefix, parameters)
    _print_timing(4.1, time() - cpu_time)

    # Stage 5: format code
    cpu_time = time()
    format_code(code, wrapper_code, prefix, parameters)
    _print_timing(5, time() - cpu_time)

    info_green("FFC finished in %g seconds.", time() - cpu_time_0)
def print_results(num_tests, ffc_fail, gcc_fail, run_fail, dif_cri, dif_acc, correct):
    """Print a summary of the test run; return 0 on success, 1 otherwise."""
    # Fast path: nothing failed or differed at all.
    if ffc_fail == gcc_fail == run_fail == dif_cri == dif_acc == []:
        info_green("\nAll %d tests OK" % num_tests)
        return 0

    # Right-align every count to the width of the total number of tests.
    width = len(str(num_tests))

    def fmt(count):
        return str(count).rjust(width)

    num_ffc = fmt(len(ffc_fail))
    num_gcc = fmt(len(gcc_fail))
    num_run = fmt(len(run_fail))
    num_cri = fmt(len(dif_cri))
    num_acc = fmt(len(dif_acc))
    num_cor = fmt(len(correct))
    num_tot = fmt(len(ffc_fail) + len(gcc_fail) + len(run_fail)
                  + len(correct) + len(dif_cri) + len(dif_acc))

    info("\n\n*************** SUMMARY ***************")
    info("\n Number of tests: " + str(num_tests))
    info("\n Num ffc fail: " + num_ffc)
    info(" Num gcc fail: " + num_gcc)
    info(" Num run fail: " + num_run)
    info((" Num correct: (tol. %g): " % tol) + num_cor)
    info((" Num diff. critical: (tol. %g): " % crit_tol) + num_cri)
    info(" Num diff. acceptable: " + num_acc)
    info(" Total: " + num_tot)
    info("")

    # Return 0 if there was only acceptable errors.
    if ffc_fail == gcc_fail == run_fail == dif_cri == []:
        return 0
    return 1
def validate_code(reference_dir):
    """Validate generated code against references.

    Returns a list of header file names whose contents differ from the
    reference copies found under reference_dir.
    """
    # Get a list of all files
    header_files = sorted([f for f in os.listdir(".") if f.endswith(".h")])
    begin("Validating generated code (%d header files found)" % len(header_files))

    failures = []
    # Iterate over all files
    for f in header_files:
        # Get generated code (with-statement closes the handle promptly;
        # the original open(...).read() leaked it until GC)
        with open(f) as fh:
            generated_code = fh.read()

        # Get reference code
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            with open(reference_file) as fh:
                reference_code = fh.read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        if generated_code == reference_code:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)
            difflines = difflib.unified_diff(
                reference_code.split("\n"),
                generated_code.split("\n"))
            diff = "\n".join(difflines)
            s = ("Code differs for %s, diff follows (reference first, generated second)"
                 % os.path.join(*reference_file.split(os.path.sep)[-3:]))
            log_error("\n" + s + "\n" + len(s) * "-")
            log_error(diff)
            failures.append(f)

    end()
    return failures
def validate_code(reference_dir):
    """Validate generated code against references (differences are logged)."""
    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Validating generated code (%d header files found)" % len(header_files))

    # Iterate over all files
    for f in header_files:
        # Get generated code (with-statement closes the handle promptly;
        # the original open(...).read() leaked it until GC)
        with open(f) as fh:
            generated_code = fh.read()

        # Get reference code
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            with open(reference_file) as fh:
                reference_code = fh.read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        if generated_code == reference_code:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)
            diff = "\n".join([
                line for line in difflib.unified_diff(
                    reference_code.split("\n"),
                    generated_code.split("\n"))
            ])
            s = (
                "Code differs for %s, diff follows (reference first, generated second)"
                % os.path.join(*reference_file.split(os.path.sep)[-3:]))
            log_error("\n" + s + "\n" + len(s) * "-")
            log_error(diff)

    end()
def build_pyop2_programs(bench, permissive, debug=False):
    """Build (compile) PyOP2 test programs for all generated headers.

    Bug fix: compiler_options is now initialised before the option
    selection below; previously, calling with permissive=True while
    bench and debug were off left it undefined, raising
    UnboundLocalError at the info(...) call.
    """
    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Building test programs (%d header files found)" % len(header_files))

    # Set compiler options (default: no extra flags, i.e. fully permissive)
    compiler_options = ""
    if not permissive:
        compiler_options = " -Werror"
    if bench > 0:
        info("Benchmarking activated")
        compiler_options = "-Wall -Werror"
    if debug:
        info("Debugging activated")
        compiler_options = "-Wall -Werror -g"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:
        # Generate test code
        filename = _generate_test_code(f, bench)

        # Compile test code
        prefix = f.split(".h")[0]
        command = "g++ %s -o %s.bin %s.cpp -lboost_math_tr1" % (compiler_options, prefix, prefix)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def verify_values(ufl_element, ref_values, ffc_values, dif_cri, dif_acc, correct, log_file):
    """Check the values from evaluate_basis*() against reference values.

    Appends the element name to exactly one of dif_cri/dif_acc/correct per
    derivative order, logs any discrepancies, and returns the number of
    value sets compared.
    """
    num_tests = len(ffc_values)
    if num_tests != len(ref_values):
        raise RuntimeError("The number of computed values is not equal to the number of reference values.")

    element_name = str(ufl_element)
    # First entry identifies the element in the error log.
    errors = [element_name]

    for deriv_order in range(num_tests):
        # Label for this derivative order.
        if deriv_order == 0:
            s = " evaluate_basis"
        else:
            s = " evaluate_basis_derivatives, order = %d" % deriv_order

        # Largest absolute deviation from the reference values.
        error = abs(ffc_values[deriv_order] - ref_values[deriv_order]).max()

        if error > tol:
            # Above tolerance: classify as critical or merely acceptable.
            if error > crit_tol:
                m = "%s failed: error = %s (crit_tol: %s)" % (s, str(error), str(crit_tol))
                info_red(m)
                dif_cri.append(element_name)
            else:
                m = "%s ok: error = %s (tol: %s)" % (s, str(error), str(tol))
                info_blue(m)
                dif_acc.append(element_name)
            errors.append(s + "\n" + m)
        else:
            info_green("%s OK" % s)
            correct.append(element_name)

    # Log errors if any entry was added beyond the element name.
    if len(errors) > 1:
        log_error("\n".join(errors), log_file)

    return num_tests
def build_programs(bench, permissive, debug=False):
    """Build test programs for all test cases.

    Fixes vs. original:
    - ``debug`` was referenced below but never defined in this function
      (NameError unless a module-level global happened to exist); it is
      now an explicit keyword parameter defaulting to False, which keeps
      existing two-argument calls working.
    - the Boost library search now stops at the first match, so the
      "-mt" suffix can no longer be appended to the library name more
      than once by subsequent directory iterations.
    """
    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Building test programs (%d header files found)" % len(header_files))

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py)
    # Set a default directory for the boost installation
    if sys.platform == "darwin":
        # Use Brew as default
        default = os.path.join(os.path.sep, "usr", "local")
    else:
        default = os.path.join(os.path.sep, "usr")

    # If BOOST_DIR is not set use default directory
    boost_inc_dir = ""
    boost_lib_dir = ""
    boost_math_tr1_lib = "boost_math_tr1"
    boost_dir = os.getenv("BOOST_DIR", default)

    # Locate the Boost headers
    for inc_dir in ["", "include"]:
        if os.path.isfile(os.path.join(boost_dir, inc_dir, "boost", "version.hpp")):
            boost_inc_dir = os.path.join(boost_dir, inc_dir)
            break

    # Locate the Boost math library (possibly the -mt multithreaded build)
    libdir_multiarch = "lib/" + sysconfig.get_config_vars().get("MULTIARCH", "")
    lib_found = False
    for lib_dir in ["", "lib", libdir_multiarch, "lib64"]:
        for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]:
            _lib = os.path.join(boost_dir, lib_dir, "lib" + boost_math_tr1_lib + ext)
            if os.path.isfile(_lib):
                if "-mt" in _lib:
                    boost_math_tr1_lib += "-mt"
                boost_lib_dir = os.path.join(boost_dir, lib_dir)
                lib_found = True
                break
        if lib_found:
            # Stop scanning further directories once the library is found
            break

    boost_is_found = boost_inc_dir != "" and boost_lib_dir != ""
    if not boost_is_found:
        raise OSError("""The Boost library was not found. If Boost is installed in a nonstandard location, set the environment variable BOOST_DIR. 
""")

    ufc_cflags += " -I%s -L%s" % (boost_inc_dir, boost_lib_dir)

    # Set compiler options
    compiler_options = "%s -Wall " % ufc_cflags
    if not permissive:
        compiler_options += " -Werror -pedantic"

    if bench:
        info("Benchmarking activated")
        # Takes too long to build with -O2
        #compiler_options += " -O2"
        compiler_options += " -O3"
        #compiler_options += " -O3 -fno-math-errno -march=native"

    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"

    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:
        # Generate test code
        filename = generate_test_code(f)

        # Compile test code
        prefix = f.split(".h")[0]
        command = "g++ %s -o %s.bin %s.cpp -l%s" % \
            (compiler_options, prefix, prefix, boost_math_tr1_lib)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def build_programs(bench, permissive, debug, verbose):
    """Build test programs for all test cases.

    Returns a list of the program prefixes that failed to build.

    Bug fix: the file handle used to record the build command no longer
    reuses the name ``f``, which shadowed the loop variable (the header
    file name) for the remainder of each iteration.
    """
    # Get a list of all files
    header_files = sorted([f for f in os.listdir(".") if f.endswith(".h")])
    begin("Building test programs (%d header files found)" % len(header_files))

    # Get UFC flags
    ufc_cflags = "-I" + get_ufc_include() + " " + " ".join(get_ufc_cxx_flags())

    # Get boost flags
    boost_cflags, boost_linkflags = find_boost_cflags()

    # Get compiler
    compiler = os.getenv("CXX", "g++")

    # Set compiler options
    compiler_options = " -Wall"
    if not permissive:
        compiler_options += " -Werror -pedantic"

    # Always need ufc
    compiler_options += " " + ufc_cflags

    if bench:
        info("Benchmarking activated")
        compiler_options += " -O3 -march=native"
        # Workaround for gcc bug: gcc is too eager to report array-bounds warning with -O3
        compiler_options += " -Wno-array-bounds"

    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"

    info("Compiler options: %s" % compiler_options)

    failures = []

    # Iterate over all files
    for f in header_files:
        prefix = f.split(".h")[0]

        # Options for all files
        cpp_flags = compiler_options
        ld_flags = ""

        # Only add boost flags if necessary
        needs_boost = prefix == "MathFunctions"
        if needs_boost:
            info("Additional compiler options for %s: %s" % (prefix, boost_cflags))
            info("Additional linker options for %s: %s" % (prefix, boost_linkflags))
            cpp_flags += " " + boost_cflags
            ld_flags += " " + boost_linkflags

        # Generate test code
        filename = generate_test_code(f)

        # Compile test code
        command = "%s %s -o %s.bin %s.cpp %s" % \
            (compiler, cpp_flags, prefix, prefix, ld_flags)
        ok = run_command(command, verbose)

        # Store compile command for easy reproduction
        # (distinct name: 'as f' previously shadowed the loop variable)
        with open("%s.build" % (prefix,), "w") as build_file:
            build_file.write(command + "\n")

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)
            failures.append(prefix)

    end()
    return failures
def main(args):
    """Run all regression tests.

    Parses command-line flags, downloads reference data, then for each
    selected representation: generates test cases, generates code,
    validates code diffs, builds and runs programs, and validates program
    output against the common r_auto references. Returns 0 on success,
    1 if any phase recorded failures.

    Bug fix: "--ext-uflacs" was tested for but missing from the `flags`
    strip-list, so it leaked through into the argument list passed on to
    ffc; it is now stripped like every other flag.
    """
    # Check command-line arguments TODO: Use argparse
    only_auto = "--only-auto" in args
    use_auto = "--skip-auto" not in args
    use_uflacs = "--skip-uflacs" not in args
    use_quad = "--skip-quad" not in args
    use_tsfc = "--use-tsfc" in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    skip_download = "--skip-download" in args
    skip_run = "--skip-run" in args
    skip_code_diff = "--skip-code-diff" in args
    skip_validate = "--skip-validate" in args
    bench = "--bench" in args
    debug = "--debug" in args
    verbose = ("--verbose" in args) or debug  # debug implies verbose
    permissive = "--permissive" in args or bench
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    show_help = "--help" in args

    flags = (
        "--only-auto",
        "--skip-auto",
        "--skip-uflacs",
        "--skip-quad",
        "--use-tsfc",
        "--ext-quad",
        "--ext-uflacs",
        "--skip-download",
        "--skip-run",
        "--skip-code-diff",
        "--skip-validate",
        "--bench",
        "--debug",
        "--verbose",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--help",
    )
    args = [arg for arg in args if arg not in flags]

    # Hack: add back --verbose for ffc.main to see
    if verbose:
        args = args + ["--verbose"]

    if show_help:
        info("Valid arguments:\n" + "\n".join(flags))
        return 0

    if bench or not skip_validate:
        skip_run = False
    if bench:
        skip_code_diff = True
        skip_validate = True
    if use_ext_quad or use_ext_uflacs:
        skip_code_diff = True

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        try:
            cmd = "./scripts/download"
            output = as_native_str(subprocess.check_output(cmd, shell=True))
            print(output)
            info_green("Download reference data ok")
        except subprocess.CalledProcessError as e:
            print(e.output)
            info_red("Download reference data failed")

    if tolerant:
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to run here
    test_cases = []
    if only_auto:
        test_cases += ["-r auto"]
    else:
        if use_auto:
            test_cases += ["-r auto"]
        if use_uflacs:
            test_cases += ["-r uflacs -O0", "-r uflacs -O"]
        if use_quad:
            test_cases += ["-r quadrature -O0", "-r quadrature -O"]
            import warnings
            from ffc.quadrature.deprecation import QuadratureRepresentationDeprecationWarning
            warnings.simplefilter("once", QuadratureRepresentationDeprecationWarning)
        if use_tsfc:
            test_cases += ["-r tsfc -O0", "-r tsfc -O"]
            # Silence good-performance messages by COFFEE
            import coffee
            coffee.set_log_level(coffee.logger.PERF_WARN)
        if use_ext_quad:
            test_cases += ext_quad
        if use_ext_uflacs:
            test_cases += ext_uflacs

    test_case_timings = {}
    fails = OrderedDict()

    for argument in test_cases:
        test_case_timings[argument] = time.time()
        fails[argument] = OrderedDict()

        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Workarounds for feature lack in representation
        if "quadrature" in argument and not only_forms:
            skip_forms = known_quad_failures
            info_blue("Skipping forms known to fail with quadrature:\n" + "\n".join(sorted(skip_forms)))
        elif "uflacs" in argument and not only_forms:
            skip_forms = known_uflacs_failures
            info_blue("Skipping forms known to fail with uflacs:\n" + "\n".join(sorted(skip_forms)))
        elif "tsfc" in argument and not only_forms:
            skip_forms = known_tsfc_failures
            info_blue("Skipping forms known to fail with tsfc:\n" + "\n".join(sorted(skip_forms)))
        else:
            skip_forms = set()

        # Generate test cases
        generate_test_cases(bench, only_forms, skip_forms)

        # Generate code
        failures = generate_code(args + argument.split(), only_forms, skip_forms, debug)
        if failures:
            fails[argument]["generate_code"] = failures

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continously test that the codes generated by
        # all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if skip_code_diff:
            info_blue("Skipping code diff validation")
        else:
            failures = validate_code(code_reference_dir)
            if failures:
                fails[argument]["validate_code"] = failures

        # Build and run programs and validate output to common
        # reference
        if skip_run:
            info_blue("Skipping program execution")
        else:
            failures = build_programs(bench, permissive, debug, verbose)
            if failures:
                fails[argument]["build_programs"] = failures
            failures = run_programs(bench, debug, verbose)
            if failures:
                fails[argument]["run_programs"] = failures

        # Validate output to common reference results
        if skip_validate:
            info_blue("Skipping program output validation")
        else:
            failures = validate_programs(output_reference_dir)
            if failures:
                fails[argument]["validate_programs"] = failures

        # Go back up
        os.chdir(os.path.pardir)
        end()
        test_case_timings[argument] = time.time() - test_case_timings[argument]

    # Go back up
    os.chdir(os.path.pardir)

    # Print results
    if print_timing:
        info_green("Timing of all commands executed:")
        timings = '\n'.join("%10.2e s %s" % (t, name) for (name, t) in _command_timings)
        info_blue(timings)

    for argument in test_cases:
        info_blue("Total time for %s: %.1f s" % (argument, test_case_timings[argument]))

    num_failures = sum(len(failures_phase)
                       for failures_args in fails.values()
                       for failures_phase in failures_args.values())

    if num_failures == 0:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info_red("")
        info_red("Long summary:")
        for argument in test_cases:
            if not fails[argument]:
                info_green(" No failures with args '%s'" % argument)
            else:
                info_red(" Failures with args '%s':" % argument)
                for phase, failures in fails[argument].items():
                    info_red(" %d failures in %s:" % (len(failures), phase))
                    for f in failures:
                        info_red(" %s" % (f,))
        info_red("")
        info_red("Short summary:")
        phase_fails = defaultdict(int)
        for argument in test_cases:
            if not fails[argument]:
                info_green(" No failures with args '%s'" % argument)
            else:
                info_red(" Number of failures with args '%s':" % argument)
                for phase, failures in fails[argument].items():
                    info_red(" %d failures in %s." % (len(failures), phase))
                    phase_fails[phase] += len(failures)
        info_red("")
        info_red("Total failures for all args:")
        for phase, count in phase_fails.items():
            info_red(" %s: %d failed" % (phase, count))
        info_red("")
        info_red("Error messages stored in %s" % logfile)
        return 1
def compile_form(forms, object_names=None, prefix="Form", parameters=None):
    """This function generates UFC code for a given UFL form or list of UFL forms.

    Depending on parameters["pyop2-ir"], returns either a list of PyOP2
    intermediate representations or the generated code (and, in the
    latter case, also writes the formatted code to file).
    """
    info("Compiling form %s\n" % prefix)

    # Reset timing
    cpu_time_0 = time()

    # Check input arguments
    forms = _check_forms(forms)
    if not forms:
        return
    # A path-like prefix would produce broken output file names; keep only
    # the final path component and warn.
    if prefix != os.path.basename(prefix):
        prefix = os.path.basename(prefix)
        warning("Invalid prefix, modified to {}.".format(prefix))
    if object_names is None:
        object_names = {}
    parameters = _check_parameters(parameters)

    # Stage 1: analysis
    cpu_time = time()
    analysis = analyze_forms(forms, parameters)
    _print_timing(1, time() - cpu_time)

    # Stage 2: intermediate representation
    cpu_time = time()
    ir = compute_ir(analysis, prefix, parameters, object_names=object_names)
    _print_timing(2, time() - cpu_time)

    # Stage 3: optimization
    cpu_time = time()
    oir = optimize_ir(ir, parameters)
    _print_timing(3, time() - cpu_time)

    # Return IR (PyOP2 mode) or code string (otherwise)
    if parameters["pyop2-ir"]:
        try:
            from ffc.quadrature.quadraturepyop2ir import generate_pyop2_ir
        except ImportError:
            raise ImportError("Format pyop2-ir depends on PyOP2, which is not available.")
        # Stage 4: build PyOP2 intermediate representation
        cpu_time = time()
        #FIXME: need a cleaner interface
        # oir[3] presumably holds the per-integral IRs -- TODO confirm
        # against compute_ir/optimize_ir
        pyop2_ir = [generate_pyop2_ir(ir, prefix, parameters) for ir in oir[3]]
        _print_timing(4, time() - cpu_time)
        info_green("FFC finished in %g seconds.", time() - cpu_time_0)
        return pyop2_ir
    else:
        # Stage 4: code generation
        cpu_time = time()
        code = generate_code(oir, prefix, parameters)
        _print_timing(4, time() - cpu_time)

        # Stage 4.1: generate wrappers
        cpu_time = time()
        wrapper_code = generate_wrapper_code(analysis, prefix, object_names, parameters)
        _print_timing(4.1, time() - cpu_time)

        # Stage 5: format code
        cpu_time = time()
        code_h, code_c = format_code(code, wrapper_code, prefix, parameters)
        write_code(code_h, code_c, prefix, parameters)  # FIXME: Don't write to file in this function (issue #72)
        _print_timing(5, time() - cpu_time)

        info_green("FFC finished in %g seconds.", time() - cpu_time_0)
        return code
def compile_ufl_objects(ufl_objects, kind, object_names=None, prefix=None, parameters=None, jit=False):
    """This function generates UFC code for a given UFL form or list of UFL forms.

    Returns (code_h, code_c) and, when jit=True, additionally a dict of
    dependent UFL objects (elements and coordinate meshes) that must be
    jitted before the given objects can be compiled.
    """
    info("Compiling %s %s\n" % (kind, prefix))

    # Reset timing
    cpu_time_0 = time()

    # Note that jit will always pass validated parameters so
    # this is only for commandline and direct call from python
    if not jit:
        parameters = validate_parameters(parameters)

    # Check input arguments
    if not isinstance(ufl_objects, (list, tuple)):
        ufl_objects = (ufl_objects, )
    if not ufl_objects:
        return "", ""
    # A path-like prefix would produce broken output file names
    if prefix != os.path.basename(prefix):
        error("Invalid prefix, looks like a full path? prefix='{}'.".format(
            prefix))
    if object_names is None:
        object_names = {}

    # Stage 1: analysis
    cpu_time = time()
    analysis = analyze_ufl_objects(ufl_objects, kind, parameters)
    _print_timing(1, time() - cpu_time)

    # Stage 2: intermediate representation
    cpu_time = time()
    ir = compute_ir(analysis, prefix, parameters, jit)
    _print_timing(2, time() - cpu_time)

    # Stage 3: optimization
    cpu_time = time()
    oir = optimize_ir(ir, parameters)
    _print_timing(3, time() - cpu_time)

    # Stage 4: code generation
    cpu_time = time()
    code = generate_code(oir, parameters)
    _print_timing(4, time() - cpu_time)

    # Stage 4.1: generate wrappers
    cpu_time = time()
    wrapper_code = generate_wrapper_code(analysis, prefix, object_names, parameters)
    _print_timing(4.1, time() - cpu_time)

    # Stage 5: format code
    cpu_time = time()
    code_h, code_c = format_code(code, wrapper_code, prefix, parameters, jit)
    _print_timing(5, time() - cpu_time)

    info_green("FFC finished in %g seconds.", time() - cpu_time_0)

    if jit:
        # Must use processed elements from analysis here
        form_datas, unique_elements, element_numbers, unique_coordinate_elements = analysis

        # Wrap coordinate elements in Mesh object to represent that
        # we want a ufc::coordinate_mapping not a ufc::finite_element
        unique_meshes = [
            ufl.Mesh(element, ufl_id=0)
            for element in unique_coordinate_elements
        ]

        # Avoid returning self as dependency for infinite recursion
        unique_elements = tuple(element for element in unique_elements
                                if element not in ufl_objects)
        unique_meshes = tuple(mesh for mesh in unique_meshes
                              if mesh not in ufl_objects)

        # Setup dependencies (these will be jitted before continuing to compile ufl_objects)
        dependent_ufl_objects = {
            "element": unique_elements,
            "coordinate_mapping": unique_meshes,
        }
        return code_h, code_c, dependent_ufl_objects
    else:
        return code_h, code_c
def main(args):
    """Run all regression tests.

    args is the raw command-line argument list.  Recognized flags are
    stripped out; any remaining ``*.ufl`` names restrict the run to
    those forms, and everything else is forwarded to generate_code.
    Returns 0 on success, 1 if any errors were logged (logfile is a
    module-level global set by the error logger).
    """
    # Check command-line arguments TODO: Use getargs or something
    generate_only = "--generate-only" in args
    fast = "--fast" in args
    bench = "--bench" in args
    use_quad = "--skip-quad" not in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    permissive = "--permissive" in args
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    skip_download = "--skip-download" in args
    flags = (
        "--generate-only",
        "--fast",
        "--bench",
        "--skip-quad",
        "--ext-quad",
        "--ext-uflacs",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--skip-download",
    )
    args = [arg for arg in args if not arg in flags]

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        failure, output = get_status_output("./scripts/download")
        print(output)
        if failure:
            info_red("Download reference data failed")
        else:
            info_green("Download reference data ok")
    if tolerant:
        # Relax the comparison tolerance used by validate_programs
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to
    # run here
    test_cases = ["-r auto"]
    if use_quad and (not bench and not fast):
        test_cases += ["-r quadrature", "-r quadrature -O"]
    if use_ext_quad:
        test_cases += ext_quad
    if use_ext_uflacs:
        # NOTE: this *replaces* the list built so far rather than
        # extending it
        test_cases = ext_uflacs
        test_cases += ["-r quadrature"]
        #test_cases += ["-r quadrature -O"]

    for argument in test_cases:
        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        # (e.g. "-r quadrature -O" -> "r_quadrature_O")
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Generate test cases
        generate_test_cases(bench, only_forms)

        # Generate code
        generate_code(args + [argument], only_forms)

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continously test that the codes generated by
        # all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if not bench and argument not in ext_quad:
            validate_code(code_reference_dir)

        # Build and run programs and validate output to common
        # reference
        if fast or generate_only:
            info("Skipping program validation")
        elif bench:
            # Benchmarks are built and run but their output is not
            # compared against the references
            build_programs(bench, permissive)
            run_programs(bench)
        else:
            build_programs(bench, permissive)
            run_programs(bench)
            validate_programs(output_reference_dir)

        # Go back up
        os.chdir(os.path.pardir)
        end()

    # Print results
    if print_timing:
        timings = '\n'.join("%10.2e s %s" % (t, name)
                            for (name, t) in _command_timings)
        info_green("Timing of all commands executed:")
        info(timings)
    if logfile is None:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info("Error messages stored in error.log")
        return 1
def main(args):
    """Run all regression tests.

    Extended variant with PyOP2 support and finer control over which
    representations run (``--skip-auto``, ``--pyop2``,
    ``--ignore-code-diff``).  args is the raw command-line argument
    list; recognized flags are stripped, remaining ``*.ufl`` names
    restrict the run to those forms, and everything else is forwarded
    to generate_code.  Returns 0 on success, 1 if errors were logged.
    """
    # Check command-line arguments TODO: Use argparse
    generate_only = "--generate-only" in args
    fast = "--fast" in args
    bench = "--bench" in args
    use_auto = "--skip-auto" not in args
    use_quad = "--skip-quad" not in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    permissive = "--permissive" in args
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    skip_download = "--skip-download" in args
    ignore_code_diff = "--ignore-code-diff" in args
    pyop2 = "--pyop2" in args
    flags = (
        "--generate-only",
        "--fast",
        "--bench",
        "--skip-auto",
        "--skip-quad",
        "--ext-quad",
        "--ext-uflacs",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--skip-download",
        "--ignore-code-diff",
        "--pyop2",
    )
    args = [arg for arg in args if not arg in flags]

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        failure, output = get_status_output("./scripts/download")
        print(output)
        if failure:
            info_red("Download reference data failed")
        else:
            info_green("Download reference data ok")
    if tolerant:
        # Relax the comparison tolerance used by validate_programs
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to
    # run here
    test_cases = []
    if use_auto:
        test_cases += ["-r auto"]
    if use_quad and (not bench and not fast):
        test_cases += ["-r quadrature", "-r quadrature -O"]
    if use_ext_quad:
        test_cases += ext_quad
    if use_ext_uflacs:
        # NOTE: this *replaces* the list built so far rather than
        # extending it
        test_cases = ext_uflacs
        test_cases += ["-r quadrature"]
        #test_cases += ["-r quadrature -O"]
    if pyop2:
        test_cases += ext_pyop2

    for argument in test_cases:
        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        # (e.g. "-r quadrature -O" -> "r_quadrature_O")
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Generate test cases
        generate_test_cases(bench, only_forms)

        # Generate code
        generate_code(args + [argument], only_forms)

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continously test that the codes generated by
        # all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if not bench and (argument not in ext_quad) and not ignore_code_diff:
            validate_code(code_reference_dir)

        # Build and run programs and validate output to common
        # reference
        # NOTE(review): `debug` is not defined in this function —
        # presumably a module-level flag; confirm it is set before
        # main() runs.
        if fast or generate_only:
            info("Skipping program validation")
        elif bench:
            # Benchmarks are built and run but their output is not
            # compared against the references
            if argument in ext_pyop2:
                build_pyop2_programs(bench, permissive, debug=debug)
            else:
                build_ufc_programs(bench, permissive, debug=debug)
            run_programs(bench)
        else:
            if argument in ext_pyop2:
                build_pyop2_programs(bench, permissive, debug=debug)
            else:
                build_ufc_programs(bench, permissive, debug=debug)
            run_programs(bench)
            validate_programs(output_reference_dir)

        # Go back up
        os.chdir(os.path.pardir)
        end()

    # Print results
    if print_timing:
        timings = '\n'.join("%10.2e s %s" % (t, name)
                            for (name, t) in _command_timings)
        info_green("Timing of all commands executed:")
        info(timings)
    if logfile is None:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info("Error messages stored in error.log")
        return 1
def _find_boost():
    """Locate the Boost installation used to link the test programs.

    Searches $BOOST_DIR (or a platform default: /usr/local on macOS for
    Homebrew, /usr otherwise) for the boost headers and the
    boost_math_tr1 library, preferring the plain library name and
    falling back to the multithreaded "-mt" variant.

    Returns a tuple (inc_dir, lib_dir, math_tr1_lib_name).
    Raises OSError if Boost cannot be found.
    """
    # Code adapted from ufc/src/utils/python/ufc_utils/build.py.
    # Set a default directory for the boost installation
    if sys.platform == "darwin":
        # Use Brew as default
        default = os.path.join(os.path.sep, "usr", "local")
    else:
        default = os.path.join(os.path.sep, "usr")

    # If BOOST_DIR is not set use default directory
    boost_dir = os.getenv("BOOST_DIR", default)

    # Locate the include directory containing boost/version.hpp
    boost_inc_dir = ""
    for inc_dir in ["", "include"]:
        if os.path.isfile(
                os.path.join(boost_dir, inc_dir, "boost", "version.hpp")):
            boost_inc_dir = os.path.join(boost_dir, inc_dir)
            break

    # Locate the library directory and determine the library name
    boost_lib_dir = ""
    boost_math_tr1_lib = "boost_math_tr1"
    found_lib = False
    for lib_dir in ["", "lib", "lib/x86_64-linux-gnu"]:
        for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]:
            _lib = os.path.join(boost_dir, lib_dir,
                                "lib" + boost_math_tr1_lib + ext)
            if os.path.isfile(_lib):
                # Test the extension, not the full path: a BOOST_DIR
                # that happens to contain "-mt" must not trigger the
                # multithreaded suffix.
                if "-mt" in ext:
                    boost_math_tr1_lib += "-mt"
                boost_lib_dir = os.path.join(boost_dir, lib_dir)
                found_lib = True
                break
        if found_lib:
            # Stop at the first match. Previously only the inner loop
            # was exited, so a second match in a later lib_dir could
            # overwrite boost_lib_dir and append "-mt" twice.
            break

    if boost_inc_dir == "" or boost_lib_dir == "":
        raise OSError("""The Boost library was not found.
If Boost is installed in a nonstandard location,
set the environment variable BOOST_DIR.
""")
    return boost_inc_dir, boost_lib_dir, boost_math_tr1_lib


def build_programs(bench, permissive):
    """Build test programs for all test cases.

    Compiles every generated *.cpp (one per *.h in the current
    directory) into a *.bin executable with g++, linking against UFC
    and boost_math_tr1.

    bench      -- enable -O3 optimization for benchmarking builds
    permissive -- when False, compile with -Werror -pedantic
    """
    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Building test programs (%d header files found)" %
          len(header_files))

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Add Boost include and library search paths
    boost_inc_dir, boost_lib_dir, boost_math_tr1_lib = _find_boost()
    ufc_cflags += " -I%s -L%s" % (boost_inc_dir, boost_lib_dir)

    # Set compiler options
    compiler_options = "%s -Wall" % ufc_cflags
    if not permissive:
        compiler_options += " -Werror -pedantic"
    if bench:
        info("Benchmarking activated")
        # Takes too long to build with -O2
        #compiler_options += " -O2"
        compiler_options += " -O3"
        #compiler_options += " -O3 -fno-math-errno -march=native"
    # NOTE(review): `debug` is presumably a module-level flag — confirm
    # it is defined before this function runs.
    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:
        # Generate test code (writes the <prefix>.cpp compiled below)
        generate_test_code(f)

        # Compile test code
        prefix = f.split(".h")[0]
        command = "g++ %s -o %s.bin %s.cpp -l%s" % \
            (compiler_options, prefix, prefix, boost_math_tr1_lib)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)
    end()
def compile_ufl_objects(ufl_objects, kind, object_names=None, prefix=None,
                        parameters=None, jit=False):
    """This function generates UFC code for a given UFL form or list of UFL forms.

    The work proceeds through the standard FFC stages — analysis,
    intermediate representation, optimization, code generation, wrapper
    generation and formatting — each reported via _print_timing.
    Returns (code_h, code_c), plus a dict of dependent UFL objects
    (elements and coordinate mappings to jit first) when jit is True.
    """
    info("Compiling %s %s\n" % (kind, prefix))

    # Wall-clock start for the final summary line
    total_start = time()

    # jit always hands in validated parameters, so validation is only
    # needed for the command line and direct calls from Python
    if not jit:
        parameters = validate_parameters(parameters)

    # Normalize input to a sequence and bail out early on empty input
    if not isinstance(ufl_objects, (list, tuple)):
        ufl_objects = (ufl_objects,)
    if not ufl_objects:
        return "", ""
    if prefix != os.path.basename(prefix):
        error("Invalid prefix, looks like a full path? prefix='{}'.".format(prefix))
    object_names = {} if object_names is None else object_names

    # Stage 1: analysis
    stage_start = time()
    analysis = analyze_ufl_objects(ufl_objects, kind, parameters)
    _print_timing(1, time() - stage_start)

    # Stage 2: intermediate representation
    stage_start = time()
    ir = compute_ir(analysis, prefix, parameters, jit)
    _print_timing(2, time() - stage_start)

    # Stage 3: optimization
    stage_start = time()
    oir = optimize_ir(ir, parameters)
    _print_timing(3, time() - stage_start)

    # Stage 4: code generation
    stage_start = time()
    code = generate_code(oir, parameters)
    _print_timing(4, time() - stage_start)

    # Stage 4.1: generate wrappers
    stage_start = time()
    wrapper_code = generate_wrapper_code(analysis, prefix, object_names,
                                         parameters)
    _print_timing(4.1, time() - stage_start)

    # Stage 5: format code
    stage_start = time()
    code_h, code_c = format_code(code, wrapper_code, prefix, parameters, jit)
    _print_timing(5, time() - stage_start)

    info_green("FFC finished in %g seconds.", time() - total_start)

    if not jit:
        return code_h, code_c

    # Must use processed elements from analysis here
    _form_datas, unique_elements, _element_numbers, unique_coordinate_elements = analysis

    # Wrap each coordinate element in a Mesh object to represent that
    # we want a ufc::coordinate_mapping, not a ufc::finite_element
    wrapped_meshes = [ufl.Mesh(element, ufl_id=0)
                      for element in unique_coordinate_elements]

    # Avoid returning self as dependency for infinite recursion; these
    # will be jitted before continuing to compile ufl_objects
    dependent_ufl_objects = {
        "element": tuple(e for e in unique_elements
                         if e not in ufl_objects),
        "coordinate_mapping": tuple(m for m in wrapped_meshes
                                    if m not in ufl_objects),
    }
    return code_h, code_c, dependent_ufl_objects
def validate_programs(reference_dir):
    """Validate generated programs against references.

    Compares each generated output file in the current directory with
    the corresponding file under reference_dir.  Plain "key = values"
    lines are parsed and compared numerically to within the module
    global output_tolerance; json outputs are compared with the recdiff
    algorithm.  Differences are written via log_error.
    """
    # Get a list of all files
    # NOTE(review): listing ".json" files makes the later
    # f.replace(".out", ".json") a no-op — possibly this was meant to
    # list ".out" files; verify against how run_programs names output.
    output_files = [f for f in os.listdir(".") if f.endswith(".json")]
    output_files.sort()
    begin("Validating generated programs (%d programs found)" %
          len(output_files))

    # Iterate over all files
    for f in output_files:
        # Get generated output
        generated_output = open(f).read()

        # Get reference output
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            reference_output = open(reference_file).read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        ok = True
        # Reference values keep their order (list); generated values are
        # looked up by key (dict)
        old = [
            line.split(" = ") for line in reference_output.split("\n")
            if " = " in line
        ]
        new = dict([
            line.split(" = ") for line in generated_output.split("\n")
            if " = " in line
        ])
        header = (
            "Output differs for %s, diff follows (reference first, generated second)"
            % os.path.join(*reference_file.split(os.path.sep)[-3:]))
        for (key, value) in old:
            # Check if value is present
            if not key in new:
                if ok:
                    # Emit the header once, before the first error
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: missing value in generated code" % key)
                ok = False
                continue

            # Extract float values (space-separated numbers per key)
            old_values = array([float(v) for v in value.split(" ")])
            new_values = array([float(v) for v in new[key].split(" ")])

            # Check that shape is correct
            if not shape(old_values) == shape(new_values):
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: shape mismatch" % key)
                ok = False
                continue

            # Check that values match to within tolerance set by
            # 'output_tolerance'
            diff = max(abs(old_values - new_values))
            if diff > output_tolerance or isnan(diff):
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: values differ, error = %g (tolerance = %g)" %
                          (key, diff, output_tolerance))
                log_error("  old = " + " ".join("%.16g" % v
                                                for v in old_values))
                log_error("  new = " + " ".join("%.16g" % v
                                                for v in new_values))
                ok = False

        # Add debugging output to log file
        # (local name shadows any module-level `debug` flag, but only
        # within this function)
        debug = "\n".join(
            [line for line in generated_output.split("\n")
             if "debug" in line])
        if debug:
            log_error(debug)

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)

        # Now check json references
        fj = f.replace(".out", ".json")

        # Get generated json output
        if os.path.exists(fj):
            generated_json_output = open(fj).read()
            if "nan" in generated_json_output:
                info_red(
                    "Found nan in generated json output, replacing with 999 to be able to parse as python dict."
                )
                generated_json_output = generated_json_output.replace(
                    "nan", "999")
        else:
            generated_json_output = "{}"

        # Get reference json output
        reference_json_file = os.path.join(reference_dir, fj)
        if os.path.isfile(reference_json_file):
            reference_json_output = open(reference_json_file).read()
        else:
            info_blue("Missing reference for %s" % reference_json_file)
            reference_json_output = "{}"

        # Compare json with reference using recursive diff algorithm
        # TODO: Write to different error file?
        from recdiff import recdiff, print_recdiff, DiffEqual
        # Assuming reference is well formed
        # NOTE(review): eval() on the data files — acceptable only
        # because reference data is project-controlled; do not point
        # this at untrusted input.
        reference_json_output = eval(reference_json_output)
        try:
            generated_json_output = eval(generated_json_output)
        except Exception as e:
            info_red("Failed to evaluate json output for %s" % fj)
            log_error(str(e))
            generated_json_output = None
        json_diff = (None if generated_json_output is None else recdiff(
            generated_json_output,
            reference_json_output,
            tolerance=output_tolerance))
        json_ok = json_diff == DiffEqual

        # Check status
        if json_ok:
            info_green("%s OK" % fj)
        else:
            info_red("%s differs" % fj)
            log_error(
                "Json output differs for %s, diff follows (generated first, reference second)"
                % os.path.join(*reference_json_file.split(os.path.sep)[-3:]))
            print_recdiff(json_diff, printer=log_error)
    end()