def generate_test_cases(bench, only_forms):
    "Generate form files for all test cases."
    begin("Generating test cases")

    # Copy form files from the benchmark or demo directory.
    if bench:
        form_directory = bench_directory
    else:
        form_directory = demo_directory

    # Make list of form files, optionally restricted to an explicit subset.
    form_files = [f for f in os.listdir(form_directory) if f.endswith(".ufl")]
    if only_forms:
        form_files = [f for f in form_files if f in only_forms]
    form_files.sort()
    for f in form_files:
        # os.path.join for portability (consistent with the other
        # generate_test_cases variant in this file).
        shutil.copy(os.path.join(form_directory, f), ".")
    info_green("Found %d form files" % len(form_files))

    # Generate form files for forms
    info("Generating form files for extra forms: Not implemented")

    # Generate form files for elements
    if not bench:
        from elements import elements
        info("Generating form files for extra elements (%d elements)" % len(elements))
        for (i, element) in enumerate(elements):
            # Close the file deterministically; the original
            # open(...).write(...) leaked the file handle.
            with open("X_Element%d.ufl" % i, "w") as ufl_file:
                ufl_file.write("element = %s" % element)

    end()
def analyze_elements(elements, parameters):
    """Analyze a plain list of elements (no forms), returning the empty
    form-data tuple, the sorted unique elements, and their numbering."""
    begin("Compiler stage 1: Analyzing form(s)")

    # Extract unique nested elements, preserving first-seen order.
    # (The original also built a throwaway element_numbers dict here
    # that was immediately discarded and recomputed below.)
    unique_elements = []
    seen = set()
    for element in elements:
        # Get all (unique) nested elements.
        for e in _get_nested_elements(element):
            if e not in seen:
                seen.add(e)
                unique_elements.append(e)

    # Sort elements into canonical order
    unique_elements = sort_elements(unique_elements)

    # Build element map (single authoritative numbering pass)
    element_numbers = _compute_element_numbers(unique_elements)

    # Update scheme for QuadratureElements
    scheme = parameters["quadrature_rule"]
    if scheme == "auto":
        scheme = "default"
    for element in unique_elements:
        if element.family() == "Quadrature":
            element._quad_scheme = scheme

    end()
    return (), unique_elements, element_numbers
def analyze_forms(forms, object_names, parameters):
    """
    Analyze form(s), returning

       form_datas      - a tuple of form_data objects
       unique_elements - a tuple of unique elements across all forms
       element_numbers - a mapping to unique numbers for all elements
    """
    begin("Compiler stage 1: Analyzing form(s)")

    # Run the per-form analysis on every input form.
    form_datas = tuple(_analyze_form(form, object_names, parameters)
                       for form in forms)

    # Gather the unique elements appearing across all forms, keeping
    # the order of first occurrence.
    unique_elements = []
    for data in form_datas:
        for elem in data.unique_sub_elements:
            if elem not in unique_elements:
                unique_elements.append(elem)

    # Bring elements into canonical order.
    unique_elements = sort_elements(unique_elements)

    # Assign a unique number to each element.
    element_numbers = _compute_element_numbers(unique_elements)

    end()
    return form_datas, unique_elements, element_numbers
def analyze_forms(forms, object_names, parameters):
    """
    Analyze form(s), returning

       form_datas      - a tuple of form_data objects
       unique_elements - a tuple of unique elements across all forms
       element_numbers - a mapping to unique numbers for all elements
    """
    begin("Compiler stage 1: Analyzing form(s)")

    # Per-form analysis
    analyzed = []
    for form in forms:
        analyzed.append(_analyze_form(form, object_names, parameters))
    form_datas = tuple(analyzed)

    # Deduplicate sub-elements across all analyzed forms
    unique_elements = []
    for fd in form_datas:
        for sub in fd.unique_sub_elements:
            if sub not in unique_elements:
                unique_elements.append(sub)

    # Canonical element ordering, then numbering
    unique_elements = sort_elements(unique_elements)
    element_numbers = _compute_element_numbers(unique_elements)

    end()
    return form_datas, unique_elements, element_numbers
def generate_test_cases(bench, only_forms):
    "Generate form files for all test cases."
    begin("Generating test cases")

    # Copy form files from the benchmark or demo directory.
    if bench:
        form_directory = bench_directory
    else:
        form_directory = demo_directory

    # Make list of form files, optionally restricted to an explicit subset.
    form_files = [f for f in os.listdir(form_directory) if f.endswith(".ufl")]
    if only_forms:
        form_files = [f for f in form_files if f in only_forms]
    form_files.sort()
    for f in form_files:
        shutil.copy(os.path.join(form_directory, f), ".")
    info_green("Found %d form files" % len(form_files))

    # Generate form files for forms
    info("Generating form files for extra forms: Not implemented")

    # Generate form files for elements
    if not bench:
        from elements import elements
        info("Generating form files for extra elements (%d elements)" % len(elements))
        for (i, element) in enumerate(elements):
            # Close the file deterministically; the original
            # open(...).write(...) leaked the file handle.
            with open("X_Element%d.ufl" % i, "w") as ufl_file:
                ufl_file.write("element = %s" % element)

    end()
def generate_code(args, only_forms):
    "Generate code for all test cases."

    # Collect the .ufl files in the current directory, optionally
    # restricted to an explicit subset, in sorted order.
    ufl_files = sorted(f for f in os.listdir(".") if f.endswith(".ufl"))
    if only_forms:
        ufl_files = [f for f in ufl_files if f in only_forms]

    begin("Generating code (%d form files found)" % len(ufl_files))

    # TODO: Parse additional options from .ufl file? I.e. grep for
    # some sort of tag like '#ffc: <flags>'.
    special = {"AdaptivePoisson.ufl": "-e", }

    # Run ffc on every form file and report per-file status.
    for name in ufl_files:
        extra = special.get(name, "")
        cmd = ("ffc %s %s -f precision=8 -fconvert_exceptions_to_warnings %s"
               % (extra, " ".join(args), name))
        if run_command(cmd):
            info_green("%s OK" % name)
        else:
            info_red("%s failed" % name)

    end()
def run_programs(bench):
    "Run generated programs."

    # This matches argument parsing in the generated main files
    bench = 'b' if bench else ''

    # Get a list of all files
    test_programs = [f for f in os.listdir(".") if f.endswith(".bin")]
    test_programs.sort()
    begin("Running generated programs (%d programs found)" % len(test_programs))

    # Iterate over all files
    for f in test_programs:
        prefix = f.split(".bin")[0]
        # Remove any stale output file; it is fine if none exists.
        # (Was a bare 'except:' which would also swallow
        # KeyboardInterrupt and SystemExit.)
        try:
            os.remove(prefix + ".out")
        except OSError:
            pass
        ok = run_command(".%s%s.bin %s > %s.out" % (os.path.sep, prefix, bench, prefix))

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s failed" % f)

    end()
def run_programs(bench):
    "Run generated programs."

    # The generated main files take 'b' to request benchmark mode.
    flag = 'b' if bench else ''

    # Collect the generated binaries in sorted order.
    binaries = sorted(f for f in os.listdir(".") if f.endswith(".bin"))
    begin("Running generated programs (%d programs found)" % len(binaries))

    # Execute each binary and report per-program status.
    for binary in binaries:
        stem = binary.split(".bin")[0]
        succeeded = run_command(".%s%s.bin %s" % (os.path.sep, stem, flag))
        if succeeded:
            info_green("%s OK" % binary)
        else:
            info_red("%s failed" % binary)

    end()
def generate_code(ir, prefix, parameters):
    "Generate code from intermediate representation."
    begin("Compiler stage 4: Generating code")

    # FIXME: Document option -fconvert_exceptions_to_warnings
    # FIXME: Remove option epsilon and just rely on precision?

    # Set code generation parameters
    # set_float_formatting(int(parameters["precision"]))
    set_exception_handling(parameters["convert_exceptions_to_warnings"])

    # Unpack the four IR categories.
    ir_elements, ir_dofmaps, ir_integrals, ir_forms = ir

    # Elements
    info("Generating code for %d element(s)" % len(ir_elements))
    code_elements = [_generate_element_code(elem_ir, prefix, parameters)
                     for elem_ir in ir_elements]

    # Dofmaps
    info("Generating code for %d dofmap(s)" % len(ir_dofmaps))
    code_dofmaps = [_generate_dofmap_code(dm_ir, prefix, parameters)
                    for dm_ir in ir_dofmaps]

    # Integrals
    info("Generating code for integrals")
    code_integrals = [_generate_integral_code(itg_ir, prefix, parameters)
                      for itg_ir in ir_integrals]

    # Forms
    info("Generating code for forms")
    code_forms = [_generate_form_code(form_ir, prefix, parameters)
                  for form_ir in ir_forms]

    end()
    return code_elements, code_dofmaps, code_integrals, code_forms
def compute_ir(analysis, parameters):
    "Compute intermediate representation."
    begin("Compiler stage 2: Computing intermediate representation")

    # Configure float formatting for code generation.
    set_float_formatting(int(parameters["precision"]))

    # Unpack the analysis results.
    form_datas, elements, element_numbers = analysis

    # Element representations (one per unique element)
    info("Computing representation of %d elements" % len(elements))
    ir_elements = [_compute_element_ir(elem, idx, element_numbers)
                   for (idx, elem) in enumerate(elements)]

    # Dofmap representations (one per unique element)
    info("Computing representation of %d dofmaps" % len(elements))
    ir_dofmaps = [_compute_dofmap_ir(elem, idx, element_numbers)
                  for (idx, elem) in enumerate(elements)]

    # Integral representations come grouped per form; flatten and
    # drop empty entries.
    info("Computing representation of integrals")
    per_form = [_compute_integral_ir(fd, idx, parameters)
                for (idx, fd) in enumerate(form_datas)]
    ir_integrals = [itg for itg in chain(*per_form) if itg is not None]

    # Form representations
    info("Computing representation of forms")
    ir_forms = [_compute_form_ir(fd, idx, element_numbers)
                for (idx, fd) in enumerate(form_datas)]

    end()
    return ir_elements, ir_dofmaps, ir_integrals, ir_forms
def analyze_forms(forms, parameters):
    """
    Analyze form(s), returning

       form_datas      - a tuple of form_data objects
       unique_elements - a tuple of unique elements across all forms
       element_numbers - a mapping to unique numbers for all elements
    """
    begin("Compiler stage 1: Analyzing form(s)")

    # Per-form analysis
    form_datas = tuple(_analyze_form(f, parameters) for f in forms)

    # Union of sub-elements over all forms
    element_set = set()
    for fd in form_datas:
        element_set.update(fd.unique_sub_elements)

    # Canonical ordering and numbering
    unique_elements = sort_elements(element_set)
    element_numbers = _compute_element_numbers(unique_elements)

    # Coordinate elements, deduplicated and sorted
    unique_coordinate_elements = sorted(
        set(chain(*[fd.coordinate_elements for fd in form_datas])))

    end()
    return form_datas, unique_elements, element_numbers, unique_coordinate_elements
def analyze_elements(elements, parameters):
    # Element-only analysis: no forms, so form data and coordinate
    # elements are empty.
    begin("Compiler stage 1: Analyzing form(s)")

    # All unique (sub)elements, in canonical order
    unique_elements = sort_elements(set(extract_sub_elements(elements)))

    # Number the elements
    element_numbers = _compute_element_numbers(unique_elements)

    # Propagate the chosen quadrature scheme to QuadratureElements
    scheme = parameters["quadrature_rule"]
    if scheme == "auto":
        scheme = "default"
    for elem in unique_elements:
        if elem.family() == "Quadrature":
            elem._quad_scheme = scheme

    end()
    return (), unique_elements, element_numbers, ()
def validate_programs(reference_dir): "Validate generated programs against references." # Get a list of all files output_files = sorted(f for f in os.listdir(".") if f.endswith(".json")) begin("Validating generated programs (%d .json program output files found)" % len(output_files)) failures = [] # Iterate over all files for fj in output_files: # Get generated json output if os.path.exists(fj): generated_json_output = open(fj).read() if "nan" in generated_json_output: info_red("Found nan in generated json output, replacing with 999 to be able to parse as python dict.") generated_json_output = generated_json_output.replace("nan", "999") else: generated_json_output = "{}" # Get reference json output reference_json_file = os.path.join(reference_dir, fj) if os.path.isfile(reference_json_file): reference_json_output = open(reference_json_file).read() else: info_blue("Missing reference for %s" % reference_json_file) reference_json_output = "{}" # Compare json with reference using recursive diff algorithm # TODO: Write to different error file? from recdiff import recdiff, print_recdiff, DiffEqual # Assuming reference is well formed reference_json_output = eval(reference_json_output) try: generated_json_output = eval(generated_json_output) except Exception as e: info_red("Failed to evaluate json output for %s" % fj) log_error(str(e)) generated_json_output = None json_diff = (None if generated_json_output is None else recdiff(generated_json_output, reference_json_output, tolerance=output_tolerance)) json_ok = json_diff == DiffEqual # Check status if json_ok: info_green("%s OK" % fj) else: info_red("%s differs" % fj) log_error("Json output differs for %s, diff follows (generated first, reference second)" % os.path.join(*reference_json_file.split(os.path.sep)[-3:])) print_recdiff(json_diff, printer=log_error) failures.append(fj) end() return failures
def generate_code(args, only_forms, skip_forms, debug):
    "Generate code for all test cases."
    # Runs ffc.main on each .ufl file and returns the list of files
    # that failed. Timing for each invocation is appended to the
    # module-level _command_timings list.
    global _command_timings

    # Get a list of all files, excluding known-bad forms
    form_files = [f for f in os.listdir(".") if f.endswith(".ufl") and f not in skip_forms]
    if only_forms:
        form_files = [f for f in form_files if f in only_forms]
    form_files.sort()

    begin("Generating code (%d form files found)" % len(form_files))

    # TODO: Parse additional options from .ufl file? I.e. grep for
    # some sort of tag like '#ffc: <flags>'.
    special = {"AdaptivePoisson.ufl": "-e", }

    failures = []

    # Iterate over all files
    for f in form_files:
        options = [special.get(f, "")]
        options.extend(args)
        options.extend(["-f", "precision=8", "-f", "epsilon=1e-7", "-fconvert_exceptions_to_warnings"])
        options.append(f)
        # filter(None, ...) drops the empty string when f has no special option
        options = list(filter(None, options))
        # cmd is only used for logging/timing labels; ffc is invoked in-process
        cmd = sys.executable + " -m ffc " + " ".join(options)

        # Generate code
        t1 = time.time()
        try:
            ok = ffc.main(options)
        except Exception as e:
            # In debug mode propagate the exception; otherwise log the
            # traceback and record a non-zero (failed) status.
            if debug:
                raise e
            msg = traceback.format_exc()
            log_error(cmd)
            log_error(msg)
            ok = 1
        finally:
            # Record the elapsed time even when ffc raised.
            t2 = time.time()
            _command_timings.append((cmd, t2 - t1))

        # Check status (0 is treated as success, matching the ok = 1 above)
        if ok == 0:
            info_green("%s OK" % f)
        else:
            info_red("%s failed" % f)
            failures.append(f)

    end()
    return failures
def compute_ir(analysis, prefix, parameters, object_names=None):
    "Compute intermediate representation."
    begin("Compiler stage 2: Computing intermediate representation")

    # Set code generation parameters
    set_float_formatting(int(parameters["precision"]))

    # Extract data from analysis
    form_datas, elements, element_numbers, coordinate_elements = analysis

    # The pyop2 format skips element/dofmap/coordinate-mapping/form IR.
    # Hoisted once instead of repeating `not parameters["format"] == ...`.
    is_pyop2 = parameters["format"] == "pyop2"

    # Compute representation of elements
    if not is_pyop2:
        info("Computing representation of %d elements" % len(elements))
        ir_elements = [_compute_element_ir(e, prefix, element_numbers)
                       for e in elements]
    else:
        ir_elements = [None]

    # Compute representation of dofmaps and coordinate mappings
    if not is_pyop2:
        info("Computing representation of %d dofmaps" % len(elements))
        ir_dofmaps = [_compute_dofmap_ir(e, prefix, element_numbers)
                      for e in elements]

        info("Computing representation of %d coordinate mappings" % len(coordinate_elements))
        ir_compute_coordinate_mappings = [
            _compute_coordinate_mapping_ir(e, prefix, element_numbers)
            for e in coordinate_elements]
    else:
        ir_dofmaps = [None]
        ir_compute_coordinate_mappings = [None]

    # Compute and flatten representation of integrals
    info("Computing representation of integrals")
    irs = [_compute_integral_ir(fd, i, prefix, element_numbers, parameters,
                                object_names=object_names)
           for (i, fd) in enumerate(form_datas)]
    ir_integrals = [ir for ir in chain(*irs) if ir is not None]

    # Compute representation of forms
    if not is_pyop2:
        info("Computing representation of forms")
        ir_forms = [_compute_form_ir(fd, i, prefix, element_numbers)
                    for (i, fd) in enumerate(form_datas)]
    else:
        ir_forms = [None]

    end()
    return ir_elements, ir_dofmaps, ir_compute_coordinate_mappings, ir_integrals, ir_forms
def _generate_dolfin_wrapper(analysis, prefix, object_names, parameters):
    # Produce the extra DOLFIN wrapper code for the analyzed forms.
    begin("Compiler stage 4.1: Generating additional wrapper code")

    # Package analysis data for the DOLFIN wrapper generator.
    capsules, common_space = _encapsulate(prefix, object_names, analysis, parameters)

    # Emit the wrapper code itself, terminated by a blank separator.
    info("Generating wrapper code for DOLFIN")
    code = generate_dolfin_code(prefix, "", capsules, common_space,
                                error_control=parameters["error_control"]) + "\n\n"

    end()
    return code
def generate_code(ir, parameters):
    "Generate code from intermediate representation."
    begin("Compiler stage 4: Generating code")

    full_ir = ir

    # FIXME: This has global side effects
    # Set code generation parameters
    # set_float_formatting(parameters["precision"])
    # set_exception_handling(parameters["convert_exceptions_to_warnings"])

    # Unpack the five IR categories.
    (ir_finite_elements, ir_dofmaps, ir_coordinate_mappings,
     ir_integrals, ir_forms) = ir

    # Finite elements
    info("Generating code for %d finite_element(s)" % len(ir_finite_elements))
    code_finite_elements = [_generate_finite_element_code(fe_ir, parameters)
                            for fe_ir in ir_finite_elements]

    # Dofmaps
    info("Generating code for %d dofmap(s)" % len(ir_dofmaps))
    code_dofmaps = [_generate_dofmap_code(dm_ir, parameters)
                    for dm_ir in ir_dofmaps]

    # Coordinate mappings
    info("Generating code for %d coordinate_mapping(s)" % len(ir_coordinate_mappings))
    code_coordinate_mappings = [_generate_coordinate_mapping_code(cm_ir, parameters)
                                for cm_ir in ir_coordinate_mappings]

    # Integrals
    info("Generating code for integrals")
    code_integrals = [_generate_integral_code(itg_ir, parameters)
                      for itg_ir in ir_integrals]

    # Forms
    info("Generating code for forms")
    code_forms = [_generate_form_code(form_ir, parameters)
                  for form_ir in ir_forms]

    # Additional includes required by the generated integral code
    includes = _extract_includes(full_ir, code_integrals)

    end()
    return (code_finite_elements, code_dofmaps, code_coordinate_mappings,
            code_integrals, code_forms, includes)
def validate_code(reference_dir):
    "Validate generated code against references."

    # All generated header files in the current directory, sorted.
    headers = sorted(f for f in os.listdir(".") if f.endswith(".h"))
    begin("Validating generated code (%d header files found)" % len(headers))

    failures = []

    for name in headers:
        # Read the generated code.
        generated_code = open(name).read()

        # Look up the matching reference file; skip when absent.
        reference_file = os.path.join(reference_dir, name)
        if not os.path.isfile(reference_file):
            info_blue("Missing reference for %s" % reference_file)
            continue
        reference_code = open(reference_file).read()

        if generated_code == reference_code:
            info_green("%s OK" % name)
            continue

        # Mismatch: log a unified diff and record the failure.
        info_red("%s differs" % name)
        difflines = difflib.unified_diff(reference_code.split("\n"),
                                         generated_code.split("\n"))
        diff = "\n".join(difflines)
        s = ("Code differs for %s, diff follows (reference first, generated second)"
             % os.path.join(*reference_file.split(os.path.sep)[-3:]))
        log_error("\n" + s + "\n" + len(s) * "-")
        log_error(diff)
        failures.append(name)

    end()
    return failures
def optimize_ir(ir, parameters):
    "Optimize intermediate form representation."
    begin("Compiler stage 3: Optimizing intermediate representation")

    # Bail out early when optimization was not requested.
    if not parameters["optimize"]:
        info("Skipping optimizations, add -O to optimize")
        end()
        return ir

    ir_elements, ir_dofmaps, ir_integrals, ir_forms = ir

    # Only integral IR is optimized; the rest passes through untouched.
    optimized_integrals = [_optimize_integral_ir(integral_ir, parameters)
                           for integral_ir in ir_integrals]

    end()
    return ir_elements, ir_dofmaps, optimized_integrals, ir_forms
def optimize_ir(ir, parameters):
    "Optimize intermediate form representation."
    begin("Compiler stage 3: Optimizing intermediate representation")

    (ir_elements, ir_dofmaps, ir_coordinate_mappings,
     ir_integrals, ir_forms) = ir

    # Emit a hint when no integral batch requested optimization.
    requested = any(itg["integrals_metadata"]["optimize"] for itg in ir_integrals)
    if not requested:
        info(r"Skipping optimizations, add -O or attach {'optimize': True} "
             "metadata to integrals")

    # Optimize each batch of integrals that asked for it; the rest
    # pass through unchanged.
    oir_integrals = []
    for itg in ir_integrals:
        if itg["integrals_metadata"]["optimize"]:
            oir_integrals.append(_optimize_integral_ir(itg, parameters))
        else:
            oir_integrals.append(itg)

    end()
    return ir_elements, ir_dofmaps, ir_coordinate_mappings, oir_integrals, ir_forms
def validate_code(reference_dir):
    "Validate generated code against references."

    # Get a sorted list of generated header files
    header_files = sorted(f for f in os.listdir(".") if f.endswith(".h"))

    begin("Validating generated code (%d header files found)" % len(header_files))

    # Iterate over all files
    for f in header_files:
        # Get generated code
        generated_code = open(f).read()

        # Get reference code; skip the file when no reference exists
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            reference_code = open(reference_file).read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        if generated_code == reference_code:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)
            # join consumes the unified_diff generator directly; the
            # original wrapped it in a redundant list comprehension.
            diff = "\n".join(difflib.unified_diff(reference_code.split("\n"),
                                                  generated_code.split("\n")))
            s = ("Code differs for %s, diff follows (reference first, generated second)"
                 % os.path.join(*reference_file.split(os.path.sep)[-3:]))
            log_error("\n" + s + "\n" + len(s) * "-")
            log_error(diff)

    end()
def optimize_ir(ir, parameters):
    "Optimize intermediate form representation."
    begin("Compiler stage 3: Optimizing intermediate representation")

    ir_elements, ir_dofmaps, ir_coordinate_mappings, ir_integrals, ir_forms = ir

    def wants_optimization(integral_ir):
        # Per-batch switch attached by earlier compiler stages.
        return integral_ir["integrals_metadata"]["optimize"]

    # Emit a hint when nothing requested optimization.
    if not any(wants_optimization(i) for i in ir_integrals):
        info(r"Skipping optimizations, add -O or attach {'optimize': True} "
             "metadata to integrals")

    # Optimize exactly the batches that asked for it.
    oir_integrals = [_optimize_integral_ir(i, parameters) if wants_optimization(i) else i
                     for i in ir_integrals]

    end()
    return ir_elements, ir_dofmaps, ir_coordinate_mappings, oir_integrals, ir_forms
def optimize_ir(ir, parameters):
    "Optimize intermediate form representation."
    begin("Compiler stage 3: Optimizing intermediate representation")

    # Nothing to do unless -O was given.
    if not parameters["optimize"]:
        info("Skipping optimizations, add -O to optimize")
        end()
        return ir

    (ir_elements, ir_dofmaps, ir_coordinate_mappings,
     ir_integrals, ir_forms) = ir

    # Integral IR is the only part that gets optimized.
    oir_integrals = [_optimize_integral_ir(one_ir, parameters)
                     for one_ir in ir_integrals]

    end()
    return (ir_elements, ir_dofmaps, ir_coordinate_mappings,
            oir_integrals, ir_forms)
def build_pyop2_programs(bench, permissive, debug=False):
    # Compile a test program for every generated header file.

    # Get a list of all files
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()
    begin("Building test programs (%d header files found)" % len(header_files))

    # Set compiler options.  Start from a defined default: the original
    # left compiler_options unassigned (NameError) when permissive was
    # true with bench == 0 and debug false.
    compiler_options = ""
    if not permissive:
        compiler_options = " -Werror"
    if bench > 0:
        info("Benchmarking activated")
        compiler_options = "-Wall -Werror"
    if debug:
        info("Debugging activated")
        compiler_options = "-Wall -Werror -g"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:
        # Generate test code
        filename = _generate_test_code(f, bench)

        # Compile test code
        prefix = f.split(".h")[0]
        command = "g++ %s -o %s.bin %s.cpp -lboost_math_tr1" % (compiler_options, prefix, prefix)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def compute_ir(analysis, parameters):
    "Compute intermediate representation."
    begin("Compiler stage 2: Computing intermediate representation")

    # Float formatting is a global code-generation setting.
    set_float_formatting(int(parameters["precision"]))

    form_datas, elements, element_numbers = analysis

    # One element IR per unique element.
    info("Computing representation of %d elements" % len(elements))
    ir_elements = []
    for number, element in enumerate(elements):
        ir_elements.append(_compute_element_ir(element, number, element_numbers))

    # One dofmap IR per unique element.
    info("Computing representation of %d dofmaps" % len(elements))
    ir_dofmaps = []
    for number, element in enumerate(elements):
        ir_dofmaps.append(_compute_dofmap_ir(element, number, element_numbers))

    # Integral IRs come grouped per form; flatten and drop empties.
    info("Computing representation of integrals")
    grouped = [_compute_integral_ir(fd, number, parameters)
               for number, fd in enumerate(form_datas)]
    ir_integrals = [one for one in chain(*grouped) if one is not None]

    # One form IR per form.
    info("Computing representation of forms")
    ir_forms = [_compute_form_ir(fd, number, element_numbers)
                for number, fd in enumerate(form_datas)]

    end()
    return ir_elements, ir_dofmaps, ir_integrals, ir_forms
def format_code(code, wrapper_code, prefix, parameters):
    "Format given code in UFC format."
    # Assembles the generated code sections into header (.h) and, when
    # parameters["split"] is set, implementation (.cpp) files on disk.
    begin("Compiler stage 5: Formatting code")

    # Extract code
    code_elements, code_dofmaps, code_integrals, code_forms = code

    # Header and implementation code accumulators
    code_h = ""
    code_c = ""

    # Generate code for comment on top of file
    code_h += _generate_comment(parameters) + "\n"
    code_c += _generate_comment(parameters) + "\n"

    # Generate code for header
    code_h += format["header_h"] % {"prefix_upper": prefix.upper()}
    code_h += _generate_additional_includes(code_integrals) + "\n"
    code_c += format["header_c"] % {"prefix": prefix}

    # Generate code for elements
    if code_elements:
        for code_element in code_elements:
            code_h += _format_h("finite_element", code_element, parameters)
            code_c += _format_c("finite_element", code_element, parameters)

    # Generate code for dofmaps
    if code_dofmaps:
        for code_dofmap in code_dofmaps:
            code_h += _format_h("dofmap", code_dofmap, parameters)
            code_c += _format_c("dofmap", code_dofmap, parameters)

    # Generate code for integrals.
    # Dispatch on substring of the class name to select the UFC base class.
    if code_integrals:
        for code_integral in code_integrals:
            classname = code_integral["classname"]
            if "cell_integral" in classname:
                code_h += _format_h("cell_integral", code_integral, parameters)
                code_c += _format_c("cell_integral", code_integral, parameters)
            elif "exterior_facet_integral" in classname:
                code_h += _format_h("exterior_facet_integral", code_integral, parameters)
                code_c += _format_c("exterior_facet_integral", code_integral, parameters)
            elif "interior_facet_integral" in classname:
                code_h += _format_h("interior_facet_integral", code_integral, parameters)
                code_c += _format_c("interior_facet_integral", code_integral, parameters)
            elif "point_integral" in classname:
                code_h += _format_h("point_integral", code_integral, parameters)
                code_c += _format_c("point_integral", code_integral, parameters)
            elif "custom_integral" in classname:
                code_h += _format_h("custom_integral", code_integral, parameters)
                code_c += _format_c("custom_integral", code_integral, parameters)
            else:
                error("Unable to figure out base class for %s" % classname)

    # Generate code for form
    if code_forms:
        for code_form in code_forms:
            code_h += _format_h("form", code_form, parameters)
            code_c += _format_c("form", code_form, parameters)

    # Add wrappers
    if wrapper_code:
        code_h += wrapper_code

    # Generate code for footer
    code_h += format["footer"]

    # Write file(s)
    # NOTE(review): in the non-split branch only code_h is written and
    # code_c is discarded — presumably _format_h emits full definitions
    # in that mode; verify against _format_h/_format_c.
    if parameters["split"]:
        _write_file(code_h, prefix, ".h", parameters)
        _write_file(code_c, prefix, ".cpp", parameters)
    else:
        _write_file(code_h, prefix, ".h", parameters)

    end()
def main(args):
    "Run all regression tests."
    # Driver: parses command-line flags, downloads reference data,
    # generates/compiles/runs the test forms for each representation,
    # validates against references, and returns 0 on success, 1 on failure.

    # Check command-line arguments TODO: Use argparse
    only_auto = "--only-auto" in args
    use_auto = "--skip-auto" not in args
    use_uflacs = "--skip-uflacs" not in args
    use_quad = "--skip-quad" not in args
    use_tsfc = "--use-tsfc" in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    skip_download = "--skip-download" in args
    skip_run = "--skip-run" in args
    skip_code_diff = "--skip-code-diff" in args
    skip_validate = "--skip-validate" in args
    bench = "--bench" in args
    debug = "--debug" in args
    verbose = ("--verbose" in args) or debug  # debug implies verbose
    permissive = "--permissive" in args or bench
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    show_help = "--help" in args

    # All recognized flags; anything else is passed through to ffc.
    flags = (
        "--only-auto",
        "--skip-auto",
        "--skip-uflacs",
        "--skip-quad",
        "--use-tsfc",
        "--ext-quad",
        "--skip-download",
        "--skip-run",
        "--skip-code-diff",
        "--skip-validate",
        "--bench",
        "--debug",
        "--verbose",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--help",
    )
    args = [arg for arg in args if arg not in flags]

    # Hack: add back --verbose for ffc.main to see
    if verbose:
        args = args + ["--verbose"]

    if show_help:
        info("Valid arguments:\n" + "\n".join(flags))
        return 0

    # Output validation requires the programs to have run.
    if bench or not skip_validate:
        skip_run = False
    # Benchmarking skips both validation stages.
    if bench:
        skip_code_diff = True
        skip_validate = True
    if use_ext_quad or use_ext_uflacs:
        skip_code_diff = True

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        try:
            cmd = "./scripts/download"
            output = as_native_str(subprocess.check_output(cmd, shell=True))
            print(output)
            info_green("Download reference data ok")
        except subprocess.CalledProcessError as e:
            print(e.output)
            info_red("Download reference data failed")

    # Loosen the numeric comparison tolerance on request.
    if tolerant:
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to run here
    test_cases = []
    if only_auto:
        test_cases += ["-r auto"]
    else:
        if use_auto:
            test_cases += ["-r auto"]
        if use_uflacs:
            test_cases += ["-r uflacs -O0", "-r uflacs -O"]
        if use_quad:
            test_cases += ["-r quadrature -O0", "-r quadrature -O"]
            # Suppress the deprecation warning after the first occurrence.
            import warnings
            from ffc.quadrature.deprecation import QuadratureRepresentationDeprecationWarning
            warnings.simplefilter("once", QuadratureRepresentationDeprecationWarning)
        if use_tsfc:
            test_cases += ["-r tsfc -O0", "-r tsfc -O"]
            # Silence good-performance messages by COFFEE
            import coffee
            coffee.set_log_level(coffee.logger.PERF_WARN)
        if use_ext_quad:
            test_cases += ext_quad
        if use_ext_uflacs:
            test_cases += ext_uflacs

    # fails maps test-case argument -> phase name -> list of failed files.
    test_case_timings = {}
    fails = OrderedDict()

    for argument in test_cases:
        test_case_timings[argument] = time.time()
        fails[argument] = OrderedDict()

        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Workarounds for feature lack in representation
        if "quadrature" in argument and not only_forms:
            skip_forms = known_quad_failures
            info_blue("Skipping forms known to fail with quadrature:\n" + "\n".join(sorted(skip_forms)))
        elif "uflacs" in argument and not only_forms:
            skip_forms = known_uflacs_failures
            info_blue("Skipping forms known to fail with uflacs:\n" + "\n".join(sorted(skip_forms)))
        elif "tsfc" in argument and not only_forms:
            skip_forms = known_tsfc_failures
            info_blue("Skipping forms known to fail with tsfc:\n" + "\n".join(sorted(skip_forms)))
        else:
            skip_forms = set()

        # Generate test cases
        generate_test_cases(bench, only_forms, skip_forms)

        # Generate code
        failures = generate_code(args + argument.split(), only_forms, skip_forms, debug)
        if failures:
            fails[argument]["generate_code"] = failures

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continously test that the codes generated by
        # all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if skip_code_diff:
            info_blue("Skipping code diff validation")
        else:
            failures = validate_code(code_reference_dir)
            if failures:
                fails[argument]["validate_code"] = failures

        # Build and run programs and validate output to common
        # reference
        if skip_run:
            info_blue("Skipping program execution")
        else:
            failures = build_programs(bench, permissive, debug, verbose)
            if failures:
                fails[argument]["build_programs"] = failures
            failures = run_programs(bench, debug, verbose)
            if failures:
                fails[argument]["run_programs"] = failures

        # Validate output to common reference results
        if skip_validate:
            info_blue("Skipping program output validation")
        else:
            failures = validate_programs(output_reference_dir)
            if failures:
                fails[argument]["validate_programs"] = failures

        # Go back up
        os.chdir(os.path.pardir)
        end()
        test_case_timings[argument] = time.time() - test_case_timings[argument]

    # Go back up
    os.chdir(os.path.pardir)

    # Print results
    if print_timing:
        info_green("Timing of all commands executed:")
        timings = '\n'.join("%10.2e s %s" % (t, name) for (name, t) in _command_timings)
        info_blue(timings)

    for argument in test_cases:
        info_blue("Total time for %s: %.1f s" % (argument, test_case_timings[argument]))

    # Total failure count across all test cases and phases.
    num_failures = sum(len(failures_phase)
                       for failures_args in fails.values()
                       for failures_phase in failures_args.values())

    if num_failures == 0:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info_red("")
        info_red("Long summary:")
        for argument in test_cases:
            if not fails[argument]:
                info_green(" No failures with args '%s'" % argument)
            else:
                info_red(" Failures with args '%s':" % argument)
                for phase, failures in fails[argument].items():
                    info_red(" %d failures in %s:" % (len(failures), phase))
                    for f in failures:
                        info_red(" %s" % (f,))
        info_red("")
        info_red("Short summary:")
        phase_fails = defaultdict(int)
        for argument in test_cases:
            if not fails[argument]:
                info_green(" No failures with args '%s'" % argument)
            else:
                info_red(" Number of failures with args '%s':" % argument)
                for phase, failures in fails[argument].items():
                    info_red(" %d failures in %s." % (len(failures), phase))
                    phase_fails[phase] += len(failures)
        info_red("")
        info_red("Total failures for all args:")
        for phase, count in phase_fails.items():
            info_red(" %s: %d failed" % (phase, count))
        info_red("")
        info_red("Error messages stored in %s" % logfile)
        return 1
def main(args):
    """Run all regression tests.

    Parses simple --flag style options from *args* (TODO: argparse),
    optionally downloads reference data, then for each selected set of
    compiler arguments generates, builds, runs and validates the test
    programs inside a dedicated output sub-directory.

    Returns 0 on success, 1 if any errors were logged (see the
    module-level ``logfile`` check at the bottom).
    """

    # Check command-line arguments TODO: Use argparse
    generate_only = "--generate-only" in args
    fast = "--fast" in args
    bench = "--bench" in args
    use_auto = "--skip-auto" not in args
    use_quad = "--skip-quad" not in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    permissive = "--permissive" in args
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    skip_download = "--skip-download" in args
    ignore_code_diff = "--ignore-code-diff" in args
    pyop2 = "--pyop2" in args

    # Known flags; everything else in args is passed through to the
    # code generation step below.
    flags = (
        "--generate-only",
        "--fast",
        "--bench",
        "--skip-auto",
        "--skip-quad",
        "--ext-quad",
        "--ext-uflacs",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--skip-download",
        "--ignore-code-diff",
        "--pyop2",
    )
    args = [arg for arg in args if not arg in flags]

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        failure, output = get_status_output("./scripts/download")
        print(output)
        if failure:
            info_red("Download reference data failed")
        else:
            info_green("Download reference data ok")

    # Loosen the comparison tolerance used by the validation helpers.
    if tolerant:
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to
    # run here
    test_cases = []
    if use_auto:
        test_cases += ["-r auto"]
    if use_quad and (not bench and not fast):
        test_cases += ["-r quadrature", "-r quadrature -O"]
    if use_ext_quad:
        test_cases += ext_quad
    if use_ext_uflacs:
        # NOTE: this *replaces* the list built so far rather than
        # extending it -- uflacs runs only its own cases plus quadrature.
        test_cases = ext_uflacs
        test_cases += ["-r quadrature"]
        #test_cases += ["-r quadrature -O"]
    if pyop2:
        test_cases += ext_pyop2

    for argument in test_cases:
        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        # (e.g. "-r auto" -> "r_auto")
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Generate test cases
        generate_test_cases(bench, only_forms)

        # Generate code
        generate_code(args + [argument], only_forms)

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continously test that the codes generated by
        # all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if not bench and (argument not in ext_quad) and not ignore_code_diff:
            validate_code(code_reference_dir)

        # Build and run programs and validate output to common
        # reference
        if fast or generate_only:
            info("Skipping program validation")
        elif bench:
            # Benchmark runs build and execute but skip output validation.
            if argument in ext_pyop2:
                build_pyop2_programs(bench, permissive, debug=debug)
            else:
                build_ufc_programs(bench, permissive, debug=debug)
            run_programs(bench)
        else:
            if argument in ext_pyop2:
                build_pyop2_programs(bench, permissive, debug=debug)
            else:
                build_ufc_programs(bench, permissive, debug=debug)
            run_programs(bench)
            validate_programs(output_reference_dir)

        # Go back up
        os.chdir(os.path.pardir)

        end()

    # Print results
    if print_timing:
        timings = '\n'.join("%10.2e s %s" % (t, name) for (name, t) in _command_timings)
        info_green("Timing of all commands executed:")
        info(timings)
    # NOTE(review): success is inferred from the module-level ``logfile``
    # being unset; presumably it is created lazily on first logged error.
    if logfile is None:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info("Error messages stored in error.log")
        return 1
def build_programs(bench, permissive):
    """Build one test binary per generated UFC header in the current directory.

    Parameters:
        bench      -- enable -O3 benchmarking builds
        permissive -- drop -Werror/-pedantic from the compile flags

    Relies on helpers defined elsewhere in this file
    (``get_status_output``, ``generate_test_code``, ``run_command``,
    ``begin``/``end``, ``info_*``) and on the module-level ``debug`` flag.
    Raises OSError when the Boost math_tr1 library cannot be located.
    """

    # Get a list of all header files to build
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()

    begin("Building test programs (%d header files found)" % len(header_files))

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py)
    # Set a default directory for the boost installation
    if sys.platform == "darwin":
        # Use Brew as default
        default = os.path.join(os.path.sep, "usr", "local")
    else:
        default = os.path.join(os.path.sep, "usr")

    # If BOOST_DIR is not set use default directory
    boost_inc_dir = ""
    boost_lib_dir = ""
    boost_math_tr1_lib = "boost_math_tr1"
    boost_dir = os.getenv("BOOST_DIR", default)

    # Locate the Boost headers
    for inc_dir in ["", "include"]:
        if os.path.isfile(os.path.join(boost_dir, inc_dir, "boost", "version.hpp")):
            boost_inc_dir = os.path.join(boost_dir, inc_dir)
            break

    # Search multiarch-style and classic library directories for the
    # boost_math_tr1 library (plain or -mt multithreaded flavour).
    libdir_multiarch = "lib/" + sysconfig.get_config_vars().get("MULTIARCH", "")
    for lib_dir in ["", "lib", libdir_multiarch, "lib64"]:
        for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]:
            _lib = os.path.join(boost_dir, lib_dir, "lib" + boost_math_tr1_lib + ext)
            if os.path.isfile(_lib):
                if "-mt" in _lib:
                    boost_math_tr1_lib += "-mt"
                boost_lib_dir = os.path.join(boost_dir, lib_dir)
                break
        # Bug fix: stop the outer directory loop once a library has been
        # found. Previously only the inner loop broke, so a match in a
        # later directory could append "-mt" to the library name again,
        # producing a bogus "boost_math_tr1-mt-mt" link name.
        if boost_lib_dir:
            break

    if not (boost_inc_dir and boost_lib_dir):
        raise OSError("""The Boost library was not found. If Boost is installed in a nonstandard location, set the environment variable BOOST_DIR.
""")

    ufc_cflags += " -I%s -L%s" % (boost_inc_dir, boost_lib_dir)

    # Set compiler options
    compiler_options = "%s -Wall" % ufc_cflags
    if not permissive:
        compiler_options += " -Werror -pedantic"
    if bench:
        info("Benchmarking activated")
        # Takes too long to build with -O2
        #compiler_options += " -O2"
        compiler_options += " -O3"
        #compiler_options += " -O3 -fno-math-errno -march=native"
    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:

        # Generate test code (writes <prefix>.cpp next to the header;
        # return value is not needed here)
        generate_test_code(f)

        # Compile test code. os.path.splitext is robust for file names
        # that contain ".h" in the middle, unlike f.split(".h")[0].
        prefix = os.path.splitext(f)[0]
        command = "g++ %s -o %s.bin %s.cpp -l%s" % \
            (compiler_options, prefix, prefix, boost_math_tr1_lib)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def build_programs(bench, permissive):
    """Build one test binary per generated UFC header in the current directory.

    Parameters:
        bench      -- enable -O3 benchmarking builds
        permissive -- drop -Werror/-pedantic from the compile flags

    Relies on helpers defined elsewhere in this file
    (``get_status_output``, ``generate_test_code``, ``run_command``,
    ``begin``/``end``, ``info_*``) and on the module-level ``debug`` flag.
    Raises OSError when the Boost math_tr1 library cannot be located.
    """

    # Get a list of all header files to build
    header_files = [f for f in os.listdir(".") if f.endswith(".h")]
    header_files.sort()

    begin("Building test programs (%d header files found)" % len(header_files))

    # Get UFC flags
    ufc_cflags = get_status_output("pkg-config --cflags ufc-1")[1].strip()

    # Get Boost dir (code copied from ufc/src/utils/python/ufc_utils/build.py)
    # Set a default directory for the boost installation
    if sys.platform == "darwin":
        # Use Brew as default
        default = os.path.join(os.path.sep, "usr", "local")
    else:
        default = os.path.join(os.path.sep, "usr")

    # If BOOST_DIR is not set use default directory
    boost_inc_dir = ""
    boost_lib_dir = ""
    boost_math_tr1_lib = "boost_math_tr1"
    boost_dir = os.getenv("BOOST_DIR", default)

    # Locate the Boost headers
    for inc_dir in ["", "include"]:
        if os.path.isfile(
                os.path.join(boost_dir, inc_dir, "boost", "version.hpp")):
            boost_inc_dir = os.path.join(boost_dir, inc_dir)
            break

    # Search the known library directories for the boost_math_tr1
    # library (plain or -mt multithreaded flavour).
    for lib_dir in ["", "lib", "lib/x86_64-linux-gnu"]:
        for ext in [".so", "-mt.so", ".dylib", "-mt.dylib"]:
            _lib = os.path.join(boost_dir, lib_dir,
                                "lib" + boost_math_tr1_lib + ext)
            if os.path.isfile(_lib):
                if "-mt" in _lib:
                    boost_math_tr1_lib += "-mt"
                boost_lib_dir = os.path.join(boost_dir, lib_dir)
                break
        # Bug fix: stop the outer directory loop once a library has been
        # found. Previously only the inner loop broke, so a match in a
        # later directory could append "-mt" to the library name again,
        # producing a bogus "boost_math_tr1-mt-mt" link name.
        if boost_lib_dir:
            break

    if not (boost_inc_dir and boost_lib_dir):
        raise OSError("""The Boost library was not found. If Boost is installed in a nonstandard location, set the environment variable BOOST_DIR.
""")

    ufc_cflags += " -I%s -L%s" % (boost_inc_dir, boost_lib_dir)

    # Set compiler options
    compiler_options = "%s -Wall" % ufc_cflags
    if not permissive:
        compiler_options += " -Werror -pedantic"
    if bench:
        info("Benchmarking activated")
        # Takes too long to build with -O2
        #compiler_options += " -O2"
        compiler_options += " -O3"
        #compiler_options += " -O3 -fno-math-errno -march=native"
    if debug:
        info("Debugging activated")
        compiler_options += " -g -O0"
    info("Compiler options: %s" % compiler_options)

    # Iterate over all files
    for f in header_files:

        # Generate test code (writes <prefix>.cpp next to the header;
        # return value is not needed here)
        generate_test_code(f)

        # Compile test code. os.path.splitext is robust for file names
        # that contain ".h" in the middle, unlike f.split(".h")[0].
        prefix = os.path.splitext(f)[0]
        command = "g++ %s -o %s.bin %s.cpp -l%s" % \
            (compiler_options, prefix, prefix, boost_math_tr1_lib)
        ok = run_command(command)

        # Check status
        if ok:
            info_green("%s OK" % prefix)
        else:
            info_red("%s failed" % prefix)

    end()
def analyze_ufl_objects(ufl_objects, kind, parameters):
    """
    Analyze ufl object(s), either forms, elements, or coordinate mappings,
    returning:

       form_datas                 - a tuple of form_data objects
       unique_elements            - a tuple of unique elements across all forms
       element_numbers            - a mapping to unique numbers for all elements
       unique_coordinate_elements - sorted list of unique coordinate elements

    *kind* selects how *ufl_objects* is interpreted: "form", "element"
    or "coordinate_mapping" (a list of meshes).
    """
    begin("Compiler stage 1: Analyzing %s(s)" % (kind,))
    form_datas = ()
    unique_elements = set()
    unique_coordinate_elements = set()
    if kind == "form":
        forms = ufl_objects

        # Analyze forms
        form_datas = tuple(_analyze_form(form, parameters) for form in forms)

        # Extract unique elements accross all forms
        for form_data in form_datas:
            unique_elements.update(form_data.unique_sub_elements)

        # Extract coordinate elements across all forms
        for form_data in form_datas:
            unique_coordinate_elements.update(form_data.coordinate_elements)
    elif kind == "element":
        elements = ufl_objects

        # Extract unique (sub)elements
        unique_elements.update(extract_sub_elements(elements))
    elif kind == "coordinate_mapping":
        meshes = ufl_objects

        # Extract unique (sub)elements
        # NOTE: this rebinds unique_coordinate_elements to a list (not a
        # set); sorted() below accepts either.
        unique_coordinate_elements = [mesh.ufl_coordinate_element() for mesh in meshes]

    # Make sure coordinate elements and their subelements are included
    unique_elements.update(extract_sub_elements(unique_coordinate_elements))

    # Sort elements
    unique_elements = sort_elements(unique_elements)
    #unique_coordinate_elements = sort_elements(unique_coordinate_elements)
    # Sort by repr for a deterministic (if arbitrary) ordering.
    unique_coordinate_elements = sorted(unique_coordinate_elements, key=lambda x: repr(x))

    # Check for schemes for QuadratureElements
    for element in unique_elements:
        if element.family() == "Quadrature":
            qs = element.quadrature_scheme()
            if qs is None:
                error("Missing quad_scheme in quadrature element.")

    # Compute element numbers
    element_numbers = _compute_element_numbers(unique_elements)

    end()
    return form_datas, unique_elements, element_numbers, unique_coordinate_elements
def format_code(code, wrapper_code, prefix, parameters, jit=False):
    """Format given code in UFC format. Returns two strings with header and
    source file contents.

    *code* is the 6-tuple produced by the code generation stage; *prefix*
    names the output files and header guard; *jit* prepends the symbol
    visibility snippet to the implementation file.

    NOTE: the emission order below (elements, dofmaps, coordinate
    mappings, integrals, forms) is part of the output contract -- the
    generated header must declare classes before they are referenced.
    """

    begin("Compiler stage 5: Formatting code")

    # Extract code
    (code_finite_elements, code_dofmaps, code_coordinate_mappings,
     code_integrals, code_forms, includes) = code

    # Generate code for comment on top of file
    code_h_pre = _generate_comment(parameters) + "\n"
    code_c_pre = _generate_comment(parameters) + "\n"

    # Generate code for header
    code_h_pre += format_template["header_h"] % {"prefix_upper": prefix.upper()}
    code_c_pre += format_template["header_c"] % {"prefix": prefix}

    # Add includes
    includes_h, includes_c = _generate_includes(includes, parameters)
    code_h_pre += includes_h
    code_c_pre += includes_c

    # Header and implementation code
    code_h = ""
    code_c = ""

    # During jit the implementation file controls symbol visibility.
    if jit:
        code_c += visibility_snippet

    # Generate code for finite_elements
    for code_finite_element in code_finite_elements:
        code_h += _format_h("finite_element", code_finite_element, parameters, jit)
        code_c += _format_c("finite_element", code_finite_element, parameters, jit)

    # Generate code for dofmaps
    for code_dofmap in code_dofmaps:
        code_h += _format_h("dofmap", code_dofmap, parameters, jit)
        code_c += _format_c("dofmap", code_dofmap, parameters, jit)

    # Generate code for coordinate_mappings
    for code_coordinate_mapping in code_coordinate_mappings:
        code_h += _format_h("coordinate_mapping", code_coordinate_mapping, parameters, jit)
        code_c += _format_c("coordinate_mapping", code_coordinate_mapping, parameters, jit)

    # Generate code for integrals
    for code_integral in code_integrals:
        code_h += _format_h(code_integral["class_type"], code_integral, parameters, jit)
        code_c += _format_c(code_integral["class_type"], code_integral, parameters, jit)

    # Generate code for form
    for code_form in code_forms:
        code_h += _format_h("form", code_form, parameters, jit)
        code_c += _format_c("form", code_form, parameters, jit)

    # Add wrappers
    if wrapper_code:
        code_h += wrapper_code

    # Generate code for footer
    code_h += format_template["footer"]

    # Add headers to body
    code_h = code_h_pre + code_h
    # Only emit an implementation file if there is any body code.
    if code_c:
        code_c = code_c_pre + code_c

    end()
    return code_h, code_c
def main(args):
    """Run all regression tests.

    Older variant of the driver: parses --flag options from *args*,
    optionally downloads reference data, then for each selected set of
    compiler arguments generates, builds, runs and validates the test
    programs inside a dedicated output sub-directory.

    Returns 0 on success, 1 if any errors were logged.
    """

    # Check command-line arguments TODO: Use getargs or something
    generate_only = "--generate-only" in args
    fast = "--fast" in args
    bench = "--bench" in args
    use_quad = "--skip-quad" not in args
    use_ext_quad = "--ext-quad" in args
    use_ext_uflacs = "--ext-uflacs" in args
    permissive = "--permissive" in args
    tolerant = "--tolerant" in args
    print_timing = "--print-timing" in args
    skip_download = "--skip-download" in args

    # Known flags; everything else is passed through to code generation.
    flags = (
        "--generate-only",
        "--fast",
        "--bench",
        "--skip-quad",
        "--ext-quad",
        "--ext-uflacs",
        "--permissive",
        "--tolerant",
        "--print-timing",
        "--skip-download",
    )
    args = [arg for arg in args if not arg in flags]

    # Extract .ufl names from args
    only_forms = set([arg for arg in args if arg.endswith(".ufl")])
    args = [arg for arg in args if arg not in only_forms]

    # Download reference data
    if skip_download:
        info_blue("Skipping reference data download")
    else:
        failure, output = get_status_output("./scripts/download")
        print(output)
        if failure:
            info_red("Download reference data failed")
        else:
            info_green("Download reference data ok")

    # Loosen the comparison tolerance used by the validation helpers.
    if tolerant:
        global output_tolerance
        output_tolerance = 1e-3

    # Clean out old output directory
    output_directory = "output"
    clean_output(output_directory)
    os.chdir(output_directory)

    # Adjust which test cases (combinations of compile arguments) to
    # run here
    test_cases = ["-r auto"]
    if use_quad and (not bench and not fast):
        test_cases += ["-r quadrature", "-r quadrature -O"]
    if use_ext_quad:
        test_cases += ext_quad
    if use_ext_uflacs:
        # NOTE: this *replaces* the list built so far rather than
        # extending it -- uflacs runs only its own cases plus quadrature.
        test_cases = ext_uflacs
        test_cases += ["-r quadrature"]
        #test_cases += ["-r quadrature -O"]

    for argument in test_cases:
        begin("Running regression tests with %s" % argument)

        # Clear and enter output sub-directory
        # (e.g. "-r auto" -> "r_auto")
        sub_directory = "_".join(argument.split(" ")).replace("-", "")
        clean_output(sub_directory)
        os.chdir(sub_directory)

        # Generate test cases
        generate_test_cases(bench, only_forms)

        # Generate code
        generate_code(args + [argument], only_forms)

        # Location of reference directories
        reference_directory = os.path.abspath("../../ffc-reference-data/")
        code_reference_dir = os.path.join(reference_directory, sub_directory)

        # Note: We use the r_auto references for all test cases. This
        # ensures that we continously test that the codes generated by
        # all different representations are equivalent.
        output_reference_dir = os.path.join(reference_directory, "r_auto")

        # Validate code by comparing to code generated with this set
        # of compiler parameters
        if not bench and argument not in ext_quad:
            validate_code(code_reference_dir)

        # Build and run programs and validate output to common
        # reference
        if fast or generate_only:
            info("Skipping program validation")
        elif bench:
            # Benchmark runs build and execute but skip output validation.
            build_programs(bench, permissive)
            run_programs(bench)
        else:
            build_programs(bench, permissive)
            run_programs(bench)
            validate_programs(output_reference_dir)

        # Go back up
        os.chdir(os.path.pardir)

        end()

    # Print results
    if print_timing:
        timings = '\n'.join("%10.2e s %s" % (t, name) for (name, t) in _command_timings)
        info_green("Timing of all commands executed:")
        info(timings)
    # NOTE(review): success is inferred from the module-level ``logfile``
    # being unset; presumably it is created lazily on first logged error.
    if logfile is None:
        info_green("Regression tests OK")
        return 0
    else:
        info_red("Regression tests failed")
        info("Error messages stored in error.log")
        return 1
def analyze_ufl_objects(ufl_objects, kind, parameters):
    """
    Analyze ufl object(s), either forms, elements, or coordinate
    mappings, returning:

       form_datas                 - a tuple of form_data objects
       unique_elements            - a tuple of unique elements across all forms
       element_numbers            - a mapping to unique numbers for all elements
       unique_coordinate_elements - sorted list of unique coordinate elements

    """
    begin("Compiler stage 1: Analyzing %s(s)" % (kind, ))

    analyzed_forms = ()
    element_set = set()
    coordinate_element_set = set()

    if kind == "form":
        # Run form analysis on each form, then pool (sub)elements and
        # coordinate elements from every resulting form data object.
        analyzed_forms = tuple(
            _analyze_form(fo, parameters) for fo in ufl_objects)
        for fd in analyzed_forms:
            element_set.update(fd.unique_sub_elements)
        for fd in analyzed_forms:
            coordinate_element_set.update(fd.coordinate_elements)
    elif kind == "element":
        # Elements given directly; expand to include all subelements.
        element_set.update(extract_sub_elements(ufl_objects))
    elif kind == "coordinate_mapping":
        # Meshes given; collect their coordinate elements.
        coordinate_element_set = [
            m.ufl_coordinate_element() for m in ufl_objects
        ]

    # Coordinate elements and their subelements always belong to the
    # unique element set as well.
    element_set.update(extract_sub_elements(coordinate_element_set))

    # Impose deterministic orderings.
    ordered_elements = sort_elements(element_set)
    #coordinate_element_set = sort_elements(coordinate_element_set)
    ordered_coordinate_elements = sorted(coordinate_element_set, key=repr)

    # Every quadrature element must carry an explicit quadrature scheme.
    for el in ordered_elements:
        if el.family() == "Quadrature" and el.quadrature_scheme() is None:
            error("Missing quad_scheme in quadrature element.")

    numbering = _compute_element_numbers(ordered_elements)

    end()
    return analyzed_forms, ordered_elements, numbering, ordered_coordinate_elements
def compute_ir(analysis, prefix, parameters, jit=False):
    """Compute intermediate representation.

    *analysis* is the 4-tuple returned by the analysis stage
    (form_datas, elements, element_numbers, coordinate_elements).
    Returns a 5-tuple of IR lists: elements, dofmaps, coordinate
    mappings, integrals (flattened across forms) and forms.
    """

    begin("Compiler stage 2: Computing intermediate representation")

    # Set code generation parameters (this is not actually a 'formatting'
    # parameter, used for table value clamping as well)
    # FIXME: Global state?!
    # set_float_formatting(parameters["precision"])

    # Extract data from analysis
    form_datas, elements, element_numbers, coordinate_elements = analysis

    # Construct classnames for all element objects and coordinate mappings
    classnames = make_all_element_classnames(prefix, elements,
                                             coordinate_elements,
                                             element_numbers, parameters, jit)

    # Skip processing elements if jitting forms
    # NB! it's important that this happens _after_ the element numbers and classnames
    # above have been created.
    if jit and form_datas:
        # While we may get multiple forms during command line action,
        # not so during jit
        assert len(form_datas) == 1, "Expecting only one form data instance during jit."
        # Drop some processing
        elements = []
        coordinate_elements = []
    elif jit and coordinate_elements:
        # While we may get multiple coordinate elements during command
        # line action, or during form jit, not so during coordinate
        # mapping jit
        assert len(coordinate_elements) == 1, "Expecting only one form data instance during jit."
        # Drop some processing
        elements = []
    elif jit and elements:
        # Assuming a topological sorting of the elements,
        # only process the last (main) element from here on
        elements = [elements[-1]]

    # Compute representation of elements
    info("Computing representation of %d elements" % len(elements))
    ir_elements = [_compute_element_ir(e, element_numbers, classnames, parameters, jit)
                   for e in elements]

    # Compute representation of dofmaps (one per element)
    info("Computing representation of %d dofmaps" % len(elements))
    ir_dofmaps = [_compute_dofmap_ir(e, element_numbers, classnames, parameters, jit)
                  for e in elements]

    # Compute representation of coordinate mappings
    info("Computing representation of %d coordinate mappings" % len(coordinate_elements))
    ir_coordinate_mappings = [_compute_coordinate_mapping_ir(e, element_numbers, classnames, parameters, jit)
                              for e in coordinate_elements]

    # Compute and flatten representation of integrals
    # (each form yields a list of integral IRs; chain them into one list)
    info("Computing representation of integrals")
    irs = [_compute_integral_ir(fd, form_id, prefix, element_numbers, classnames, parameters, jit)
           for (form_id, fd) in enumerate(form_datas)]
    ir_integrals = list(chain(*irs))

    # Compute representation of forms
    info("Computing representation of forms")
    ir_forms = [_compute_form_ir(fd, form_id, prefix, element_numbers, classnames, parameters, jit)
                for (form_id, fd) in enumerate(form_datas)]

    end()
    return ir_elements, ir_dofmaps, ir_coordinate_mappings, ir_integrals, ir_forms
def build_programs(bench, permissive, debug, verbose): "Build test programs for all test cases." # Get a list of all files header_files = sorted([f for f in os.listdir(".") if f.endswith(".h")]) begin("Building test programs (%d header files found)" % len(header_files)) # Get UFC flags ufc_cflags = "-I" + get_ufc_include() + " " + " ".join(get_ufc_cxx_flags()) # Get boost flags boost_cflags, boost_linkflags = find_boost_cflags() # Get compiler compiler = os.getenv("CXX", "g++") # Set compiler options compiler_options = " -Wall" if not permissive: compiler_options += " -Werror -pedantic" # Always need ufc compiler_options += " " + ufc_cflags if bench: info("Benchmarking activated") compiler_options += " -O3 -march=native" # Workaround for gcc bug: gcc is too eager to report array-bounds warning with -O3 compiler_options += " -Wno-array-bounds" if debug: info("Debugging activated") compiler_options += " -g -O0" info("Compiler options: %s" % compiler_options) failures = [] # Iterate over all files for f in header_files: prefix = f.split(".h")[0] # Options for all files cpp_flags = compiler_options ld_flags = "" # Only add boost flags if necessary needs_boost = prefix == "MathFunctions" if needs_boost: info("Additional compiler options for %s: %s" % (prefix, boost_cflags)) info("Additional linker options for %s: %s" % (prefix, boost_linkflags)) cpp_flags += " " + boost_cflags ld_flags += " " + boost_linkflags # Generate test code filename = generate_test_code(f) # Compile test code command = "%s %s -o %s.bin %s.cpp %s" % \ (compiler, cpp_flags, prefix, prefix, ld_flags) ok = run_command(command, verbose) # Store compile command for easy reproduction with open("%s.build" % (prefix,), "w") as f: f.write(command + "\n") # Check status if ok: info_green("%s OK" % prefix) else: info_red("%s failed" % prefix) failures.append(prefix) end() return failures
def validate_programs(reference_dir):
    """Validate generated programs against references.

    For every *.json output file in the current directory, compares
    "key = value" float lines against the matching file in
    *reference_dir* (within the module-level ``output_tolerance``), then
    compares the json payload with a recursive diff. Differences are
    reported via log_error/info_red; missing references are skipped with
    a notice.
    """

    # Get a list of all files
    output_files = [f for f in os.listdir(".") if f.endswith(".json")]
    output_files.sort()

    begin("Validating generated programs (%d programs found)" % len(output_files))

    # Iterate over all files
    for f in output_files:

        # Get generated output
        generated_output = open(f).read()

        # Get reference output
        reference_file = os.path.join(reference_dir, f)
        if os.path.isfile(reference_file):
            reference_output = open(reference_file).read()
        else:
            info_blue("Missing reference for %s" % reference_file)
            continue

        # Compare with reference
        # old keeps reference order (list of pairs); new is a lookup dict.
        ok = True
        old = [
            line.split(" = ") for line in reference_output.split("\n")
            if " = " in line
        ]
        new = dict([
            line.split(" = ") for line in generated_output.split("\n")
            if " = " in line
        ])
        header = (
            "Output differs for %s, diff follows (reference first, generated second)"
            % os.path.join(*reference_file.split(os.path.sep)[-3:]))
        for (key, value) in old:

            # Check if value is present
            if not key in new:
                # Emit the header once, before the first failure.
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: missing value in generated code" % key)
                ok = False
                continue

            # Extract float values
            old_values = array([float(v) for v in value.split(" ")])
            new_values = array([float(v) for v in new[key].split(" ")])

            # Check that shape is correct
            if not shape(old_values) == shape(new_values):
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: shape mismatch" % key)
                ok = False
                continue

            # Check that values match to within tolerance set by 'output_tolerance'
            # isnan(diff) guards against NaN comparing false with >.
            diff = max(abs(old_values - new_values))
            if diff > output_tolerance or isnan(diff):
                if ok:
                    log_error("\n" + header + "\n" + len(header) * "-")
                log_error("%s: values differ, error = %g (tolerance = %g)" %
                          (key, diff, output_tolerance))
                log_error(" old = " + " ".join("%.16g" % v for v in old_values))
                log_error(" new = " + " ".join("%.16g" % v for v in new_values))
                ok = False

        # Add debugging output to log file
        # NOTE(review): this local shadows the module-level debug flag
        # used by the build functions; rename candidate.
        debug = "\n".join(
            [line for line in generated_output.split("\n") if "debug" in line])
        if debug:
            log_error(debug)

        # Check status
        if ok:
            info_green("%s OK" % f)
        else:
            info_red("%s differs" % f)

        # Now check json references
        fj = f.replace(".out", ".json")

        # Get generated json output
        if os.path.exists(fj):
            generated_json_output = open(fj).read()
            if "nan" in generated_json_output:
                info_red(
                    "Found nan in generated json output, replacing with 999 to be able to parse as python dict."
                )
                generated_json_output = generated_json_output.replace(
                    "nan", "999")
        else:
            generated_json_output = "{}"

        # Get reference json output
        reference_json_file = os.path.join(reference_dir, fj)
        if os.path.isfile(reference_json_file):
            reference_json_output = open(reference_json_file).read()
        else:
            info_blue("Missing reference for %s" % reference_json_file)
            reference_json_output = "{}"

        # Compare json with reference using recursive diff algorithm
        # TODO: Write to different error file?
        from recdiff import recdiff, print_recdiff, DiffEqual

        # Assuming reference is well formed
        # NOTE(review): eval on file contents is only acceptable because
        # both inputs are produced by this test suite, never untrusted.
        reference_json_output = eval(reference_json_output)
        try:
            generated_json_output = eval(generated_json_output)
        except Exception as e:
            info_red("Failed to evaluate json output for %s" % fj)
            log_error(str(e))
            generated_json_output = None
        json_diff = (None if generated_json_output is None else recdiff(
            generated_json_output, reference_json_output,
            tolerance=output_tolerance))
        json_ok = json_diff == DiffEqual

        # Check status
        if json_ok:
            info_green("%s OK" % fj)
        else:
            info_red("%s differs" % fj)
            log_error(
                "Json output differs for %s, diff follows (generated first, reference second)"
                % os.path.join(*reference_json_file.split(os.path.sep)[-3:]))
            print_recdiff(json_diff, printer=log_error)

    end()
def format_code(code, wrapper_code, prefix, parameters):
    """Format given code in UFC format. Returns two strings with header and
    source file contents.

    *code* is the 5-tuple produced by the code generation stage.

    NOTE: the emission order below (elements, dofmaps, coordinate
    mappings, integrals, forms) is part of the output contract -- the
    generated header must declare classes before they are referenced.
    """

    begin("Compiler stage 5: Formatting code")

    # Extract code
    code_elements, code_dofmaps, code_coordinate_mappings, code_integrals, code_forms = code

    # Header and implementation code
    code_h = ""
    code_c = ""

    # Generate code for comment on top of file
    code_h += _generate_comment(parameters) + "\n"
    code_c += _generate_comment(parameters) + "\n"

    # Generate code for header
    # (``format`` here is the module-level template mapping, which
    # shadows the builtin of the same name)
    code_h += format["header_h"] % {"prefix_upper": prefix.upper()}
    code_h += _generate_additional_includes(code_integrals) + "\n"
    code_c += format["header_c"] % {"prefix": prefix}

    # Generate code for elements
    for code_element in code_elements:
        code_h += _format_h("finite_element", code_element, parameters)
        code_c += _format_c("finite_element", code_element, parameters)

    # Generate code for dofmaps
    for code_dofmap in code_dofmaps:
        code_h += _format_h("dofmap", code_dofmap, parameters)
        code_c += _format_c("dofmap", code_dofmap, parameters)

    # Generate code for coordinate_mappings
    # (the rebinding below deliberately empties the list so the loop is a
    # no-op until coordinate mappings are implemented properly)
    code_coordinate_mappings = [
    ]  # FIXME: This disables output of generated coordinate_mapping class, until implemented properly
    for code_coordinate_mapping in code_coordinate_mappings:
        code_h += _format_h("coordinate_mapping", code_coordinate_mapping, parameters)
        code_c += _format_c("coordinate_mapping", code_coordinate_mapping, parameters)

    # Generate code for integrals
    for code_integral in code_integrals:
        code_h += _format_h(code_integral["class_type"], code_integral, parameters)
        code_c += _format_c(code_integral["class_type"], code_integral, parameters)

    # Generate code for form
    for code_form in code_forms:
        code_h += _format_h("form", code_form, parameters)
        code_c += _format_c("form", code_form, parameters)

    # Add wrappers
    if wrapper_code:
        code_h += wrapper_code

    # Generate code for footer
    code_h += format["footer"]

    end()
    return code_h, code_c
def compute_ir(analysis, prefix, parameters, jit=False):
    """Compute intermediate representation.

    *analysis* is the 4-tuple returned by the analysis stage
    (form_datas, elements, element_numbers, coordinate_elements).
    Returns a 5-tuple of IR lists: elements, dofmaps, coordinate
    mappings, integrals (flattened across forms) and forms.
    """

    begin("Compiler stage 2: Computing intermediate representation")

    # Set code generation parameters (this is not actually a 'formatting'
    # parameter, used for table value clamping as well)
    # FIXME: Global state?!
    # set_float_formatting(parameters["precision"])

    # Extract data from analysis
    form_datas, elements, element_numbers, coordinate_elements = analysis

    # Construct classnames for all element objects and coordinate mappings
    classnames = make_all_element_classnames(prefix, elements,
                                             coordinate_elements,
                                             element_numbers, parameters, jit)

    # Skip processing elements if jitting forms
    # NB! it's important that this happens _after_ the element numbers and classnames
    # above have been created.
    if jit and form_datas:
        # While we may get multiple forms during command line action,
        # not so during jit
        assert len(form_datas
                   ) == 1, "Expecting only one form data instance during jit."
        # Drop some processing
        elements = []
        coordinate_elements = []
    elif jit and coordinate_elements:
        # While we may get multiple coordinate elements during command
        # line action, or during form jit, not so during coordinate
        # mapping jit
        assert len(coordinate_elements
                   ) == 1, "Expecting only one form data instance during jit."
        # Drop some processing
        elements = []
    elif jit and elements:
        # Assuming a topological sorting of the elements,
        # only process the last (main) element from here on
        elements = [elements[-1]]

    # Compute representation of elements
    info("Computing representation of %d elements" % len(elements))
    ir_elements = [
        _compute_element_ir(e, element_numbers, classnames, parameters, jit)
        for e in elements
    ]

    # Compute representation of dofmaps (one per element)
    info("Computing representation of %d dofmaps" % len(elements))
    ir_dofmaps = [
        _compute_dofmap_ir(e, element_numbers, classnames, parameters, jit)
        for e in elements
    ]

    # Compute representation of coordinate mappings
    info("Computing representation of %d coordinate mappings" %
         len(coordinate_elements))
    ir_coordinate_mappings = [
        _compute_coordinate_mapping_ir(e, element_numbers, classnames,
                                       parameters, jit)
        for e in coordinate_elements
    ]

    # Compute and flatten representation of integrals
    # (each form yields a list of integral IRs; chain them into one list)
    info("Computing representation of integrals")
    irs = [
        _compute_integral_ir(fd, form_id, prefix, element_numbers, classnames,
                             parameters, jit)
        for (form_id, fd) in enumerate(form_datas)
    ]
    ir_integrals = list(chain(*irs))

    # Compute representation of forms
    info("Computing representation of forms")
    ir_forms = [
        _compute_form_ir(fd, form_id, prefix, element_numbers, classnames,
                         parameters, jit)
        for (form_id, fd) in enumerate(form_datas)
    ]

    end()
    return ir_elements, ir_dofmaps, ir_coordinate_mappings, ir_integrals, ir_forms