def main():
    """Entry point of the BERNAISE post-processing tool.

    Parses the command line, loads the requested time series folder and
    dispatches to the chosen analysis method.
    """
    info_yellow("BERNAISE: Post-processing tool")
    args = parse_command_line()
    folder = args.get("folder", False)
    scripts_folder = "analysis_scripts"
    methods = get_methods(scripts_folder)

    # Show usage for the available analysis scripts when asked to.
    if args.get("help", False):
        get_help(methods, scripts_folder, __file__, skip=1)

    # Normalise the requested fields: None means "all fields",
    # a single field name becomes a one-element list.
    fields = args.get("fields", False)
    if not fields:
        fields = None
    elif not isinstance(fields, list):
        fields = [fields]

    if not folder:
        info("No folder(=[...]) specified.")
        exit()

    info_split("Sought fields:",
               "All" if fields is None else ", ".join(fields))

    ts = TimeSeries(folder, sought_fields=fields)
    info_split("Found fields:", ", ".join(ts.fields))

    method = args.get("method", "geometry_in_time")
    if not ts.fields:
        info_on_red("Found no data.")
        exit()

    call_method(method, methods, scripts_folder, ts, args)
def compute_norms(err, vector_norms=None, function_norms=None,
                  show=True, tablefmt="simple", save=False):
    """ Compute norms of the error fields, output to terminal, etc.

    Args:
        err: dict mapping field name to a (dolfin) Function holding
            the error in that field.
        vector_norms: list of vector norm types passed to df.norm
            (defaults to ["l2", "linf"]).
        function_norms: list of function norm types passed to df.norm
            (defaults to ["L2", "H1"]).
        show: if True, print the table to the terminal.
        tablefmt: table format string passed on to tabulate.
        save: if a path is given, also write the table to that file
            (on MPI rank 0 only).
    """
    # None sentinels instead of mutable default arguments: the original
    # list defaults would be shared between calls and could be mutated.
    if vector_norms is None:
        vector_norms = ["l2", "linf"]
    if function_norms is None:
        function_norms = ["L2", "H1"]

    info_split("Vector norms:", ", ".join(vector_norms))
    info_split("Function norms:", ", ".join(function_norms))

    headers = ["Fields"] + vector_norms + function_norms

    table = []
    for field in err.keys():
        row = [field]
        for norm_type in vector_norms:
            row.append(df.norm(err[field].vector(), norm_type=norm_type))
        for norm_type in function_norms:
            row.append(df.norm(err[field], norm_type=norm_type))
        table.append(row)

    # Local import: tabulate is an optional third-party dependency only
    # needed when this function is actually called.
    from tabulate import tabulate
    tab_string = tabulate(table, headers, tablefmt=tablefmt, floatfmt="e")
    if show:
        info("\n" + tab_string + "\n")

    if save and rank == 0:
        info_split("Saving to file:", save)
        with open(save, "w") as outfile:
            outfile.write(tab_string)
def _load_timeseries(self, sought_fields=None):
    """Load the on-disk time series into this object.

    Scans the Settings folder for parameter files and the Timeseries
    folder for matching XDMF/HDF5 data, populating self.times,
    self.parameters, self.fields, self.nodes and self.elems.

    Args:
        sought_fields: optional collection of field names to load;
            None loads every field found on disk.
    """
    # Sanity check: both expected subfolders must exist.
    if bool(os.path.exists(self.settings_folder) and
            os.path.exists(self.timeseries_folder)):
        info_split("Opening folder:", self.folder)
    else:
        info_on_red("Folder does not contain "
                    "Settings or Timeseries folders.")
        exit()

    data = dict()
    # One parameter file per restart; its name encodes the starting
    # timestep between params_prefix and params_suffix.
    for params_file in glob.glob(
            self.params_prefix + "*" + self.params_suffix):
        parameters = dict()
        from_tstep = int(get_middle(params_file,
                                    self.params_prefix,
                                    self.params_suffix))

        load_parameters(parameters, params_file)

        t_0 = float(parameters["t_0"])

        # Parameters are keyed by their start time; converted to a
        # sorted list of (t_0, dict) pairs at the end of this method.
        self.parameters[t_0] = parameters

        from_tstep_suffix = "_from_tstep_" + str(from_tstep) + ".h5"
        from_tstep_xml_suffix = "_from_tstep_" + str(from_tstep) + ".xdmf"
        # Each XDMF file describes one field's datasets for this restart.
        for xml_file in glob.glob(os.path.join(
                self.timeseries_folder,
                "*" + from_tstep_xml_suffix)):

            data_file = xml_file[:-4] + "h5"

            # Field name is what sits between the folder path and the
            # "_from_tstep_N.h5" suffix.
            field = get_middle(data_file,
                               self.timeseries_folder + "/",
                               from_tstep_suffix)

            if bool(sought_fields is None or field in sought_fields):
                if bool(field not in data):
                    data[field] = dict()

                dsets, topology_address, geometry_address \
                    = parse_xdmf(xml_file, get_mesh_address=True)

                # Mesh topology/geometry are loaded once and assumed to
                # be shared by all restarts/fields.
                if self.elems is None:
                    with h5py.File(topology_address[0], "r") as h5f:
                        self.elems = np.array(h5f[topology_address[1]])

                if self.nodes is None:
                    with h5py.File(geometry_address[0], "r") as h5f:
                        self.nodes = np.array(h5f[geometry_address[1]])

                with h5py.File(data_file, "r") as h5f:
                    for time, dset_address in dsets:
                        # If in memory saving mode, only store
                        # address for later use.
                        if self.memory_modest:
                            data[field][time] = (data_file, dset_address)
                        else:
                            data[field][time] = np.array(h5f[dset_address])

    # Sort each field by time; the time axis is taken from the first
    # field and assumed identical for all fields - TODO confirm.
    for i, field in enumerate(data.keys()):
        tmps = sorted(data[field].items())
        if i == 0:
            self.times = [tmp[0] for tmp in tmps]
        # NOTE(review): __setitem__ presumably stores into self.datasets;
        # verify against the class definition.
        self[field] = [tmp[1] for tmp in tmps]
    self.parameters = sorted(self.parameters.items())
    self.fields = self.datasets.keys()
def method(ts, time=None, step=0, show=False, save_fig=False, **kwargs):
    """ Compare to analytic reference expression at given timestep.

    This is done by importing the function "reference" in the
    problem module.

    Args:
        ts: TimeSeries object holding the numerical solution.
        time: time at which to compare (resolved to nearest step).
        step: timestep to compare at if time is not given.
        show: if True, show error plots interactively.
        save_fig: if True, save error plots to ts.plots_folder.
    """
    info_cyan("Comparing to analytic reference at given time or step.")
    step, time = get_step_and_info(ts, time, step)
    parameters = ts.get_parameters(time=time)

    problem = parameters.get("problem", "intrusion_bulk")
    try:
        module = importlib.import_module("problems.{}".format(problem))
        reference = module.reference
    except (ImportError, AttributeError):
        # Narrowed from a bare "except:": only a missing problem module
        # or a module without a "reference" function is expected here.
        # info_error presumably aborts - TODO confirm.
        info_error("No analytic reference available.")

    ref_exprs = reference(t=time, **parameters)

    info("Comparing to analytic solution.")
    info_split("Problem:", "{}".format(problem))
    info_split("Time:", "{}".format(time))

    f = ts.functions(ref_exprs.keys())

    # Build higher-order (degree + 3) spaces so the interpolation error
    # of the comparison itself is negligible.
    err = dict()
    f_int = dict()
    f_ref = dict()
    for field in ref_exprs.keys():
        el = f[field].function_space().ufl_element()
        degree = el.degree()
        if bool(el.value_size() != 1):
            W = df.VectorFunctionSpace(ts.mesh, "CG", degree + 3)
        else:
            W = df.FunctionSpace(ts.mesh, "CG", degree + 3)
        err[field] = df.Function(W)
        f_int[field] = df.Function(W)
        f_ref[field] = df.Function(W)

    for field, ref_expr in ref_exprs.items():
        ref_expr.t = time

        # Update numerical solution f
        ts.update(f[field], field, step)

        # Interpolate f to higher space
        f_int[field].assign(
            df.interpolate(f[field], f_int[field].function_space()))

        # Interpolate f_ref to higher space
        f_ref[field].assign(
            df.interpolate(ref_expr, f_ref[field].function_space()))

        err[field].vector()[:] = (f_int[field].vector().get_local() -
                                  f_ref[field].vector().get_local())

        if show or save_fig:
            # Interpolate the error to low order space for visualisation.
            err_int = df.interpolate(err[field], f[field].function_space())
            err_arr = ts.nodal_values(err_int)
            label = "Error in " + field
            if rank == 0:
                save_fig_file = None
                if save_fig:
                    save_fig_file = os.path.join(
                        ts.plots_folder,
                        "error_{}_time{}_analytic.png".format(field, time))
                plot_any_field(ts.nodes, ts.elems, err_arr,
                               save=save_fig_file, show=show, label=label)

    save_file = os.path.join(
        ts.analysis_folder,
        "errornorms_time{}_analytic.dat".format(time))
    compute_norms(err, save=save_file)
def method(ts, ref=None, time=1., show=False, save_fig=False, **kwargs):
    """Compare to numerical reference at given timestep.

    The reference solution is assumed to be on a finer mesh; the
    solution is interpolated onto the reference mesh, where the
    comparison is made.

    Args:
        ts: TimeSeries object holding the numerical solution.
        ref: path to the folder holding the reference time series.
        time: time at which to compare (resolved to nearest step).
        show: if True, show error plots interactively.
        save_fig: if True, save error plots to ts.plots_folder.
    """
    info_cyan("Comparing to numerical reference.")
    if not isinstance(ref, str):
        info_on_red("No reference specified. Use ref=(path).")
        exit()
    ts_ref = TimeSeries(ref, sought_fields=ts.fields)
    info_split("Reference fields:", ", ".join(ts_ref.fields))

    # Compute a 'reference ID' for storage purposes
    ref_id = os.path.relpath(ts_ref.folder,
                             os.path.join(ts.folder, "../")).replace(
                                 "../", "-").replace("/", "+")

    step, time_0 = ts.get_nearest_step_and_time(time)
    step_ref, time_ref = ts_ref.get_nearest_step_and_time(
        time, dataset_str="reference")

    info("Dataset: Time = {}, timestep = {}.".format(time_0, step))
    info("Reference: Time = {}, timestep = {}.".format(time_ref, step_ref))

    f = ts.functions()
    f_ref = ts_ref.functions()
    err = ts_ref.functions()

    ts.update_all(f, step=step)
    ts_ref.update_all(f_ref, step=step_ref)

    for field in ts_ref.fields:
        # Interpolate solution to the reference mesh.
        f_int = df.interpolate(f[field], err[field].function_space())

        # get_local() replaces the deprecated/removed GenericVector
        # .array() accessor, consistent with the rest of this file.
        err[field].vector()[:] = (f_int.vector().get_local() -
                                  f_ref[field].vector().get_local())

        if show or save_fig:
            err_arr = ts_ref.nodal_values(err[field])
            label = "Error in " + field
            if rank == 0:
                save_fig_file = None
                if save_fig:
                    save_fig_file = os.path.join(
                        ts.plots_folder,
                        "error_{}_time{}_ref{}.png".format(
                            field, time, ref_id))
                plot_any_field(ts_ref.nodes, ts_ref.elems, err_arr,
                               save=save_fig_file, show=show, label=label)

    save_file = os.path.join(
        ts.analysis_folder,
        "errornorms_time{}_ref{}.dat".format(time, ref_id))
    compute_norms(err, save=save_file)