def main():
    """Command-line entry point for the BERNAISE post-processing tool."""
    info_yellow("BERNAISE: Post-processing tool")
    opts = parse_command_line()

    folder = opts.get("folder", False)
    scripts_dir = "analysis_scripts"
    available = get_methods(scripts_dir)

    # Print usage information when requested.
    if opts.get("help", False):
        get_help(available, scripts_dir, __file__, skip=1)

    # Normalize the requested fields: None means "all fields",
    # otherwise always work with a list.
    fields = opts.get("fields", False)
    if not fields:
        fields = None
    elif not isinstance(fields, list):
        fields = [fields]

    if not folder:
        info("No folder(=[...]) specified.")
        exit()

    info_split("Sought fields:",
               "All" if fields is None else ", ".join(fields))

    ts = TimeSeries(folder, sought_fields=fields)
    info_split("Found fields:", ", ".join(ts.fields))

    chosen = opts.get("method", "geometry_in_time")

    if len(ts.fields) == 0:
        info_on_red("Found no data.")
        exit()

    call_method(chosen, available, scripts_dir, ts, opts)
def main():
    """Extract a closed boundary contour from a binary image and save it
    as mesh nodes/edges under MESHES_DIR, then plot the result.
    """
    cmd_kwargs = parse_command_line()
    image_path = cmd_kwargs.get("image", False)

    # Validate the path BEFORE deriving anything from it: the original
    # called os.path.basename(image_path) (and printed the name) first,
    # which raises when image= is absent (image_path is False).
    if not image_path or not os.path.exists(image_path):
        info_on_red("Image does not exist.")
        exit()

    name = os.path.splitext(os.path.basename(image_path))[0]
    print(name)  # print() is valid in both Python 2 and 3 for one arg

    image = misc.imread(image_path)
    # Average RGB channels to grayscale, then threshold via the integer
    # division by 255 to a float image of zeros and ones.
    image = np.array(np.array(np.mean(image[:, :, :3], 2),
                              dtype=int) / 255, dtype=float)

    contours = measure.find_contours(image, 0.5)

    # Take every 10th point of the first contour; the slice 1::-1 swaps
    # (row, col) into (x, y) order.
    nodes = contours[0][::10, 1::-1]
    nodes /= np.max(nodes)
    # Flip vertically (image rows grow downwards) and shift so that the
    # minimum corner sits at the origin.
    nodes[:, 1] = -nodes[:, 1]
    nodes_min = np.min(nodes, 0)
    nodes[:, 1] -= nodes_min[1]
    nodes[:, 0] -= nodes_min[0]

    # Connect consecutive nodes into a closed loop of edges.
    edges = round_trip_connect(0, len(nodes) - 1)

    savefile_prefix = os.path.join(MESHES_DIR, name)
    np.savetxt(savefile_prefix + ".nodes", nodes)
    np.savetxt(savefile_prefix + ".edges", edges, fmt='%i')

    plot_edges(nodes, edges)
def call_method(method, methods, scripts_folder, ts, cmd_kwargs):
    """Dispatch to the requested analysis script.

    A trailing "?" on the method name prints the script's description
    instead of running it.

    Args:
        method: Name of the analysis script, optionally suffixed by "?".
        methods: Names of the available analysis scripts.
        scripts_folder: Package folder containing the scripts.
        ts: TimeSeries instance handed to the script.
        cmd_kwargs: Extra keyword arguments forwarded to the script.
    """
    # endswith() is safe for an empty method name, where the original
    # method[-1] indexing raised IndexError.
    if method.endswith("?") and method[:-1] in methods:
        name = method[:-1]
        # __import__ returns the top-level package; fetch the submodule
        # from its __dict__.
        m = __import__("{}.{}".format(scripts_folder, name)).__dict__[name]
        m.description(ts, **cmd_kwargs)
    elif method in methods:
        m = __import__("{}.{}".format(scripts_folder, method)).__dict__[method]
        m.method(ts, **cmd_kwargs)
    else:
        info_on_red("The specified analysis method doesn't exist.")
def _load_timeseries(self, sought_fields=None):
    """Load the stored time series into this object.

    Scans the settings folder for parameter files and, for each one,
    collects the matching XDMF/HDF5 time-series files.  The mesh
    topology and geometry are read once (first file that provides
    them); field data is either read fully into memory or, in
    memory-modest mode, stored as (file, dataset) addresses for lazy
    loading later.

    Parameters:
        sought_fields: optional collection of field names to load;
            None loads every field found.
    """
    # Refuse to proceed unless both expected subfolders exist.
    if bool(os.path.exists(self.settings_folder) and
            os.path.exists(self.timeseries_folder)):
        info_split("Opening folder:", self.folder)
    else:
        info_on_red("Folder does not contain "
                    "Settings or Timeseries folders.")
        exit()

    data = dict()
    for params_file in glob.glob(
            self.params_prefix + "*" + self.params_suffix):
        parameters = dict()
        # The timestep this parameter set starts from is encoded in the
        # parameter file name between prefix and suffix.
        from_tstep = int(get_middle(params_file,
                                    self.params_prefix,
                                    self.params_suffix))
        load_parameters(parameters, params_file)
        t_0 = float(parameters["t_0"])
        self.parameters[t_0] = parameters
        from_tstep_suffix = "_from_tstep_" + str(from_tstep) + ".h5"
        from_tstep_xml_suffix = "_from_tstep_" + str(from_tstep) + ".xdmf"
        # Each field has one .xdmf descriptor with a sibling .h5 file.
        for xml_file in glob.glob(os.path.join(
                self.timeseries_folder, "*" + from_tstep_xml_suffix)):
            data_file = xml_file[:-4] + "h5"
            # Field name sits between the folder path and the suffix.
            field = get_middle(data_file,
                               self.timeseries_folder + "/",
                               from_tstep_suffix)
            if bool(sought_fields is None or field in sought_fields):
                if bool(field not in data):
                    data[field] = dict()
                dsets, topology_address, geometry_address \
                    = parse_xdmf(xml_file, get_mesh_address=True)
                # Mesh is read only once, from the first file seen.
                if self.elems is None:
                    with h5py.File(topology_address[0], "r") as h5f:
                        self.elems = np.array(h5f[topology_address[1]])
                if self.nodes is None:
                    with h5py.File(geometry_address[0], "r") as h5f:
                        self.nodes = np.array(h5f[geometry_address[1]])
                with h5py.File(data_file, "r") as h5f:
                    for time, dset_address in dsets:
                        # If in memory saving mode, only store
                        # address for later use.
                        if self.memory_modest:
                            data[field][time] = (data_file, dset_address)
                        else:
                            data[field][time] = np.array(h5f[dset_address])

    for i, field in enumerate(data.keys()):
        # Sort each field's snapshots chronologically.
        tmps = sorted(data[field].items())
        if i == 0:
            # Timestamps are taken from the first field only; assumes
            # all fields share the same set of times — TODO confirm.
            self.times = [tmp[0] for tmp in tmps]
        self[field] = [tmp[1] for tmp in tmps]

    self.parameters = sorted(self.parameters.items())
    self.fields = self.datasets.keys()
def method(ts, dx=0.1, line="[0.,0.]--[1.,1.]", time=None, dt=None,
           skip=0, **kwargs):
    """Probe the fields of a time series along a straight line.

    Samples points spaced by dx on the segment given by
    line="[x1,y1]--[x2,y2]", probes every field at every requested
    step, and writes one tab-separated .dat file per step into
    <analysis_folder>/probes/.

    Args:
        ts: TimeSeries with the fields to probe.
        dx: spacing between probe points along the line.
        line: endpoints as "[x1,y1]--[x2,y2]" (must match ts.dim).
        time, dt: passed to get_steps to select which steps to probe.
        skip: unused here; kept for a uniform analysis-script interface.
    """
    info_cyan("Probe along a line.")
    try:
        # NOTE(review): eval() on a user-supplied string — fine for a
        # trusted command line, but never expose to untrusted input.
        x_a, x_b = [tuple(eval(pt)) for pt in line.split("--")]
        assert len(x_a) == ts.dim
        assert len(x_b) == ts.dim
        assert all(isinstance(xd, (float, int))
                   for xd in list(x_a) + list(x_b))
    except Exception:
        # Narrowed from a bare except: (which also swallowed
        # SystemExit/KeyboardInterrupt); AssertionError and any
        # eval/parse error still land here.
        info_on_red("Faulty line format. Use 'line=[x1,y1]--[x2,y2]'.")
        exit()

    x = np.array(line_points(x_a, x_b, dx))

    info("Probes {num} points from {a} to {b}".format(num=len(x),
                                                      a=x_a, b=x_b))

    if rank == 0:
        plot_probes(ts.nodes, ts.elems, x, colorbar=False, title="Probes")

    f = ts.functions()

    probes = dict()
    from fenicstools import Probes
    # .items() instead of the Python-2-only .iteritems(); works on both.
    for field, func in f.items():
        probes[field] = Probes(x.flatten(), func.function_space())

    steps = get_steps(ts, dt, time)

    for step in steps:
        info("Step " + str(step) + " of " + str(len(ts)))
        ts.update_all(f, step)
        for field, probe in probes.items():
            probe(f[field])

    probe_arr = dict()
    for field, probe in probes.items():
        probe_arr[field] = probe.array()

    if rank == 0:
        for i, step in enumerate(steps):
            chunks = [x]
            header_list = [index2letter(d) for d in range(ts.dim)]
            for field, chunk in probe_arr.items():
                # Normalize each probe array to a 2D column block and
                # extend the header accordingly.
                if chunk.ndim == 1:
                    header_list.append(field)
                    chunk = chunk[:].reshape(-1, 1)
                elif chunk.ndim == 2:
                    header_list.append(field)
                    chunk = chunk[:, i].reshape(-1, 1)
                elif chunk.ndim > 2:
                    # Vector field: one column per spatial component.
                    header_list.extend(
                        [field + "_" + index2letter(d)
                         for d in range(ts.dim)])
                    chunk = chunk[:, :, i]
                chunks.append(chunk)

            data = np.hstack(chunks)
            header = "\t".join(header_list)
            makedirs_safe(os.path.join(ts.analysis_folder, "probes"))
            np.savetxt(os.path.join(ts.analysis_folder, "probes",
                                    "probes_{:06d}.dat".format(step)),
                       data, header=header)
def method(ts, ref=None, time=1., show=False, save_fig=False, **kwargs):
    """Compare to a numerical reference solution at a given time.

    The reference solution is assumed to live on a finer mesh: the
    coarse solution is interpolated onto the reference mesh, where the
    pointwise error and its norms are computed.  Error fields can be
    shown and/or saved as figures; norms are written to a .dat file.

    Args:
        ts: TimeSeries holding the solution to check.
        ref: path to the reference TimeSeries folder (required).
        time: physical time at which to compare (nearest step is used).
        show: display error plots interactively.
        save_fig: save error plots to the plots folder.
    """
    info_cyan("Comparing to numerical reference.")

    if not isinstance(ref, str):
        info_on_red("No reference specified. Use ref=(path).")
        exit()

    ts_ref = TimeSeries(ref, sought_fields=ts.fields)
    info_split("Reference fields:", ", ".join(ts_ref.fields))

    # Compute a 'reference ID' for storage purposes: the reference path
    # relative to the dataset's parent, made filename-safe.
    ref_id = os.path.relpath(ts_ref.folder,
                             os.path.join(ts.folder, "../")).replace(
                                 "../", "-").replace("/", "+")

    # Both series are sampled at the step nearest to the requested time;
    # the two times need not coincide exactly.
    step, time_0 = ts.get_nearest_step_and_time(time)
    step_ref, time_ref = ts_ref.get_nearest_step_and_time(
        time, dataset_str="reference")

    info("Dataset: Time = {}, timestep = {}.".format(time_0, step))
    info("Reference: Time = {}, timestep = {}.".format(time_ref, step_ref))

    f = ts.functions()
    f_ref = ts_ref.functions()
    # err lives on the reference mesh, like f_ref.
    err = ts_ref.functions()

    ts.update_all(f, step=step)
    ts_ref.update_all(f_ref, step=step_ref)

    for field in ts_ref.fields:
        # Interpolate solution to the reference mesh.
        f_int = df.interpolate(f[field], err[field].function_space())

        err[field].vector()[:] = (f_int.vector().array() -
                                  f_ref[field].vector().array())

        if show or save_fig:
            err_arr = ts_ref.nodal_values(err[field])
            label = "Error in " + field
            # Plot only on the root MPI rank.
            if rank == 0:
                save_fig_file = None
                if save_fig:
                    save_fig_file = os.path.join(
                        ts.plots_folder,
                        "error_{}_time{}_ref{}.png".format(
                            field, time, ref_id))

                plot_any_field(ts_ref.nodes, ts_ref.elems, err_arr,
                               save=save_fig_file, show=show, label=label)

    save_file = os.path.join(
        ts.analysis_folder,
        "errornorms_time{}_ref{}.dat".format(time, ref_id))
    compute_norms(err, save=save_file)