@pytest.mark.parametrize("case", [1,])  # lower (1) vs. higher (2) density ratio
@pytest.mark.parametrize("matching_p", [False,])
@pytest.mark.parametrize("scheme", ["SemiDecoupled", "FullyDecoupled"])  # "Monolithic"
def test_bubble(scheme, matching_p, case, postprocessor):
    set_log_level(WARNING)

    # Read parameters
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    prm_file = os.path.join(scriptdir, "bubble-parameters.xml")
    mpset.read(prm_file)

    # Adjust parameters
    if case == 2:
        mpset["model"]["nu"]["2"] = 0.1
        mpset["model"]["rho"]["2"] = 1.0
        mpset["model"]["sigma"]["12"] = 1.96

    # Fixed parameters
    t_end = postprocessor.t_end
    OTD = postprocessor.OTD

    # Names and directories
    basename = postprocessor.basename
    outdir = postprocessor.outdir
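# NOTE: Each test in this collection receives a 'postprocessor' pytest fixture
# that carries the test configuration (t_end, OTD, basename, outdir) and
# gathers results across MPI ranks. A minimal sketch of such a fixture is kept
# commented out below for orientation only; the 'Postprocessor' class and its
# constructor signature are assumptions, not the actual MUFLON implementation.
#
# @pytest.fixture(scope="module")
# def postprocessor(tmpdir_factory):
#     outdir = str(tmpdir_factory.mktemp("results"))
#     proc = Postprocessor(t_end=3.0, OTD=2, basename="bubble", outdir=outdir)
#     yield proc
#     proc.flush_plots()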
def test_shear(scheme, nu_interp, postprocessor):
    #set_log_level(WARNING)
    assert scheme == "SemiDecoupled"

    dt = 0.0  # solve as the stationary problem

    # Read parameters
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    prm_file = os.path.join(scriptdir, "interface-parameters.xml")
    mpset.read(prm_file)

    # Adjust parameters
    c = postprocessor.get_coefficients()
    mpset["model"]["eps"] = c[r"\eps"]
    mpset["model"]["rho"]["1"] = c[r"\rho_1"]
    mpset["model"]["rho"]["2"] = c[r"\rho_2"]
    mpset["model"]["nu"]["1"] = c[r"\nu_1"]
    mpset["model"]["nu"]["2"] = c[r"\nu_2"]
    mpset["model"]["chq"]["L"] = c[r"L_0"]
    mpset["model"]["chq"]["V"] = c[r"V_0"]
    mpset["model"]["chq"]["rho"] = c[r"\rho_0"]
    mpset["model"]["mobility"]["M0"] = 1.0e+0
    mpset["model"]["sigma"]["12"] = 1.0e-0
    #mpset.show()

    cc = wrap_coeffs_as_constants(c)

    # Names and directories
    basename = postprocessor.basename
    label = "{}_{}".format(basename, nu_interp)
    outdir = postprocessor.outdir

    for level in range(2, 3):
        # Prepare domain and discretization
        mesh, boundary_markers, pinpoint, periodic_bnd = \
            create_domain(level, "crossed")
        del periodic_bnd
        DS, div_v = create_discretization(scheme, mesh, div_projection=True)
        DS.parameters["PTL"] = 1
        DS.setup()

        # Prepare initial and boundary conditions
        load_initial_conditions(DS, c)
        bcs = create_bcs(DS, boundary_markers, pinpoint)  # for Dirichlet
        p_h = create_hydrostatic_pressure(mesh, cc)       # for Neumann

        # Force applied on the top plate
        B = 0.0 if dt == 0.0 else 1.0
        applied_force = df.Expression(
            ("A*(1.0 - B*exp(-alpha*t))", "0.0"),
            degree=DS.subspace("v", 0).ufl_element().degree(),
            t=0.0, alpha=1.0, A=1.0, B=B)

        # Prepare model
        model = ModelFactory.create("Incompressible", DS, bcs)
        model.parameters["THETA2"] = 0.0
        #model.parameters["rho"]["itype"] = "lin"
        #model.parameters["rho"]["trunc"] = "minmax"
        model.parameters["nu"]["itype"] = nu_interp
        model.parameters["nu"]["trunc"] = "minmax"
        #model.parameters["nu"]["trunc"] = "clamp_hard"
        #model.parameters["mobility"]["cut"] = True

        # Prepare external source term
        g_a = c[r"g_a"]
        g_a /= mpset["model"]["chq"]["V"]**2.0 * mpset["model"]["chq"]["L"]
        f_src = df.Constant((0.0, -g_a), cell=mesh.ufl_cell(), name="f_src")
        model.load_sources(f_src)

        # Create forms
        forms = model.create_forms()

        # Add boundary integrals
        n = DS.facet_normal()
        ds = df.Measure("ds", subdomain_data=boundary_markers)
        test = DS.test_functions()

        forms["lin"]["rhs"] += \
            df.inner(applied_force, test["v"]) * ds(3)  # driving force

        forms["lin"]["rhs"] -= \
            p_h * df.inner(n, test["v"]) * (ds(2) + ds(4))  # hydrostatic balance

        # Prepare solver
        solver = SolverFactory.create(model, forms, fix_p=False)

        # Prepare time-stepping algorithm
        comm = mesh.mpi_comm()
        pv = DS.primitive_vars_ctl()
        modulo_factor = 1
        xfields = list(zip(pv["phi"].split(), ("phi",)))
        xfields.append((pv["p"].dolfin_repr(), "p"))
        if scheme == "FullyDecoupled":
            xfields += list(zip(pv["v"].split(), ("v1", "v2")))
        else:
            xfields.append((pv["v"].dolfin_repr(), "v"))
        if div_v is not None:
            xfields.append((div_v, "div_v"))
        functionals = {"t": [], "E_kin": [], "Psi": [], "mean_p": []}
        hook = prepare_hook(model, applied_force, functionals, modulo_factor, div_v)
        logfile = "log_{}.dat".format(label)
        TS = TimeSteppingFactory.create("ConstantTimeStep", comm, solver,
                                        hook=hook, logfile=logfile,
                                        xfields=xfields, outdir=outdir)
        TS.parameters["xdmf"]["folder"] = "XDMF_{}".format(label)
        TS.parameters["xdmf"]["modulo"] = modulo_factor
        TS.parameters["xdmf"]["flush"] = True
        TS.parameters["xdmf"]["iconds"] = True

        # Time-stepping
        with Timer("Time stepping") as tmr_tstepping:
            result = TS.run(0.0, 2.0, dt, OTD=1)

        # Pre-process results
        v = pv["v"].dolfin_repr()
        p = pv["p"].dolfin_repr()
        phi = pv["phi"].split()[0]

        w_diff = DS.solution_ctl()[0].copy(True)
        w0 = DS.solution_ptl(0)[0]
        w_diff.vector().axpy(-1.0, w0.vector())
        phi_diff = w_diff.split(True)[0]
        phi_diff.rename("phi_diff", "phi_tstep_difference")
        xdmfdir = \
            os.path.join(outdir, TS.parameters["xdmf"]["folder"], "phi_diff.xdmf")
        with df.XDMFFile(xdmfdir) as file:
            file.write(phi_diff, 0.0)

        D_12 = df.project(0.5 * v.sub(0).dx(1), div_v.function_space())
        if nu_interp in ["har",]:
            deg = DS.subspace("phi", 0).ufl_element().degree()
            V_nu = df.FunctionSpace(mesh, "DG", deg)
        else:
            V_nu = DS.subspace("phi", 0, deepcopy=True)
        nu_0 = df.project(model.coeffs["nu"], V_nu)
        T_12 = df.project(model.coeffs["nu"] * v.sub(0).dx(1), V_nu)
        #p_ref = df.project(p_h, df.FunctionSpace(mesh, W.sub(1).ufl_element()))

        # Save results
        make_cut = postprocessor._make_cut
        rs = dict(
            level=level,
            r_dens=c[r"r_dens"],
            r_visc=c[r"r_visc"],
            nu_interp=nu_interp
        )
        rs[r"$v_1$"] = make_cut(v.sub(0))
        rs[r"$p$"] = make_cut(p)
        rs[r"$\phi$"] = make_cut(phi)
        rs[r"$D_{12}$"] = make_cut(D_12)
        rs[r"$T_{12}$"] = make_cut(T_12)
        rs[r"$\nu$"] = make_cut(nu_0)
        print(label, level)

        # Send to postprocessor
        comm = mesh.mpi_comm()
        rank = df.MPI.rank(comm)
        postprocessor.add_result(rank, rs)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Flush plots as we now have data for all level values
    postprocessor.flush_plots()

    # Cleanup
    df.set_log_level(df.INFO)
    gc.collect()
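# NOTE: 'postprocessor._make_cut' above samples each field along a line across
# the domain before plotting. A stand-alone sketch of such a sampler is given
# below for serial runs; the cut line (x = 0.5, y in [0, 1]), the point count
# and the helper name are illustrative assumptions, not the MUFLON API:
def _make_cut_sketch(f, x0=0.5, npoints=100):
    """Probe a scalar dolfin Function 'f' at npoints along the line x = x0."""
    import numpy as np
    yy = np.linspace(0.0, 1.0, npoints)
    return yy, np.array([f(x0, y) for y in yy])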
def test_scaling_time(scheme, matching_p, postprocessor):
    """
    Compute convergence rates for fixed element order, fixed mesh and
    gradually decreasing time step.
    """
    # Additional test configuration w.r.t. chosen scheme
    if scheme == "SemiDecoupled":
        OTD = 1
        method = "it"
    elif scheme == "FullyDecoupled":
        OTD = 2
        method = "lu"  # iterative solvers not yet supported
    else:
        assert False

    # Run test
    set_log_level(WARNING)

    degrise = 3  # degree rise for computation of errornorm

    # Read parameters
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    prm_file = os.path.join(scriptdir, "mms-parameters.xml")
    mpset.read(prm_file)

    # Fixed parameters
    level = postprocessor.level
    k = postprocessor.OPA
    t_end = postprocessor.t_end

    # Names and directories
    basename = postprocessor.basename
    outdir = postprocessor.outdir

    # Mesh independent predefined quantities
    msol = create_manufactured_solution()
    ic = create_initial_conditions(msol)

    # Prepare space discretization, exact solution and bcs
    mesh, boundary_markers = create_domain(level)
    DS = create_discretization(scheme, mesh, k)
    DS.parameters["PTL"] = OTD
    DS.setup()
    esol = create_exact_solution(msol, DS.finite_elements(), degrise)
    bcs = create_bcs(DS, boundary_markers, esol, method)

    # Iterate over time step
    for m in range(6):  # CHANGE #1: set "m in range(7)"
        dt = 0.1*0.5**m
        label = "{}_dt_{}_{}".format(scheme, dt, basename)
        with Timer("Prepare") as tmr_prepare:
            # Reset sol_ptl[0] back to initial conditions
            DS.load_ic_from_simple_cpp(ic)

            # Prepare model
            model = ModelFactory.create("Incompressible", DS, bcs)
            cell = DS.mesh().ufl_cell()
            t_src_ctl = Constant(0.0, cell=cell, name="t_src_ctl")
            t_src_ptl = Constant(0.0, cell=cell, name="t_src_ptl")
            f_src_ctl, g_src_ctl = \
                create_source_terms(t_src_ctl, mesh, model, msol, matching_p)
            f_src_ptl, g_src_ptl = \
                create_source_terms(t_src_ptl, mesh, model, msol, matching_p)
            t_src = [t_src_ctl,]
            f_src = [f_src_ctl,]
            g_src = [g_src_ctl,]
            if OTD == 2 and scheme in ["Monolithic", "SemiDecoupled"]:
                t_src.append(t_src_ptl)
                g_src.append(g_src_ptl)
                if scheme == "Monolithic":
                    f_src.append(f_src_ptl)
            model.load_sources(f_src, g_src)
            forms = model.create_forms(matching_p)

            # Prepare solver
            comm = mesh.mpi_comm()
            solver = SolverFactory.create(model, forms, fix_p=(method == "it"))
            if method == "it":
                solver.data["solver"]["CH"]["lin"] = \
                    create_ch_solver(comm, "bjacobi")
                solver.data["solver"]["NS"] = \
                    create_pcd_solver(comm, "BRM1", "iterative")
                # prefix_ch = solver.data["solver"]["CH"]["lin"].get_options_prefix()
                # PETScOptions.set(prefix_ch+"ksp_monitor_true_residual")
                # solver.data["solver"]["CH"]["lin"].set_from_options()
                # prefix_ns = solver.data["solver"]["NS"].get_options_prefix()
                # PETScOptions.set(prefix_ns+"ksp_monitor")
                # solver.data["solver"]["NS"].set_from_options()

            # Prepare time-stepping algorithm
            xfields = None
            # NOTE: Uncomment the following block of code to get XDMF output
            # pv = DS.primitive_vars_ctl()
            # phi, chi, v, p = pv["phi"], pv["chi"], pv["v"], pv["p"]
            # phi_, chi_, v_ = phi.split(), chi.split(), v.split()
            # xfields = list(zip(phi_, len(phi_)*[None,])) \
            #           + list(zip(v_, len(v_)*[None,])) \
            #           + [(p.dolfin_repr(), None),]
            hook = prepare_hook(t_src, model, esol, degrise, {})
            logfile = "log_{}.dat".format(label)
            TS = TimeSteppingFactory.create("ConstantTimeStep", comm, solver,
                                            hook=hook, logfile=logfile,
                                            xfields=xfields, outdir=outdir)

        # Time-stepping
        t_beg = 0.0
        with Timer("Time stepping") as tmr_tstepping:
            it = 0
            if OTD == 2:
                if scheme == "FullyDecoupled":
                    dt0 = dt
                    result = TS.run(t_beg, dt0, dt0, OTD=1, it=it)
                    t_beg = dt
                elif scheme == "Monolithic":
                    dt0 = 1.0e-4*dt
                    result = TS.run(t_beg, dt0, dt0, OTD=1, it=it)
                    if dt - dt0 > 0.0:
                        it = 0.5
                        result = TS.run(dt0, dt, dt - dt0, OTD=2, it=it)
                    t_beg = dt
                it = 1
            result = TS.run(t_beg, t_end, dt, OTD, it)

        # Prepare results
        name = logfile[4:-4]
        result.update(
            method=method,
            ndofs=DS.num_dofs(),
            scheme=scheme,
            dt=dt,
            t_end=t_end,
            OTD=OTD,
            err=hook.err,
            level=level,
            k=k,
            tmr_prepare=tmr_prepare.elapsed()[0],
            tmr_tstepping=tmr_tstepping.elapsed()[0]
        )
        print(name, result["ndofs"], result["tmr_prepare"],
              result["tmr_solve"], result["it"], result["tmr_tstepping"])

        # Send to postprocessor
        rank = MPI.rank(comm)
        postprocessor.add_result(rank, result)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Pop results that we do not want to report at the moment
    postprocessor.pop_items(["ndofs", "tmr_prepare", "tmr_solve", "it", "OTD"])

    # Flush plots as we now have data for all dt values
    postprocessor.flush_plots()

    # Store timings
    #datafile = os.path.join(outdir, "timings.xml")
    #dump_timings_to_xml(datafile, TimingClear_clear)

    # Cleanup
    set_log_level(INFO)
    #mpset.write(comm, prm_file)  # uncomment to save parameters
    mpset.refresh()
    gc.collect()
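# NOTE: The convergence rates themselves are extracted by the postprocessor
# from the errors collected for the sequence dt = 0.1*0.5**m. Since dt is
# halved in every iteration, the observed temporal order follows from a simple
# log2 ratio; a minimal sketch is given below (the 'errs' input, ordered from
# the coarsest to the finest time step, is illustrative):
import math

def temporal_rates_sketch(errs):
    """Observed order between consecutive halvings of the time step."""
    return [math.log2(e_coarse/e_fine)
            for e_coarse, e_fine in zip(errs[:-1], errs[1:])]

# e.g. temporal_rates_sketch([0.1, 0.05, 0.025]) -> [1.0, 1.0] (first order)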
def test_scaling_mesh(pcd_variant, ls, matching_p, postprocessor):
    """
    Compute convergence rates for fixed time step and gradually refined mesh
    or increasing element order.
    """
    set_log_level(WARNING)

    degrise = 3  # degree rise for computation of errornorm

    # Read parameters
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    prm_file = os.path.join(scriptdir, "mms-parameters.xml")
    mpset.read(prm_file)

    # Fixed parameters
    OTD = postprocessor.OTD
    dt = postprocessor.dt
    t_end = postprocessor.t_end
    test_type = postprocessor.test

    # Discretization
    scheme = "SemiDecoupled"

    # Names and directories
    basename = postprocessor.basename
    outdir = postprocessor.outdir

    # Mesh independent predefined quantities
    msol = create_manufactured_solution()
    ic = create_initial_conditions(msol)

    # Iterate over refinement level
    for it in range(1, 6):  # CHANGE #1: set "it in range(1, 8)"

        # Decide which test to perform
        if test_type == "ref":
            level = it
            k = 1
        elif test_type == "ord":
            level = 1
            k = it
        label = "{}_{}_level_{}_k_{}_{}".format(pcd_variant, ls, level, k, basename)

        with Timer("Prepare") as tmr_prepare:
            # Prepare discretization
            mesh, boundary_markers = create_domain(level)
            DS = create_discretization(scheme, mesh, k)
            DS.parameters["PTL"] = OTD
            DS.setup()
            DS.load_ic_from_simple_cpp(ic)
            esol = create_exact_solution(msol, DS.finite_elements(), degrise)
            bcs = create_bcs(DS, boundary_markers, esol)

            # Prepare model
            model = ModelFactory.create("Incompressible", DS, bcs)
            cell = DS.mesh().ufl_cell()
            t_src_ctl = Constant(0.0, cell=cell, name="t_src_ctl")
            t_src_ptl = Constant(0.0, cell=cell, name="t_src_ptl")
            f_src_ctl, g_src_ctl = \
                create_source_terms(t_src_ctl, mesh, model, msol, matching_p)
            f_src_ptl, g_src_ptl = \
                create_source_terms(t_src_ptl, mesh, model, msol, matching_p)
            # NOTE: Source terms are time-dependent. Updates to these terms
            #       are possible via 't_src.assign(Constant(t))', where 't'
            #       denotes the actual time value.
            t_src = [t_src_ctl,]
            f_src = [f_src_ctl,]
            g_src = [g_src_ctl,]
            if OTD == 2:
                t_src.append(t_src_ptl)
                g_src.append(g_src_ptl)
            model.load_sources(f_src, g_src)
            forms = model.create_forms(matching_p)

            # NOTE: Here is the possibility to modify forms, e.g. by adding
            #       boundary integrals.
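            # An illustrative, commented-out sketch of such a modification is
            # given below. It would impose extra Neumann-type data 'g_N' on
            # the part of the boundary marked with id 1; both 'g_N' and the
            # marker id are hypothetical and not part of this test:
            #
            # n = DS.facet_normal()
            # test = DS.test_functions()
            # ds_marked = Measure("ds", subdomain_data=boundary_markers)
            # g_N = Constant((0.0, 0.0), cell=cell, name="g_N")
            # forms["lin"]["rhs"] += inner(g_N, test["v"])*ds_marked(1)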
            # Prepare solver
            comm = mesh.mpi_comm()
            solver = SolverFactory.create(model, forms, fix_p=True)
            solver.data["solver"]["NS"] = \
                create_pcd_solver(comm, pcd_variant, ls, mumps_debug=False)
            # PETScOptions.set("ksp_monitor")
            # solver.data["solver"]["NS"].set_from_options()

            # Prepare time-stepping algorithm
            xfields = None
            # NOTE: Uncomment the following block of code to get XDMF output
            # pv = DS.primitive_vars_ctl()
            # phi, chi, v, p = pv["phi"], pv["chi"], pv["v"], pv["p"]
            # phi_, chi_, v_ = phi.split(), chi.split(), v.split()
            # xfields = list(zip(phi_, len(phi_)*[None,])) \
            #           + list(zip(v_, len(v_)*[None,])) \
            #           + [(p.dolfin_repr(), None),]
            hook = prepare_hook(t_src, model, esol, degrise, {})
            logfile = "log_{}.dat".format(label)
            #info("BREAK POINT %ia" % level)
            TS = TimeSteppingFactory.create("ConstantTimeStep", comm, solver,
                                            hook=hook, logfile=logfile,
                                            xfields=xfields, outdir=outdir)
            #info("BREAK POINT %ib" % level) <-- not reached for level == 2
            #                                    when running in parallel

        # Time-stepping
        t_beg = 0.0
        with Timer("Time stepping") as tmr_tstepping:
            result = TS.run(t_beg, t_end, dt, OTD)

        # Get number of Krylov iterations if relevant
        try:
            krylov_it = solver.iters["NS"][0]
        except AttributeError:
            krylov_it = 0

        # Prepare results
        name = logfile[4:-4]
        x_var = k if test_type == "ord" else mesh.hmin()
        result.update(
            scheme=scheme,
            pcd_variant=pcd_variant,
            ls=ls,
            krylov_it=krylov_it,
            ndofs=DS.num_dofs(),
            dt=dt,
            t_end=t_end,
            OTD=OTD,
            err=hook.err,
            x_var=x_var,
            tmr_prepare=tmr_prepare.elapsed()[0],
            tmr_tstepping=tmr_tstepping.elapsed()[0]
        )
        print(name, result["ndofs"], result["it"],
              result["krylov_it"], result["tmr_tstepping"])

        # Send to postprocessor
        rank = MPI.rank(comm)
        postprocessor.add_result(rank, result)

    # Save results into a binary file
    filename = "results_{}.pickle".format(label)
    postprocessor.save_results(filename)

    # Pop results that we do not want to report at the moment
    postprocessor.pop_items(["ndofs", "scheme", "tmr_prepare"])

    # Flush plots as we now have data for all level values
    postprocessor.flush_plots()

    # Store timings
    #datafile = os.path.join(outdir, "timings.xml")
    #dump_timings_to_xml(datafile, TimingClear_clear)

    # Cleanup
    set_log_level(INFO)
    #mpset.write(comm, prm_file)  # uncomment to save parameters
    mpset.refresh()
    gc.collect()
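# NOTE: For test_type == "ref" the postprocessor turns the stored pairs
# (x_var, err), with x_var = mesh.hmin(), into spatial orders of convergence.
# A minimal sketch of the standard EOC formula follows; the 'hs' and 'errs'
# inputs, ordered from the coarsest to the finest mesh, are illustrative:
import math

def spatial_rates_sketch(hs, errs):
    """EOC_i = ln(err_i/err_{i+1}) / ln(h_i/h_{i+1}) between refinements."""
    return [math.log(e0/e1)/math.log(h0/h1)
            for h0, h1, e0, e1 in zip(hs[:-1], hs[1:], errs[:-1], errs[1:])]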
def test_scaling_mesh(nu, pcd_variant, ls, postprocessor):
    #set_log_level(WARNING)

    # Read parameters
    scriptdir = os.path.dirname(os.path.realpath(__file__))
    prm_file = os.path.join(scriptdir, "step-parameters.xml")
    mpset.read(prm_file)

    # Adjust parameters
    mpset["model"]["nu"]["1"] = nu
    mpset["model"]["nu"]["2"] = nu

    # Parameters for setting up MUFLON components
    dt = postprocessor.dt
    t_end = postprocessor.t_end  # final time of the simulation
    scheme = "SemiDecoupled"
    OTD = 1
    k = 1
    modulo_factor = 2

    # Names and directories
    outdir = postprocessor.outdir

    # Mesh independent predefined quantities
    ic = SimpleCppIC()
    ic.add("phi", "1.0")

    # Prepare figure for plotting the vorticity
    fig_curl = pyplot.figure()
    gs = gridspec.GridSpec(2, 2, height_ratios=[3, 1],
                           width_ratios=[0.01, 1], hspace=0.05)
    ax_curl = fig_curl.add_subplot(gs[0, 1])
    ax_curl.set_xlabel(r"time $t$")
    ax_curl.set_ylabel(r"$\omega_\Omega = \int_\Omega \nabla \times \mathbf{v}$")
    del gs

    for level in range(4):
        with Timer("Prepare") as tmr_prepare:
            # Prepare space discretization
            mesh, boundary_markers = create_domain(level)
            #pyplot.figure(); plot(mesh)
            #pyplot.savefig(os.path.join(outdir, "mesh.pdf"))
            if dt is None:
                hh = mesh.hmin()/(2.0**0.5)  # mesh size in the direction of inflow
                umax = 1.0  # max velocity at the inlet
                dt = 0.8*hh/umax  # automatically computed time step
                del hh, umax
            label = "level_{}_nu_{}_{}_{}_dt_{}_{}".format(
                level, nu, pcd_variant, ls, dt, postprocessor.basename)
            DS = create_discretization(scheme, mesh, k)
            DS.setup()
            DS.load_ic_from_simple_cpp(ic)

            # Prepare boundary conditions
            bcs, inflow = create_bcs(DS, boundary_markers, pcd_variant)  #, t0=10*dt

            # Prepare model
            model = ModelFactory.create("Incompressible", DS, bcs)
            #model.parameters["THETA2"] = 0.0

            # Create forms
            forms = model.create_forms()
            if ls == "direct":
                forms["pcd"]["a_pc"] = None

            # Add boundary integrals
            n = DS.facet_normal()
            ds_marked = Measure("ds", subdomain_data=boundary_markers)
            test = DS.test_functions()
            trial = DS.trial_functions()
            pv = DS.primitive_vars_ctl(indexed=True)
            pv0 = DS.primitive_vars_ptl(0, indexed=True)
            cc = model.coeffs
            w = cc["rho"]*pv0["v"] + cc["THETA2"]*cc["J"]
            a_surf = (
                  0.5*inner(w, n)*inner(trial["v"], test["v"])
                - cc["nu"]*inner(dot(grad(trial["v"]).T, n), test["v"])
            )*ds_marked(2)
            forms["lin"]["lhs"] += a_surf
            if forms["pcd"]["a_pc"] is not None:
                forms["pcd"]["a_pc"] += a_surf
            if pcd_variant == "BRM2":
                forms["pcd"]["kp"] -= \
                    (1.0/cc["nu"])*inner(w, n)*test["p"]*trial["p"]*ds_marked(1)
                # TODO: Is this beneficial?
# forms["pcd"]["kp"] -= \ # (1.0/cc["nu"])*inner(w, n)*test["p"]*trial["p"]*ds_marked(0) # TODO: Alternatively try: # forms["pcd"]["kp"] -= \ # (1.0/cc["nu"])*inner(w, n)*test["p"]*trial["p"]*ds # Prepare solver comm = mesh.mpi_comm() solver = SolverFactory.create(model, forms) prefix = "LU" solver.data["solver"]["NS"] = \ create_pcd_solver(comm, pcd_variant, ls, mumps_debug=False) prefix = solver.data["solver"]["NS"].get_options_prefix() #PETScOptions.set(prefix+"ksp_monitor") #solver.data["solver"]["NS"].set_from_options() # Prepare time-stepping algorithm pv = DS.primitive_vars_ctl() xfields = list(zip(pv["phi"].split(), ("phi",))) xfields.append((pv["p"].dolfin_repr(), "p")) xfields.append((pv["v"].dolfin_repr(), "v")) functionals = {"t": [], "vorticity": []} hook = prepare_hook(DS, functionals, modulo_factor, inflow) logfile = "log_{}.dat".format(label) TS = TimeSteppingFactory.create("ConstantTimeStep", comm, solver, hook=hook, logfile=logfile, xfields=xfields, outdir=outdir) TS.parameters["xdmf"]["folder"] = "XDMF_{}".format(label) TS.parameters["xdmf"]["modulo"] = modulo_factor TS.parameters["xdmf"]["flush"] = True TS.parameters["xdmf"]["iconds"] = True # Time-stepping t_beg = 0.0 with Timer("Time stepping") as tmr_tstepping: result = TS.run(t_beg, t_end, dt, OTD) # Get number of Krylov iterations if relevant try: krylov_it = solver.iters["NS"][0] except AttributeError: krylov_it = 0 # Prepare results (already contains dt, it, t_end, tmr_solve) result.update( nu=nu, pcd_variant=pcd_variant, ls=ls, krylov_it=krylov_it, ndofs=DS.num_dofs(), level=level, h_min=mesh.hmin(), tmr_prepare=tmr_prepare.elapsed()[0], tmr_tstepping=tmr_tstepping.elapsed()[0], scheme=scheme, OTD=OTD, k=k, t=functionals["t"], vorticity=functionals["vorticity"] ) print(label, prefix, result["ndofs"], result["it"], result["tmr_tstepping"], result["krylov_it"]) # Send to posprocessor rank = MPI.rank(comm) postprocessor.add_result(rank, result) # Add vorticity plot ax_curl.plot(functionals["t"], functionals["vorticity"], label=label) ax_curl.legend(bbox_to_anchor=(0, -0.2), loc=2, borderaxespad=0, fontsize='x-small', ncol=1) # Save results into a binary file filename = "results_{}.pickle".format(label) postprocessor.save_results(filename) # Pop results that we do not want to report at the moment postprocessor.pop_items([ "level", "h_min", "tmr_prepare", "tmr_tstepping", "scheme", "OTD", "k", "t", "vorticity"]) # Flush plots as we now have data for all level values postprocessor.flush_plots() fig_curl.savefig(os.path.join(outdir, "fig_vorticity_{}.pdf".format(label))) # # Plot last obtained solution # pv = DS.primitive_vars_ctl() # v = as_vector(pv["v"].split()) # p = pv["p"].dolfin_repr() # #phi = pv["phi"].split() # size = MPI.size(mesh.mpi_comm()) # rank = MPI.rank(mesh.mpi_comm()) # pyplot.figure() # pyplot.subplot(2, 1, 1) # plot(v, title="velocity") # pyplot.subplot(2, 1, 2) # plot(p, title="pressure") # pyplot.savefig(os.path.join(outdir, "fig_v_p_size{}_rank{}.pdf".format(size, rank))) # pyplot.figure() # plot(p, title="pressure", mode="warp") # pyplot.savefig(os.path.join(outdir, "fig_warp_size{}_rank{}.pdf".format(size, rank))) # Store timings #datafile = os.path.join(outdir, "timings.xml") #dump_timings_to_xml(datafile, TimingClear_clear) # Cleanup set_log_level(INFO) #mpset.write(comm, prm_file) # uncomment to save parameters mpset.refresh() gc.collect()