def do_demo():
    """ show examples of the patch methods / classes """

    import util.io as io

    # illustrate basic mesh operations
    myg = Grid2d(8, 16, xmax=1.0, ymax=2.0)

    mydata = CellCenterData2d(myg)

    bc = bnd.BC()
    mydata.register_var("a", bc)
    mydata.create()

    a = mydata.get_var("a")
    a[:, :] = np.exp(-(myg.x2d - 0.5)**2 - (myg.y2d - 1.0)**2)

    print(mydata)

    # output
    print("writing\n")
    mydata.write("mesh_test")

    print("reading\n")
    myd2 = io.read("mesh_test")
    print(myd2)

    mydata.pretty_print("a")
def makeplot(plotfile, variable, outfile):

    sim = io.read(plotfile)
    myd = sim.cc_data
    myg = myd.grid

    if variable == "vort":
        vx = myd.get_var("x-velocity")
        vy = myd.get_var("y-velocity")

        v = myg.scratch_array()

        # vorticity: dv/dx - du/dy via centered differences on the valid zones
        v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] = \
            0.5*(vy[myg.ilo+1:myg.ihi+2, myg.jlo:myg.jhi+1] -
                 vy[myg.ilo-1:myg.ihi, myg.jlo:myg.jhi+1])/myg.dx - \
            0.5*(vx[myg.ilo:myg.ihi+1, myg.jlo+1:myg.jhi+2] -
                 vx[myg.ilo:myg.ihi+1, myg.jlo-1:myg.jhi])/myg.dy
    else:
        v = myd.get_var(variable)

    plt.figure(num=1, figsize=(6.0, 6.0), dpi=100, facecolor='w')

    plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
               interpolation="nearest", origin="lower",
               extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])

    plt.axis("off")
    plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)

    plt.savefig(outfile)
def makeplot(plotfile_name, outfile, width, height):
    """ plot the data in a plotfile using the solver's vis() method """

    sim = io.read(plotfile_name)

    plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')

    sim.dovis()

    if outfile.endswith(".pdf"):
        plt.savefig(outfile, bbox_inches="tight")
    else:
        plt.savefig(outfile)
def makeplot(basename, outfile, width, height):
    """ plot the average of the data over a sequence of plotfiles using
    the solver's dovis() method """

    # gather all of the output files matching the basename
    files = glob.glob(basename + '_[0-9]*_[1-9][0-9]*.h5')
    print(files)

    # accumulate the average of the cell-centered data over all files
    sim_average = io.read(files[0])
    sim_average.cc_data.get_vars()[:, :, :] = 0

    for f in files:
        print(f)
        s = io.read(f)
        sim_average.cc_data.get_vars()[:, :, :] += \
            s.cc_data.get_vars()[:, :, :] / len(files)

    plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')

    sim_average.dovis()

    if outfile.endswith(".pdf"):
        plt.savefig(outfile, bbox_inches="tight")
    else:
        plt.savefig(outfile)

    plt.show()
def makeplot(plotfile_name, outfile, width, height):
    """ plot the data in a plotfile using the solver's vis() method """

    sim = io.read(plotfile_name)

    plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')

    sim.dovis()

    if outfile.endswith(".pdf"):
        plt.savefig(outfile, bbox_inches="tight")
    else:
        plt.savefig(outfile)

    plt.show()
def makeplot(plotfile, variable, outfile,
             width=6.5, height=5.25,
             log=False, compact=False, quiet=False):

    sim = io.read(plotfile)

    if isinstance(sim, patch.CellCenterData2d):
        myd = sim
    else:
        myd = sim.cc_data
    myg = myd.grid

    plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')

    var = myd.get_var(variable)
    if log:
        var = np.log10(var)

    img = plt.imshow(np.transpose(var.v()),
                     interpolation="nearest", origin="lower",
                     extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])

    if not compact:
        plt.colorbar(img)
        plt.xlabel("x")
        plt.ylabel("y")

    if compact:
        plt.axis("off")
        plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)
        plt.savefig(outfile)
    else:
        plt.savefig(outfile, bbox_inches="tight")

    if not quiet:
        plt.show()
def compare_to_benchmark(self):
    """ Are we comparing to a benchmark? """

    basename = self.rp.get_param("io.basename")
    compare_file = "{}/tests/{}{:04d}".format(
        self.solver_name, basename, self.sim.n)
    msg.warning("comparing to: {} ".format(compare_file))

    try:
        sim_bench = io.read(compare_file)
    except IOError:
        msg.warning("ERROR opening compare file")
        return "ERROR opening compare file"

    result = compare.compare(self.sim.cc_data, sim_bench.cc_data)

    if result == 0:
        msg.success("results match benchmark\n")
    else:
        msg.warning("ERROR: " + compare.errors[result] + "\n")

    return result
def test_write_read():

    myg = patch.Grid2d(8, 6, ng=2, xmax=1.0, ymax=1.0)
    myd = patch.CellCenterData2d(myg)

    bco = bnd.BC(xlb="outflow", xrb="outflow",
                 ylb="outflow", yrb="outflow")
    myd.register_var("a", bco)

    myd.create()

    a = myd.get_var("a")
    a.v()[:, :] = np.arange(48).reshape(8, 6)

    myd.write("io_test")

    # now read it in
    nd = io.read("io_test")

    anew = nd.get_var("a")

    assert_array_equal(anew.v(), a.v())
def makeplot(plotfile, variable, outfile):

    sim = io.read(plotfile)
    myd = sim.cc_data
    myg = myd.grid

    plt.figure(num=1, figsize=(6.5, 5.25), dpi=100, facecolor='w')

    var = myd.get_var(variable)
    img = plt.imshow(np.transpose(var.v()),
                     interpolation="nearest", origin="lower",
                     extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])

    plt.colorbar()

    plt.xlabel("x")
    plt.ylabel("y")

    plt.savefig(outfile, bbox_inches="tight")
    plt.show()
def compare_to_benchmark(self, rtol):
    """ Are we comparing to a benchmark? """

    basename = self.rp.get_param("io.basename")
    compare_file = "{}/tests/{}{:04d}".format(
        self.solver_name, basename, self.sim.n)
    msg.warning("comparing to: {} ".format(compare_file))

    try:
        sim_bench = io.read(compare_file)
    except IOError:
        msg.warning("ERROR opening compare file")
        return "ERROR opening compare file"

    result = compare.compare(self.sim.cc_data, sim_bench.cc_data, rtol)

    if result == 0:
        msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
    else:
        msg.warning("ERROR: " + compare.errors[result] + "\n")

    return result
def makeplot(plotfile, variable, outfile,
             width=6.5, height=5.25,
             log=False, compact=False, quiet=False):

    sim = io.read(plotfile)

    if isinstance(sim, patch.CellCenterData2d):
        myd = sim
    else:
        myd = sim.cc_data
    myg = myd.grid

    plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')

    var = myd.get_var(variable)
    if log:
        var = np.log10(var)

    plt.imshow(np.transpose(var.v()),
               interpolation="nearest", origin="lower",
               extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])

    if not compact:
        plt.colorbar()
        plt.xlabel("x")
        plt.ylabel("y")

    if compact:
        plt.axis("off")
        plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)
        plt.savefig(outfile)
    else:
        plt.savefig(outfile, bbox_inches="tight")

    if not quiet:
        plt.show()
def makeplot(plotfile, variable, outfile, vmin=None, vmax=None):

    sim = io.read(plotfile)
    myd = sim.cc_data
    myg = myd.grid

    if variable == "vort":
        vx = myd.get_var("x-velocity")
        vy = myd.get_var("y-velocity")

        v = myg.scratch_array()

        v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] = \
            0.5*(vy[myg.ilo+1:myg.ihi+2, myg.jlo:myg.jhi+1] -
                 vy[myg.ilo-1:myg.ihi, myg.jlo:myg.jhi+1])/myg.dx - \
            0.5*(vx[myg.ilo:myg.ihi+1, myg.jlo+1:myg.jhi+2] -
                 vx[myg.ilo:myg.ihi+1, myg.jlo-1:myg.jhi])/myg.dy
    else:
        v = myd.get_var(variable)

    if vmin is None:
        vmin = v.v().min()
    if vmax is None:
        vmax = v.v().max()

    plt.figure(num=1, figsize=(6.0, 6.0), dpi=100, facecolor='w')

    plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
               interpolation="nearest", origin="lower",
               extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax],
               vmin=vmin, vmax=vmax)

    plt.axis("off")
    plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)

    plt.savefig(outfile)
def process(file):

    # read the data and convert to the primitive variables (and
    # velocity magnitude)
    sim = io.read(file)
    myd = sim.cc_data
    myg = myd.grid

    phi_t = myd.get_var("phi")
    phi = phi_t[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]

    # get the problem parameters
    t_0 = myd.get_aux("t_0")
    phi_0 = myd.get_aux("phi_0")
    phi_max = myd.get_aux("phi_max")
    k = myd.get_aux("k")

    t = myd.t

    # radially bin
    #
    # see http://code.google.com/p/agpy/source/browse/trunk/agpy/radialprofile.py?r=317
    # for inspiration

    # first define the bins
    rmin = 0
    rmax = np.sqrt(myg.xmax**2 + myg.ymax**2)

    nbins = int(np.sqrt(myg.nx**2 + myg.ny**2))

    # bins holds the edges, so there is one more value than actual bin
    # bin_centers holds the center value of the bin
    bins = np.linspace(rmin, rmax, nbins + 1)
    bin_centers = 0.5*(bins[1:] + bins[:-1])

    # radius of each zone
    xcenter = 0.5*(myg.xmin + myg.xmax)
    ycenter = 0.5*(myg.ymin + myg.ymax)

    r = np.sqrt((myg.x2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - xcenter)**2 +
                (myg.y2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - ycenter)**2)

    # bin the radii -- digitize returns an array with the same shape
    # as the input array but with elements of the array specifying
    # which bin that location belongs to.  The value of whichbin will
    # be 1 if we are located in the bin defined by bins[0] to bins[1].
    # This means that there will be no 0s
    whichbin = np.digitize(r.flat, bins)

    # bincount counts the number of occurrences of each non-negative
    # integer value in whichbin.  Each entry in ncount gives the
    # number of occurrences of it in whichbin.  The length of ncount
    # is set by the maximum value in whichbin
    ncount = np.bincount(whichbin)

    # now bin the associated data.  Remember that there are no
    # whichbin == 0, since that corresponds to the left edge, so we
    # want whichbin == 1 to correspond to the first value of
    # bin_centers (bin_centers[0])
    phi_bin = np.zeros(len(ncount) - 1, dtype=np.float64)

    for n in range(1, len(ncount)):
        phi_bin[n - 1] = np.sum(phi.flat[whichbin == n]) / np.sum(ncount[n])

    bin_centers = bin_centers[0:len(ncount) - 1]

    # get the analytic solution
    phi_exact = gaussian.phi_analytic(bin_centers, t, t_0, k, phi_0, phi_max)

    return bin_centers, phi_exact, phi_bin
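# The digitize/bincount pattern used in process() above is easier to see on
# a tiny synthetic example.  This sketch is illustrative only (the radii,
# bins, and data below are made up) and is not part of the analysis script.
import numpy as np

r = np.array([0.05, 0.15, 0.17, 0.32])        # radii of four zones
bins = np.linspace(0.0, 0.4, 5)               # edges: 0.0, 0.1, 0.2, 0.3, 0.4

whichbin = np.digitize(r, bins)               # -> [1, 2, 2, 4]; bin 1 covers [0.0, 0.1)
ncount = np.bincount(whichbin)                # -> [0, 1, 2, 0, 1]; no zeros appear

data = np.array([1.0, 2.0, 4.0, 8.0])
# average of the data falling in bin 2 (radii between 0.1 and 0.2)
print(data[whichbin == 2].sum() / ncount[2])  # -> 3.0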
def doit(solver_name, problem_name, param_file,
         other_commands=None,
         comp_bench=False, reset_bench_on_fail=False, make_bench=False):
    """The main driver to run pyro"""

    msg.bold('pyro ...')

    tc = profile.TimerCollection()

    tm_main = tc.timer("main")
    tm_main.begin()

    # import desired solver under "solver" namespace
    solver = importlib.import_module(solver_name)

    #-------------------------------------------------------------------------
    # runtime parameters
    #-------------------------------------------------------------------------

    # parameter defaults
    rp = runparams.RuntimeParameters()
    rp.load_params("_defaults")
    rp.load_params(solver_name + "/_defaults")

    # problem-specific runtime parameters
    rp.load_params(solver_name + "/problems/_" + problem_name + ".defaults")

    # now read in the inputs file
    if not os.path.isfile(param_file):
        # check if the param file lives in the solver's problems directory
        param_file = solver_name + "/problems/" + param_file
        if not os.path.isfile(param_file):
            msg.fail("ERROR: inputs file does not exist")

    rp.load_params(param_file, no_new=1)

    # and any commandline overrides
    if other_commands is not None:
        rp.command_line_params(other_commands)

    # write out the inputs.auto
    rp.print_paramfile()

    #-------------------------------------------------------------------------
    # initialization
    #-------------------------------------------------------------------------

    # initialize the Simulation object -- this will hold the grid and
    # data and know about the runtime parameters and which problem we
    # are running
    sim = solver.Simulation(solver_name, problem_name, rp, timers=tc)

    sim.initialize()
    sim.preevolve()

    #-------------------------------------------------------------------------
    # evolve
    #-------------------------------------------------------------------------
    verbose = rp.get_param("driver.verbose")

    plt.ion()

    sim.cc_data.t = 0.0

    # output the 0th data
    basename = rp.get_param("io.basename")
    sim.write("{}{:04d}".format(basename, sim.n))

    dovis = rp.get_param("vis.dovis")
    if dovis:
        plt.figure(num=1, figsize=(8, 6), dpi=100, facecolor='w')
        sim.dovis()

    while not sim.finished():

        # fill boundary conditions
        sim.cc_data.fill_BC_all()

        # get the timestep
        sim.compute_timestep()

        # evolve for a single timestep
        sim.evolve()

        if verbose > 0:
            print("%5d %10.5f %10.5f" % (sim.n, sim.cc_data.t, sim.dt))

        # output
        if sim.do_output():
            if verbose > 0:
                msg.warning("outputting...")
            basename = rp.get_param("io.basename")
            sim.write("{}{:04d}".format(basename, sim.n))

        # visualization
        if dovis:
            tm_vis = tc.timer("vis")
            tm_vis.begin()

            sim.dovis()
            store = rp.get_param("vis.store_images")

            if store == 1:
                basename = rp.get_param("io.basename")
                plt.savefig("{}{:04d}.png".format(basename, sim.n))

            tm_vis.end()

    # final output
    if verbose > 0:
        msg.warning("outputting...")

    basename = rp.get_param("io.basename")
    sim.write("{}{:04d}".format(basename, sim.n))

    tm_main.end()

    #-------------------------------------------------------------------------
    # benchmarks (for regression testing)
    #-------------------------------------------------------------------------
    result = 0

    # are we comparing to a benchmark?
    if comp_bench:
        compare_file = "{}/tests/{}{:04d}".format(
            solver_name, basename, sim.n)
        msg.warning("comparing to: {} ".format(compare_file))
        try:
            sim_bench = io.read(compare_file)
        except IOError:
            msg.warning("ERROR opening compare file")
            return "ERROR opening compare file"

        result = compare.compare(sim.cc_data, sim_bench.cc_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

    # are we storing a benchmark?
    if make_bench or (result != 0 and reset_bench_on_fail):
        if not os.path.isdir(solver_name + "/tests/"):
            try:
                os.mkdir(solver_name + "/tests/")
            except OSError:
                msg.fail("ERROR: unable to create the solver's tests/ directory")

        bench_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("storing new benchmark: {}\n".format(bench_file))
        sim.write(bench_file)

    #-------------------------------------------------------------------------
    # final reports
    #-------------------------------------------------------------------------
    if verbose > 0:
        rp.print_unused_params()
        tc.report()

    sim.finalize()

    if comp_bench:
        return result
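# For reference, a minimal sketch of calling the driver above from a script.
# The solver name ("advection"), problem name ("smooth"), inputs file, and
# the key=value override syntax are assumptions for illustration -- substitute
# whatever solvers, problems, and parameter files your checkout provides.
if __name__ == "__main__":
    result = doit("advection", "smooth", "inputs.smooth",
                  other_commands=["driver.max_steps=10"],   # assumed override format
                  comp_bench=False, make_bench=False)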
def test_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                           make_plot=False, verbose=1, rtol=1e-12):

    # test the multigrid solver
    nx = N
    ny = nx

    # create the multigrid object
    a = MG.CellCenterMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          verbose=verbose)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    # a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (e.norm(), a.relative_error, a.num_cycles))

    # plot it
    if make_plot:
        plt.figure(num=1, figsize=(5.0, 5.0), dpi=100, facecolor='w')

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")

        plt.savefig("mg_test.png")

    # store the output for later comparison
    bench = "mg_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench_data = io.read(compare_file)

        result = compare.compare(my_data, bench_data, rtol)

        if result == 0:
            msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    return None
usage = """ compare the output for a Sedov problem with the exact solution contained in cylindrical-sedov.out. To do this, we need to bin the Sedov data into radial bins.""" parser = argparse.ArgumentParser(description=usage) parser.add_argument("-o", type=str, default="sedov_compare.png", metavar="plot.png", help="output file name") parser.add_argument("plotfile", type=str, nargs=1, help="the plotfile you wish to plot") args = parser.parse_args() # read the data and convert to the primitive variables (and velocity # magnitude) sim = io.read(args.plotfile[0]) myd = sim.cc_data myg = myd.grid dens = myd.get_var("density") xmom = myd.get_var("x-momentum") ymom = myd.get_var("y-momentum") ener = myd.get_var("energy") rho = dens.v() u = np.sqrt(xmom.v()**2 + ymom.v()**2)/rho e = (ener.v() - 0.5*rho*u*u)/rho gamma = myd.get_aux("gamma") p = rho*e*(gamma - 1.0)
def test_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                           make_plot=False, verbose=1, rtol=1e-12):

    # test the multigrid solver
    nx = N
    ny = nx

    # create the multigrid object
    a = MG.CellCenterMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          verbose=verbose)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    # a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (e.norm(), a.relative_error, a.num_cycles))

    # plot it
    if make_plot:
        plt.figure(num=1, figsize=(5.0, 5.0), dpi=100, facecolor='w')

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")

        print("Saving figure to mg_test.png")
        plt.savefig("mg_test.png")

    # store the output for later comparison
    bench = "mg_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench_data = io.read(compare_file)

        result = compare.compare(my_data, bench_data, rtol)

        if result == 0:
            msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    return None
def abort(string):
    print(string)
    sys.exit(2)


if not len(sys.argv) == 2:
    print(usage)
    sys.exit(2)

try:
    file1 = sys.argv[1]
except IndexError:
    print(usage)
    sys.exit(2)

sim = io.read(file1)
myd = sim.cc_data
myg = myd.grid

# time of file
t = myd.t

if myg.nx > myg.ny:
    # x-problem
    xmin = myg.xmin
    xmax = myg.xmax
    param_file = "inputs.dam.x"
else:
    # y-problem
    xmin = myg.ymin
    xmax = myg.ymax
    param_file = "inputs.dam.y"
usage: ./convergence.py fine coarse
"""


def compare(fine, coarse):

    dens = coarse.get_var("density")
    dens_avg = fine.restrict("density", N=2)

    e = coarse.grid.scratch_array()
    e.v()[:, :] = dens.v() - dens_avg.v()

    return float(np.abs(e).max()), e.norm()


if __name__ == "__main__":

    if not len(sys.argv) == 3:
        print(usage)
        sys.exit(2)

    fine = sys.argv[1]
    coarse = sys.argv[2]

    ff = io.read(fine)
    cc = io.read(coarse)

    result = compare(ff.cc_data, cc.cc_data)

    print("inf/L2 norm of density: ", result)
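# A hedged sketch of how the errors printed by convergence.py are typically
# turned into a convergence rate: with error norms from two successive
# resolution pairs (factor-of-2 refinement assumed), the measured order
# follows from the error ratio.  The numbers below are placeholders, not
# actual pyro results.
import math

e_coarse = 1.2e-3   # L2 error from the coarser pair (placeholder)
e_fine = 3.1e-4     # L2 error from the finer pair (placeholder)

order = math.log(e_coarse / e_fine, 2)
print("measured convergence order ~ {:.2f}".format(order))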
def process(file):

    # read the data and convert to the primitive variables (and
    # velocity magnitude)
    sim = io.read(file)
    myd = sim.cc_data
    myg = myd.grid

    phi_t = myd.get_var("phi")
    phi = phi_t[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]

    # get the problem parameters
    t_0 = myd.get_aux("t_0")
    phi_0 = myd.get_aux("phi_0")
    phi_max = myd.get_aux("phi_max")
    k = myd.get_aux("k")

    t = myd.t

    # radially bin
    #
    # see http://code.google.com/p/agpy/source/browse/trunk/agpy/radialprofile.py?r=317
    # for inspiration

    # first define the bins
    rmin = 0
    rmax = np.sqrt(myg.xmax**2 + myg.ymax**2)

    nbins = int(np.sqrt(myg.nx**2 + myg.ny**2))

    # bins holds the edges, so there is one more value than actual bin
    # bin_centers holds the center value of the bin
    bins = np.linspace(rmin, rmax, nbins+1)
    bin_centers = 0.5*(bins[1:] + bins[:-1])

    # radius of each zone
    xcenter = 0.5*(myg.xmin + myg.xmax)
    ycenter = 0.5*(myg.ymin + myg.ymax)

    r = np.sqrt((myg.x2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - xcenter)**2 +
                (myg.y2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - ycenter)**2)

    # bin the radii -- digitize returns an array with the same shape
    # as the input array but with elements of the array specifying
    # which bin that location belongs to.  The value of whichbin will
    # be 1 if we are located in the bin defined by bins[0] to bins[1].
    # This means that there will be no 0s
    whichbin = np.digitize(r.flat, bins)

    # bincount counts the number of occurrences of each non-negative
    # integer value in whichbin.  Each entry in ncount gives the
    # number of occurrences of it in whichbin.  The length of ncount
    # is set by the maximum value in whichbin
    ncount = np.bincount(whichbin)

    # now bin the associated data.  Remember that there are no
    # whichbin == 0, since that corresponds to the left edge, so we
    # want whichbin == 1 to correspond to the first value of
    # bin_centers (bin_centers[0])
    phi_bin = np.zeros(len(ncount)-1, dtype=np.float64)

    for n in range(1, len(ncount)):
        phi_bin[n-1] = np.sum(phi.flat[whichbin == n])/np.sum(ncount[n])

    bin_centers = bin_centers[0:len(ncount)-1]

    # get the analytic solution
    phi_exact = gaussian.phi_analytic(bin_centers, t, t_0, k, phi_0, phi_max)

    return bin_centers, phi_exact, phi_bin
else: print("{:20s} absolute error = {:10.10g}".format(name, abs_err)) if not np.allclose(d1.v(), d2.v(), rtol=rtol): result = "varerr" return result if __name__ == "__main__": if not (len(sys.argv) == 3 or len(sys.argv) == 4): print(usage) sys.exit(2) file1 = sys.argv[1] file2 = sys.argv[2] s1 = io.read(file1) s2 = io.read(file2) if len(sys.argv) == 3: result = compare(s1.cc_data, s2.cc_data) else: result = compare(s1.cc_data, s2.cc_data, rtol=float(sys.argv[3])) if result == 0: print("SUCCESS: files agree") else: print("ERROR: ", errors[result])
def test_general_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                                   make_plot=False, verbose=1):
    """
    test the general MG solver.  The return value here is the error
    compared to the exact solution, UNLESS comp_bench=True, in which
    case the return value is the error compared to the stored
    benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = bnd.BC(xlb="neumann", xrb="neumann",
                  ylb="neumann", yrb="neumann")
    d.register_var("alpha", bc_c)
    d.register_var("beta", bc_c)
    d.register_var("gamma_x", bc_c)
    d.register_var("gamma_y", bc_c)
    d.create()

    a = d.get_var("alpha")
    a[:, :] = alpha(g.x2d, g.y2d)

    b = d.get_var("beta")
    b[:, :] = beta(g.x2d, g.y2d)

    gx = d.get_var("gamma_x")
    gx[:, :] = gamma_x(g.x2d, g.y2d)

    gy = d.get_var("gamma_y")
    gy[:, :] = gamma_y(g.x2d, g.y2d)

    # create the multigrid object
    a = MG.GeneralMG2d(nx, ny,
                       xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                       xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                       coeffs=d,
                       verbose=verbose, vis=0, true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    # a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    enorm = e.norm()
    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()

        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_general_dirichlet_test.png")

    # store the output for later comparison
    bench = "mg_general_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench = io.read(compare_file)

        result = compare.compare(my_data, bench)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    # normal return -- error wrt true solution
    return enorm
parser = argparse.ArgumentParser(description=usage)

parser.add_argument("-o", type=str, default="sedov_compare.png",
                    metavar="plot.png", help="output file name")
parser.add_argument("plotfile", type=str, nargs=1,
                    help="the plotfile you wish to plot")

args = parser.parse_args()

# read the data and convert to the primitive variables (and velocity
# magnitude)
sim = io.read(args.plotfile[0])
myd = sim.cc_data
myg = myd.grid

dens = myd.get_var("density")
xmom = myd.get_var("x-momentum")
ymom = myd.get_var("y-momentum")
ener = myd.get_var("energy")

rho = dens.v()

u = np.sqrt(xmom.v()**2 + ymom.v()**2)/rho

e = (ener.v() - 0.5*rho*u*u)/rho

gamma = myd.get_aux("gamma")
p = rho*e*(gamma - 1.0)
usage: ./incomp_converge_error.py file
"""

if not len(sys.argv) == 2:
    print(usage)
    sys.exit(2)

try:
    file1 = sys.argv[1]
except IndexError:
    print(usage)
    sys.exit(2)

sim = io.read(file1)
myd = sim.cc_data
myg = myd.grid

# numerical solution
u = myd.get_var("x-velocity")
v = myd.get_var("y-velocity")

t = myd.t

# analytic solution
u_exact = myg.scratch_array()
u_exact[:, :] = 1.0 - 2.0*np.cos(2.0*math.pi*(myg.x2d - t))*np.sin(2.0*math.pi*(myg.y2d - t))

v_exact = myg.scratch_array()
v_exact[:, :] = 1.0 + 2.0*np.sin(2.0*math.pi*(myg.x2d - t))*np.cos(2.0*math.pi*(myg.y2d - t))
def test_vc_poisson_periodic(N, store_bench=False, comp_bench=False,
                             make_plot=False, verbose=1):
    """
    test the variable-coefficient MG solver.  The return value here
    is the error compared to the exact solution, UNLESS
    comp_bench=True, in which case the return value is the error
    compared to the stored benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = bnd.BC(xlb="periodic", xrb="periodic",
                  ylb="periodic", yrb="periodic")
    d.register_var("c", bc_c)
    d.create()

    c = d.get_var("c")
    c[:, :] = alpha(g.x2d, g.y2d)

    # check whether the RHS sums to zero (necessary for periodic data)
    rhs = f(g.x2d, g.y2d)
    print("rhs sum: {}".format(np.sum(rhs[g.ilo:g.ihi+1, g.jlo:g.jhi+1])))

    # create the multigrid object
    a = MG.VarCoeffCCMG2d(nx, ny,
                          xl_BC_type="periodic", yl_BC_type="periodic",
                          xr_BC_type="periodic", yr_BC_type="periodic",
                          coeffs=c, coeffs_bc=bc_c,
                          verbose=verbose, vis=0, true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    # a.smooth(a.nlevels-1, 10000)

    # get the solution
    v = a.get_solution()

    # get the true solution
    b = true(a.x2d, a.y2d)

    # compute the error from the analytic solution -- note that with
    # periodic BCs all around, there is nothing to normalize the
    # solution.  We subtract off the average of phi from the MG
    # solution (we do the same for the true solution to put them on
    # the same footing)
    e = v - np.sum(v.v())/(nx*ny) - \
        (b - np.sum(b[a.ilo:a.ihi+1, a.jlo:a.jhi+1])/(nx*ny))

    enorm = e.norm()
    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()

        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_vc_periodic_test.png")

    # store the output for later comparison
    bench = "mg_vc_poisson_periodic"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to {}".format(compare_file))
        bench = io.read(compare_file)

        result = compare.compare(my_data, bench)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: {}\n".format(compare.errors[result]))

        return result

    # normal return -- error wrt true solution
    return enorm
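# Why the mean subtraction in the periodic test above: with fully periodic
# BCs the Poisson solution is only defined up to an additive constant, so two
# solutions that differ by a constant should agree once each has its mean
# removed.  A tiny self-contained illustration (synthetic data, not part of
# the solver):
import numpy as np

phi_a = np.random.rand(8, 8)
phi_b = phi_a + 3.7                 # the "same" solution, shifted by a constant

diff = (phi_a - phi_a.mean()) - (phi_b - phi_b.mean())
print(np.abs(diff).max())           # effectively zero: identical up to the null space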
def test_vc_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                              make_plot=False, verbose=1, rtol=1.e-12):
    """
    test the variable-coefficient MG solver.  The return value here
    is the error compared to the exact solution, UNLESS
    comp_bench=True, in which case the return value is the error
    compared to the stored benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = bnd.BC(xlb="neumann", xrb="neumann",
                  ylb="neumann", yrb="neumann")
    d.register_var("c", bc_c)
    d.create()

    c = d.get_var("c")
    c[:, :] = alpha(g.x2d, g.y2d)

    # create the multigrid object
    a = MG.VarCoeffCCMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          coeffs=c, coeffs_bc=bc_c,
                          verbose=verbose, vis=0, true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    # a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    enorm = e.norm()
    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()

        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_vc_dirichlet_test.png")

    # store the output for later comparison
    bench = "mg_vc_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench = io.read(compare_file)

        result = compare.compare(my_data, bench, rtol)

        if result == 0:
            msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    # normal return -- error wrt true solution
    return enorm