def makeplot(plotfile, variable, outfile):

    myg, myd = patch.read(plotfile)

    if variable == "vort":
        vx = myd.get_var("x-velocity")
        vy = myd.get_var("y-velocity")

        v = myg.scratch_array()

        v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] = \
            0.5*(vy[myg.ilo+1:myg.ihi+2, myg.jlo:myg.jhi+1] -
                 vy[myg.ilo-1:myg.ihi, myg.jlo:myg.jhi+1])/myg.dx - \
            0.5*(vx[myg.ilo:myg.ihi+1, myg.jlo+1:myg.jhi+2] -
                 vx[myg.ilo:myg.ihi+1, myg.jlo-1:myg.jhi])/myg.dy
    else:
        v = myd.get_var(variable)

    plt.figure(num=1, figsize=(6.0, 6.0), dpi=100, facecolor='w')

    plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]),
               interpolation="nearest", origin="lower",
               extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])

    plt.axis("off")
    plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)

    plt.savefig(outfile)
def makeplot(plotfile_name, solver_name, outfile, width, height):
    """ plot the data in a plotfile using the solver's vis() method """

    _, myd = patch.read(plotfile_name)

    solver = importlib.import_module(solver_name)

    sim = solver.Simulation(solver_name, None, None)
    sim.cc_data = myd

    plt.figure(num=1, figsize=(width, height), dpi=100, facecolor='w')

    sim.dovis()

    if outfile.endswith(".pdf"):
        plt.savefig(outfile, bbox_inches="tight")
    else:
        plt.savefig(outfile)

    plt.show()
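# A hypothetical usage sketch (not part of the original script): the plotfile
# name, solver name, output name, and figure size below are placeholders.
# makeplot() above reads the plotfile, rebuilds the solver's Simulation object,
# and reuses its dovis() routine to render the frame.
#
#   makeplot("sedov_0100.pyro", "compressible", "sedov_0100.png", 8.0, 6.0)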
def makeplot(plotfile, variable, outfile):

    myg, myd = patch.read(plotfile)

    plt.figure(num=1, figsize=(6.5, 5.25), dpi=100, facecolor='w')

    var = myd.get_var(variable)

    img = plt.imshow(np.transpose(var.v()),
                     interpolation="nearest", origin="lower",
                     extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax])

    plt.colorbar()

    plt.xlabel("x")
    plt.ylabel("y")

    plt.savefig(outfile, bbox_inches="tight")
    plt.show()
optional arguments:
  -h, --help     show this help message and exit
  -o image.png   output image name.  The extension .png will generate a
                 PNG file, .eps will generate an EPS file (default: plot.png).
    """
    print(usage)
    sys.exit()


if __name__ == "__main__":

    try:
        opts, next = getopt.getopt(sys.argv[1:], "o:h")
    except getopt.GetoptError:
        sys.exit("invalid calling sequence")

    outfile = "plot.png"

    for o, a in opts:
        if o == "-h":
            usage()
        if o == "-o":
            outfile = a

    try:
        file = next[0]
    except IndexError:
        usage()

    try:
        variable = next[1]
    except IndexError:
        usage()

    # makeplot() reads the plotfile itself, so pass the filename directly
    makeplot(file, variable, outfile)
def doit(solver_name, problem_name, param_file,
         other_commands=None, comp_bench=False, make_bench=False):

    msg.bold('pyro ...')

    tc = profile.TimerCollection()

    tm_main = tc.timer("main")
    tm_main.begin()

    # import desired solver under "solver" namespace
    solver = importlib.import_module(solver_name)

    #-------------------------------------------------------------------------
    # runtime parameters
    #-------------------------------------------------------------------------

    # parameter defaults
    rp = runparams.RuntimeParameters()
    rp.load_params("_defaults")
    rp.load_params(solver_name + "/_defaults")

    # problem-specific runtime parameters
    rp.load_params(solver_name + "/problems/_" + problem_name + ".defaults")

    # now read in the inputs file
    if not os.path.isfile(param_file):
        # check if the param file lives in the solver's problems directory
        param_file = solver_name + "/problems/" + param_file
        if not os.path.isfile(param_file):
            msg.fail("ERROR: inputs file does not exist")

    rp.load_params(param_file, no_new=1)

    # and any commandline overrides
    if other_commands is not None:
        rp.command_line_params(other_commands)

    # write out the inputs.auto
    rp.print_paramfile()

    #-------------------------------------------------------------------------
    # initialization
    #-------------------------------------------------------------------------

    # initialize the Simulation object -- this will hold the grid and
    # data and know about the runtime parameters and which problem we
    # are running
    sim = solver.Simulation(solver_name, problem_name, rp, timers=tc)

    sim.initialize()
    sim.preevolve()

    #-------------------------------------------------------------------------
    # evolve
    #-------------------------------------------------------------------------
    init_tstep_factor = rp.get_param("driver.init_tstep_factor")
    max_dt_change = rp.get_param("driver.max_dt_change")
    fix_dt = rp.get_param("driver.fix_dt")

    verbose = rp.get_param("driver.verbose")

    plt.ion()

    sim.cc_data.t = 0.0

    # output the 0th data
    basename = rp.get_param("io.basename")
    sim.cc_data.write("{}{:04d}".format(basename, sim.n))

    dovis = rp.get_param("vis.dovis")
    if dovis:
        plt.figure(num=1, figsize=(8, 6), dpi=100, facecolor='w')
        sim.dovis()

    while not sim.finished():

        # fill boundary conditions
        sim.cc_data.fill_BC_all()

        # get the timestep
        if fix_dt > 0.0:
            sim.dt = fix_dt
        else:
            sim.compute_timestep()

            if sim.n == 0:
                sim.dt = init_tstep_factor*sim.dt
            else:
                sim.dt = min(max_dt_change*dt_old, sim.dt)

            dt_old = sim.dt

        if sim.cc_data.t + sim.dt > sim.tmax:
            sim.dt = sim.tmax - sim.cc_data.t

        # evolve for a single timestep
        sim.evolve()

        if verbose > 0:
            print("%5d %10.5f %10.5f" % (sim.n, sim.cc_data.t, sim.dt))

        # output
        if sim.do_output():
            if verbose > 0:
                msg.warning("outputting...")
            basename = rp.get_param("io.basename")
            sim.cc_data.write("{}{:04d}".format(basename, sim.n))

        # visualization
        if dovis:
            tm_vis = tc.timer("vis")
            tm_vis.begin()

            sim.dovis()
            store = rp.get_param("vis.store_images")
            if store == 1:
                basename = rp.get_param("io.basename")
                plt.savefig("{}{:04d}.png".format(basename, sim.n))

            tm_vis.end()

    tm_main.end()

    #-------------------------------------------------------------------------
    # benchmarks (for regression testing)
    #-------------------------------------------------------------------------

    # are we comparing to a benchmark?
    if comp_bench:
        compare_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("comparing to: %s " % (compare_file))
        try:
            bench_grid, bench_data = patch.read(compare_file)
        except:
            msg.warning("ERROR opening compare file")
            return "ERROR opening compare file"

        result = compare.compare(sim.cc_data.grid, sim.cc_data,
                                 bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

    # are we storing a benchmark?
    if make_bench:
        if not os.path.isdir(solver_name + "/tests/"):
            try:
                os.mkdir(solver_name + "/tests/")
            except:
                msg.fail("ERROR: unable to create the solver's tests/ directory")

        bench_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("storing new benchmark: {}\n".format(bench_file))
        sim.cc_data.write(bench_file)

    #-------------------------------------------------------------------------
    # final reports
    #-------------------------------------------------------------------------
    if verbose > 0:
        rp.print_unused_params()

    if verbose > 0:
        tc.report()

    sim.finalize()

    if comp_bench:
        return result

    return None
def test_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                           make_plot=False, verbose=1):

    # test the multigrid solver
    nx = N
    ny = nx

    # create the multigrid object
    a = MG.CellCenterMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          verbose=verbose)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (e.norm(), a.relative_error, a.num_cycles))

    # plot it
    if make_plot:
        plt.figure(num=1, figsize=(5.0, 5.0), dpi=100, facecolor='w')

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")

        plt.savefig("mg_test.png")

    # store the output for later comparison
    bench = "mg_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    return None
def test_vc_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                              make_plot=False, verbose=1):
    """
    test the variable-coefficient MG solver.  The return value here is
    the error compared to the exact solution, UNLESS comp_bench=True,
    in which case the return value is the error compared to the stored
    benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = patch.BCObject(xlb="neumann", xrb="neumann",
                          ylb="neumann", yrb="neumann")
    d.register_var("c", bc_c)
    d.create()

    c = d.get_var("c")
    c.d[:, :] = alpha(g.x2d, g.y2d)

    # create the multigrid object
    a = MG.VarCoeffCCMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          coeffs=c, coeffs_bc=bc_c,
                          verbose=verbose, vis=0, true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    enorm = e.norm()

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()
        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e.v()),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_vc_dirichlet_test.png")

    # store the output for later comparison
    bench = "mg_vc_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    # normal return -- error wrt true solution
    return enorm
basename = rp.get_param("io.basename")
pylab.savefig(basename + "%4.4d" % (n) + ".png")

tm_vis.end()

tm_main.end()

#-----------------------------------------------------------------------------
# benchmarks (for regression testing)
#-----------------------------------------------------------------------------

# are we comparing to a benchmark?
if comp_bench:
    compare_file = solver_name + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("comparing to: %s " % (compare_file))
    bench_grid, bench_data = patch.read(compare_file)

    result = compare.compare(sim.cc_data.grid, sim.cc_data,
                             bench_grid, bench_data)

    if result == 0:
        msg.success("results match benchmark\n")
    else:
        msg.fail("ERROR: " + compare.errors[result] + "\n")

# are we storing a benchmark?
if make_bench:
    bench_file = solver_name + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("storing new benchmark: %s\n " % (bench_file))
    sim.cc_data.write(bench_file)
def test_general_poisson_inhomogeneous(N, store_bench=False, comp_bench=False,
                                       make_plot=False, verbose=1):
    """
    test the general MG solver.  The return value here is the error
    compared to the exact solution, UNLESS comp_bench=True, in which
    case the return value is the error compared to the stored benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = patch.BCObject(xlb="neumann", xrb="neumann",
                          ylb="neumann", yrb="neumann")
    d.register_var("alpha", bc_c)
    d.register_var("beta", bc_c)
    d.register_var("gamma_x", bc_c)
    d.register_var("gamma_y", bc_c)
    d.create()

    a = d.get_var("alpha")
    a[:, :] = alpha(g.x2d, g.y2d)

    b = d.get_var("beta")
    b[:, :] = beta(g.x2d, g.y2d)

    gx = d.get_var("gamma_x")
    gx[:, :] = gamma_x(g.x2d, g.y2d)

    gy = d.get_var("gamma_y")
    gy[:, :] = gamma_y(g.x2d, g.y2d)

    # create the multigrid object
    a = MG.GeneralMG2d(nx, ny,
                       xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                       xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                       xl_BC=xl_func, yl_BC=yl_func,
                       coeffs=d,
                       verbose=verbose, vis=0, true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    print(np.min(rhs), np.max(rhs))

    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-10
    a.solve(rtol=1.e-10)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    enorm = a.soln_grid.norm(e)

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()
        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_general_inhomogeneous_test.png")

    # store the output for later comparison
    bench = "mg_general_poisson_inhomogeneous"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    # normal return -- error wrt true solution
    return enorm
def test_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                           make_plot=False, verbose=1):

    # test the multigrid solver
    nx = N
    ny = nx

    # create the multigrid object
    a = MG.CellCenterMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          verbose=verbose)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1, 50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (a.soln_grid.norm(e), a.relative_error, a.num_cycles))

    # plot it
    if make_plot:
        plt.figure(num=1, figsize=(5.0, 5.0), dpi=100, facecolor='w')

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")

        plt.savefig("mg_test.png")

    # store the output for later comparison
    bench = "mg_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    return None
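# A minimal convergence-check sketch (an assumed driver, not part of the
# original test module): run the Dirichlet test at several resolutions and
# rely on the L2 error against the analytic solution that each call prints.
def convergence_sweep():
    for nx in [16, 32, 64, 128]:
        print("running nx = {}".format(nx))
        test_poisson_dirichlet(nx, make_plot=False, verbose=0)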
if not len(sys.argv) == 3:
    print(usage)
    sys.exit(2)

try:
    file1 = sys.argv[1]
except IndexError:
    print(usage)
    sys.exit(2)

try:
    file2 = sys.argv[2]
except IndexError:
    print(usage)
    sys.exit(2)

myg, myd = patch.read(file1)
myg2, myd2 = patch.read(file2)

U_analytic = myg2.scratch_array()
U_numerical = myg.scratch_array()

# compute total velocity from myd
px = myd.get_var("x-momentum")
py = myd.get_var("y-momentum")
density = myd.get_var("density")

u = px/density
v = py/density

px2 = myd2.get_var("x-momentum")
def process(file):

    # read the data and convert to the primitive variables (and velocity
    # magnitude)
    myg, myd = patch.read(file)

    phi_t = myd.get_var("phi")
    phi = phi_t[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]

    # get the problem parameters
    t_0 = myd.get_aux("t_0")
    phi_0 = myd.get_aux("phi_0")
    phi_max = myd.get_aux("phi_max")
    k = myd.get_aux("k")

    t = myd.t

    # radially bin
    # see http://code.google.com/p/agpy/source/browse/trunk/agpy/radialprofile.py?r=317
    # for inspiration

    # first define the bins
    rmin = 0
    rmax = np.sqrt(myg.xmax**2 + myg.ymax**2)

    nbins = int(np.sqrt(myg.nx**2 + myg.ny**2))

    # bins holds the edges, so there is one more value than actual bin
    # bin_centers holds the center value of the bin
    bins = np.linspace(rmin, rmax, nbins + 1)
    bin_centers = 0.5*(bins[1:] + bins[:-1])

    # radius of each zone
    xcenter = 0.5*(myg.xmin + myg.xmax)
    ycenter = 0.5*(myg.ymin + myg.ymax)

    r = np.sqrt((myg.x2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - xcenter)**2 +
                (myg.y2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - ycenter)**2)

    # bin the radii -- digitize returns an array with the same shape
    # as the input array but with elements of the array specifying
    # which bin that location belongs to.  The value of whichbin will
    # be 1 if we are located in the bin defined by bins[0] to bins[1].
    # This means that there will be no 0s
    whichbin = np.digitize(r.flat, bins)

    # bincount counts the number of occurrences of each non-negative
    # integer value in whichbin.  Each entry in ncount gives the
    # number of occurrences of it in whichbin.  The length of ncount
    # is set by the maximum value in whichbin
    ncount = np.bincount(whichbin)

    # now bin the associated data
    phi_bin = np.zeros(len(ncount) - 1, dtype=np.float64)

    for n in range(1, len(ncount)):
        # remember that there are no whichbin == 0, since that
        # corresponds to the left edge.  So we want whichbin == 1 to
        # correspond to the first value of bin_centers
        # (bin_centers[0])
        phi_bin[n-1] = np.sum(phi.flat[whichbin == n])/ncount[n]

    bin_centers = bin_centers[0:len(ncount)-1]

    # get the analytic solution
    phi_exact = gaussian.phi_analytic(bin_centers, t, t_0, k, phi_0, phi_max)

    return bin_centers, phi_exact, phi_bin
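# A hypothetical comparison sketch (not part of the original script): plot the
# radially binned profile from process() against the analytic solution.  The
# plotfile name is a placeholder, and matplotlib is imported locally in case
# the surrounding module does not already do so.
def plot_profile(plotfile="gauss_diffusion_0080.pyro"):
    import matplotlib.pyplot as plt

    r, phi_exact, phi_bin = process(plotfile)

    plt.plot(r, phi_exact, label="analytic")
    plt.plot(r, phi_bin, "x", label="radially binned numerical")
    plt.xlabel("r")
    plt.ylabel("phi")
    plt.legend()
    plt.savefig("gauss_profile_compare.png")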
def test_vc_poisson_periodic(N, store_bench=False, comp_bench=False,
                             make_plot=False, verbose=1):
    """
    test the variable-coefficient MG solver.  The return value here is
    the error compared to the exact solution, UNLESS comp_bench=True,
    in which case the return value is the error compared to the stored
    benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = patch.BCObject(xlb="periodic", xrb="periodic",
                          ylb="periodic", yrb="periodic")
    d.register_var("c", bc_c)
    d.create()

    c = d.get_var("c")
    c[:, :] = alpha(g.x2d, g.y2d)

    # check whether the RHS sums to zero (necessary for periodic data)
    rhs = f(g.x2d, g.y2d)
    print("rhs sum: {}".format(np.sum(rhs[g.ilo:g.ihi+1, g.jlo:g.jhi+1])))

    # create the multigrid object
    a = MG.VarCoeffCCMG2d(nx, ny,
                          xl_BC_type="periodic", yl_BC_type="periodic",
                          xr_BC_type="periodic", yr_BC_type="periodic",
                          coeffs=c, coeffs_bc=bc_c,
                          verbose=verbose, vis=0, true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1, 10000)

    # get the solution
    v = a.get_solution()

    # get the true solution
    b = true(a.x2d, a.y2d)

    # compute the error from the analytic solution -- note that with
    # periodic BCs all around, there is nothing to normalize the
    # solution.  We subtract off the average of phi from the MG
    # solution (we do the same for the true solution to put them on
    # the same footing)
    e = v - np.sum(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1])/(nx*ny) - \
        (b - np.sum(b[a.ilo:a.ihi+1, a.jlo:a.jhi+1])/(nx*ny))

    enorm = a.soln_grid.norm(e)

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()
        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_vc_periodic_test.png")

    # store the output for later comparison
    bench = "mg_vc_poisson_periodic"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to {}".format(compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: {}\n".format(compare.errors[result]))

        return result

    # normal return -- error wrt true solution
    return enorm
basename = rp.get_param("io.basename")
pylab.savefig(basename + "%4.4d" % (n) + ".png")

pfd.end()

pf.end()

#-----------------------------------------------------------------------------
# benchmarks (for regression testing)
#-----------------------------------------------------------------------------

# are we comparing to a benchmark?
if comp_bench:
    compare_file = solverName + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("comparing to: %s " % (compare_file))
    bench_grid, bench_data = patch.read(compare_file)

    result = compare.compare(my_grid, my_data, bench_grid, bench_data)

    if result == 0:
        msg.success("results match benchmark\n")
    else:
        msg.fail("ERROR: " + compare.errors[result] + "\n")

# are we storing a benchmark?
if make_bench:
    bench_file = solverName + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("storing new benchmark: %s\n " % (bench_file))
    my_data.write(bench_file)
def compare(time, res=512, base='/Users/cgkim/Dropbox/pyro/data/', fileout=False):

    if res == 512:
        f1 = 'vortex_e1_512%4.4d.pyro' % time
        f0 = 'vortex_e0_512%4.4d.pyro' % time
    elif res == 256:
        f1 = 'vortex_e1_256_%4.4d.pyro' % time
        f0 = 'vortex_e0_256_%4.4d.pyro' % time
    elif res == 128:
        f1 = 'vortex_e1_%4.4d.pyro' % time
        f0 = 'vortex_e0_%4.4d.pyro' % time

    g1, d1 = patch.read(base + f1)
    g0, d0 = patch.read(base + f0)

    time = d1.t

    w1, gwx1, gwy1 = get_vorticity(g1, d1)
    w0, gwx0, gwy0 = get_vorticity(g0, d0)

    dw = w1 - w0
    dgwx = gwx1 - gwx0
    dgwy = gwy1 - gwy0
    dgw = np.sqrt(dgwx**2 + dgwy**2)

    n1 = g1.nx//4
    n2 = g1.nx//4*3
    # print(dgw.shape)

    plt.clf()

    y2d = g1.y2d[g1.ilo:g1.ihi+1, g1.jlo:g1.jhi+1]
    x2d = g1.x2d[g1.ilo:g1.ihi+1, g1.jlo:g1.jhi+1]

    idx1 = y2d < (-x2d + 0.5 + 0.4*time)
    idx2 = y2d > (-x2d + 1.5 - 0.4*time)
    idx3 = y2d > (+x2d + 0.6*time)
    idx4 = y2d < (+x2d - 2.0 - 0.6*time)

    dgw[idx1] = 0.
    dgw[idx2] = 0.
    dgw[idx3] = 0.
    dgw[idx4] = 0.

    upper = y2d > -x2d
    lower = y2d < -x2d
    # x1, y1 = x2d[upper][np.argmax(dgw[upper])], y2d[upper][np.argmax(dgw[upper])]
    # x2, y2 = x2d[lower][np.argmax(dgw[lower])], y2d[lower][np.argmax(dgw[lower])]
    # print(x1, y1, x2, y2)
    # print(np.sqrt((x1-x2)**2 + (y1-y2)**2), g1.dx)

    x1, y1 = x2d.flatten()[np.argmax(dgw)], y2d.flatten()[np.argmax(dgw)]
    nres = np.sqrt((x1 - 1.0)**2 + (y1 - 0.0)**2)/g1.dx

    im = plt.imshow(dgw.T, origin='lower', interpolation='nearest',
                    extent=[g1.xmin, g1.xmax, g1.ymin, g1.ymax],
                    norm=LogNorm(vmin=1.e-2, vmax=1.e2))

    plt.plot(g1.xr, -g1.xr + 0.5 + 0.4*time, ls=':', color='w')
    plt.plot(g1.xr, -g1.xr + 1.5 - 0.4*time, ls=':', color='w')
    plt.plot(g1.xr, +g1.xr + 0.6*time, ls=':', color='w')
    plt.plot(g1.xr, +g1.xr - 2.0 - 0.6*time, ls=':', color='w')

    plt.xlim(0, 2)
    plt.ylim(-1, 1)
    plt.colorbar(im)
    plt.scatter(x1, y1, marker='*')
    plt.axhline(0, ls=':')
    plt.axvline(1, ls=':')

    norm = np.sum(dgw**2*g1.dx*g1.dy)

    if fileout:
        if time == 0.0:
            fp = open('norm_%d.txt' % res, 'w')
        else:
            fp = open('norm_%d.txt' % res, 'a')
        fp.write("%15.5e %15.5e %15.5e\n" % (time, norm, nres))
        fp.close()

    print(time, norm, nres)

    return time, norm
def process(file):

    # read the data and convert to the primitive variables (and velocity
    # magnitude)
    myg, myd = patch.read(file)

    phi_t = myd.get_var("phi")
    phi = phi_t[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1]

    # get the problem parameters
    t_0 = myd.get_aux("t_0")
    phi_0 = myd.get_aux("phi_0")
    phi_max = myd.get_aux("phi_max")
    k = myd.get_aux("k")

    t = myd.t

    # radially bin
    # see http://code.google.com/p/agpy/source/browse/trunk/agpy/radialprofile.py?r=317
    # for inspiration

    # first define the bins
    rmin = 0
    rmax = np.sqrt(myg.xmax**2 + myg.ymax**2)

    nbins = int(np.sqrt(myg.nx**2 + myg.ny**2))

    # bins holds the edges, so there is one more value than actual bin
    # bin_centers holds the center value of the bin
    bins = np.linspace(rmin, rmax, nbins+1)
    bin_centers = 0.5*(bins[1:] + bins[:-1])

    # radius of each zone
    xcenter = 0.5*(myg.xmin + myg.xmax)
    ycenter = 0.5*(myg.ymin + myg.ymax)

    r = np.sqrt((myg.x2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - xcenter)**2 +
                (myg.y2d[myg.ilo:myg.ihi+1, myg.jlo:myg.jhi+1] - ycenter)**2)

    # bin the radii -- digitize returns an array with the same shape
    # as the input array but with elements of the array specifying
    # which bin that location belongs to.  The value of whichbin will
    # be 1 if we are located in the bin defined by bins[0] to bins[1].
    # This means that there will be no 0s
    whichbin = np.digitize(r.flat, bins)

    # bincount counts the number of occurrences of each non-negative
    # integer value in whichbin.  Each entry in ncount gives the
    # number of occurrences of it in whichbin.  The length of ncount
    # is set by the maximum value in whichbin
    ncount = np.bincount(whichbin)

    # now bin the associated data
    phi_bin = np.zeros(len(ncount)-1, dtype=np.float64)

    for n in range(1, len(ncount)):
        # remember that there are no whichbin == 0, since that
        # corresponds to the left edge.  So we want whichbin == 1 to
        # correspond to the first value of bin_centers
        # (bin_centers[0])
        phi_bin[n-1] = np.sum(phi.flat[whichbin == n])/ncount[n]

    bin_centers = bin_centers[0:len(ncount)-1]

    # get the analytic solution
    phi_exact = gaussian.phi_analytic(bin_centers, t, t_0, k, phi_0, phi_max)

    return bin_centers, phi_exact, phi_bin
def doit(solver_name, problem_name, param_file,
         other_commands=None, comp_bench=False, make_bench=False):

    msg.bold('pyro ...')

    tc = profile.TimerCollection()

    tm_main = tc.timer("main")
    tm_main.begin()

    # import desired solver under "solver" namespace
    solver = importlib.import_module(solver_name)

    #-------------------------------------------------------------------------
    # runtime parameters
    #-------------------------------------------------------------------------

    # parameter defaults
    rp = runparams.RuntimeParameters()
    rp.load_params("_defaults")
    rp.load_params(solver_name + "/_defaults")

    # problem-specific runtime parameters
    rp.load_params(solver_name + "/problems/_" + problem_name + ".defaults")

    # now read in the inputs file
    if not os.path.isfile(param_file):
        # check if the param file lives in the solver's problems directory
        param_file = solver_name + "/problems/" + param_file
        if not os.path.isfile(param_file):
            msg.fail("ERROR: inputs file does not exist")

    rp.load_params(param_file, no_new=1)

    # and any commandline overrides
    if other_commands is not None:
        rp.command_line_params(other_commands)

    # write out the inputs.auto
    rp.print_paramfile()

    #-------------------------------------------------------------------------
    # initialization
    #-------------------------------------------------------------------------

    # initialize the Simulation object -- this will hold the grid and
    # data and know about the runtime parameters and which problem we
    # are running
    sim = solver.Simulation(solver_name, problem_name, rp, timers=tc)

    sim.initialize()
    sim.preevolve()

    #-------------------------------------------------------------------------
    # evolve
    #-------------------------------------------------------------------------
    init_tstep_factor = rp.get_param("driver.init_tstep_factor")
    max_dt_change = rp.get_param("driver.max_dt_change")
    fix_dt = rp.get_param("driver.fix_dt")

    verbose = rp.get_param("driver.verbose")

    plt.ion()

    sim.cc_data.t = 0.0

    # output the 0th data
    basename = rp.get_param("io.basename")
    sim.cc_data.write("{}{:04d}".format(basename, sim.n))

    dovis = rp.get_param("vis.dovis")
    if dovis:
        plt.figure(num=1, figsize=(8, 6), dpi=100, facecolor='w')
        sim.dovis()

    while not sim.finished():

        # fill boundary conditions
        sim.cc_data.fill_BC_all()

        # get the timestep
        if fix_dt > 0.0:
            sim.dt = fix_dt
        else:
            sim.compute_timestep()

            if sim.n == 0:
                sim.dt = init_tstep_factor*sim.dt
            else:
                sim.dt = min(max_dt_change*dt_old, sim.dt)

            dt_old = sim.dt

        if sim.cc_data.t + sim.dt > sim.tmax:
            sim.dt = sim.tmax - sim.cc_data.t

        # evolve for a single timestep
        sim.evolve()

        if verbose > 0:
            print("%5d %10.5f %10.5f" % (sim.n, sim.cc_data.t, sim.dt))

        # output
        if sim.do_output():
            if verbose > 0:
                msg.warning("outputting...")
            basename = rp.get_param("io.basename")
            sim.cc_data.write("{}{:04d}".format(basename, sim.n))

        # visualization
        if dovis:
            tm_vis = tc.timer("vis")
            tm_vis.begin()

            sim.dovis()
            store = rp.get_param("vis.store_images")
            if store == 1:
                basename = rp.get_param("io.basename")
                plt.savefig("{}{:04d}.png".format(basename, sim.n))

            tm_vis.end()

    tm_main.end()

    #-------------------------------------------------------------------------
    # benchmarks (for regression testing)
    #-------------------------------------------------------------------------

    # are we comparing to a benchmark?
    if comp_bench:
        compare_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("comparing to: %s " % (compare_file))
        try:
            bench_grid, bench_data = patch.read(compare_file)
        except:
            msg.warning("ERROR opening compare file")
            return "ERROR opening compare file"

        result = compare.compare(sim.cc_data.grid, sim.cc_data,
                                 bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

    # are we storing a benchmark?
    if make_bench:
        if not os.path.isdir(solver_name + "/tests/"):
            try:
                os.mkdir(solver_name + "/tests/")
            except:
                msg.fail("ERROR: unable to create the solver's tests/ directory")

        bench_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("storing new benchmark: {}\n".format(bench_file))
        sim.cc_data.write(bench_file)

    #-------------------------------------------------------------------------
    # final reports
    #-------------------------------------------------------------------------
    if verbose > 0:
        rp.print_unused_params()

    if verbose > 0:
        tc.report()

    sim.finalize()

    if comp_bench:
        return result

    return None
    print(usage)
    sys.exit()


if __name__ == "__main__":

    try:
        opts, next = getopt.getopt(sys.argv[1:], "o:h")
    except getopt.GetoptError:
        sys.exit("invalid calling sequence")

    outfile = "plot.png"

    for o, a in opts:
        if o == "-h":
            usage()
        if o == "-o":
            outfile = a

    try:
        var = next[0]
    except IndexError:
        usage()

    try:
        file = next[1]
    except IndexError:
        usage()

    # makeplot() reads the plotfile itself, so pass the filename directly
    makeplot(file, var, outfile)
def init_data(my_data, rp):
    """ initialize the isothermal atmosphere problem """

    msg.bold("initializing the isothermal atmosphere problem...")

    # make sure that we are passed a valid patch object
    if not isinstance(my_data, patch.CellCenterData2d):
        print("ERROR: patch invalid in isothermal.py")
        print(my_data.__class__)
        sys.exit()

    # get the density, momenta, and energy as separate variables
    dens = my_data.get_var("density")
    xmom = my_data.get_var("x-momentum")
    ymom = my_data.get_var("y-momentum")
    ener = my_data.get_var("energy")

    gamma = rp.get_param("eos.gamma")
    grav_const = rp.get_param("compressible.grav")

    cs = rp.get_param("isothermal.cs")
    eddington_ratio = rp.get_param("isothermal.eddington")
    dens1 = rp.get_param("isothermal.dens1")
    amp = rp.get_param("isothermal.amp")
    nwaves = rp.get_param("isothermal.nwaves")

    xmax = rp.get_param("mesh.xmax")
    ymax = rp.get_param("mesh.ymax")

    if grav_const != 0.0:
        scale_height = cs*cs/numpy.abs(grav_const)
    else:
        scale_height = 0.1

    print("scale height:", scale_height)

    smallpres = 1.e-10
    smalldens = smallpres/(cs**2)

    # compute optical depth
    kappa = 1.0
    c = 1.0
    column_density = dens1*scale_height*(1.0 - numpy.exp(-ymax))
    optical_depth = column_density*kappa
    I_0 = (1./numpy.pi)*eddington_ratio*c*numpy.abs(grav_const) * \
        column_density/(1.0 - numpy.exp(-optical_depth))

    my_data.set_aux("surface_brightness", I_0)
    my_data.set_aux("speed_of_light", c)
    my_data.set_aux("opacity", kappa)

    print("optical depth:", optical_depth)
    print("surface brightness:", I_0)

    # compute Eddington ratio
    # mass weighted flux (plane-parallel radiation, constant kappa)
    rad_accel = (numpy.pi*I_0)*kappa/c * \
        (1.0 - numpy.exp(-optical_depth))/optical_depth

    net_accel = rad_accel + grav_const
    eddington_ratio = rad_accel/numpy.abs(grav_const)
    print("eddington_ratio:", eddington_ratio)
    print("net accel:", net_accel)

    # initialize the components, remember, that ener here is
    # rho*eint + 0.5*rho*v**2, where eint is the specific
    # internal energy (erg/g)
    xmom.d[:, :] = 0.0
    ymom.d[:, :] = 0.0
    dens.d[:, :] = 0.0

    if rp.get_param('restart.flag') != 0:
        # reload simulation state from file
        grid, cells = patch.read(rp.get_param('restart.snapshot'))
        dens.d[:, :] = cells.get_var('density').d
        xmom.d[:, :] = cells.get_var('x-momentum').d
        ymom.d[:, :] = cells.get_var('y-momentum').d
        ener.d[:, :] = cells.get_var('energy').d
        ## must set time!!
        my_data.t = cells.t
    else:
        # set the density to be stratified in the y-direction
        myg = my_data.grid
        p = myg.scratch_array()

        dens.d[:, :] = dens1*numpy.exp(-myg.y2d/scale_height)
        dens.d[dens.d < smalldens] = smalldens
        p.d[:, :] = dens.d*cs**2/gamma

        # set the velocity perturbations
        u = 0.
        A = amp*numpy.random.rand(dens.d.shape[0], dens.d.shape[1])
        # v = A*(1 + numpy.cos(nwaves*numpy.pi*myg.x2d/xmax))*0.5
        v = A*(numpy.cos(nwaves*numpy.pi*myg.x2d/xmax))*0.5

        # set the momenta
        xmom.d[:, :] = dens.d*u
        ymom.d[:, :] = dens.d*v

        # set the energy (P = cs2*dens/gamma)
        ener.d[:, :] = p.d[:, :]/(gamma - 1.0) + \
            0.5*(xmom.d[:, :]**2 + ymom.d[:, :]**2)/dens.d[:, :]
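# A hypothetical inputs-file fragment (illustrative values only) showing the
# runtime parameters that init_data() above reads via rp.get_param(), laid out
# in pyro's INI-style section/keyword format:
#
#   [eos]
#   gamma = 1.4
#
#   [compressible]
#   grav = -1.0
#
#   [isothermal]
#   cs = 1.0
#   eddington = 0.5
#   dens1 = 1.0
#   amp = 0.01
#   nwaves = 4
#
#   [restart]
#   flag = 0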