Example #1
    def compare_to_benchmark(self):
        """ Are we comparing to a benchmark? """

        basename = self.rp.get_param("io.basename")
        compare_file = "{}/tests/{}{:04d}".format(self.solver_name, basename,
                                                  self.sim.n)
        msg.warning("comparing to: {} ".format(compare_file))
        try:
            sim_bench = io.read(compare_file)
        except IOError:
            msg.warning("ERROR openning compare file")
            return "ERROR openning compare file"

        result = compare.compare(self.sim.cc_data, sim_bench.cc_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result
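
A minimal sketch of how a regression script might drive this method. The driver class name Pyro and its initialize_problem/run_sim calls are assumptions made for illustration only; the only thing taken from the example above is compare_to_benchmark().

# hedged sketch -- the driver class and its setup calls are hypothetical,
# only compare_to_benchmark() comes from the example above
pyro = Pyro("advection")                            # hypothetical driver class
pyro.initialize_problem("smooth", "inputs.smooth")  # hypothetical setup call
pyro.run_sim()                                      # hypothetical: evolve to completion
result = pyro.compare_to_benchmark()                # 0 means the run matched the benchmark
if result != 0:
    print("regression test failed with code {}".format(result))
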
Example #2
    def compare_to_benchmark(self, rtol):
        """ Are we comparing to a benchmark? """

        basename = self.rp.get_param("io.basename")
        compare_file = "{}/tests/{}{:04d}".format(
            self.solver_name, basename, self.sim.n)
        msg.warning("comparing to: {} ".format(compare_file))
        try:
            sim_bench = io.read(compare_file)
        except IOError:
            msg.warning("ERROR opening compare file")
            return "ERROR opening compare file"

        result = compare.compare(self.sim.cc_data, sim_bench.cc_data, rtol)

        if result == 0:
            msg.success("results match benchmark to within relative tolerance of {}\n".format(rtol))
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result
Example #3
def test_general_poisson_inhomogeneous(N, store_bench=False, comp_bench=False,
                                       make_plot=False, verbose=1):
    """
    test the general MG solver.  The return value
    here is the error compared to the exact solution, UNLESS
    comp_bench=True, in which case the return value is the
    error compared to the stored benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx


    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = patch.BCObject(xlb="neumann", xrb="neumann",
                          ylb="neumann", yrb="neumann")
    d.register_var("alpha", bc_c)
    d.register_var("beta", bc_c)
    d.register_var("gamma_x", bc_c)
    d.register_var("gamma_y", bc_c)
    d.create()

    a = d.get_var("alpha")
    a[:,:] = alpha(g.x2d, g.y2d)

    b = d.get_var("beta")
    b[:,:] = beta(g.x2d, g.y2d)

    gx = d.get_var("gamma_x")
    gx[:,:] = gamma_x(g.x2d, g.y2d)

    gy = d.get_var("gamma_y")
    gy[:,:] = gamma_y(g.x2d, g.y2d)

    
    # create the multigrid object
    a = MG.GeneralMG2d(nx, ny,
                       xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                       xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                       xl_BC=xl_func,
                       yl_BC=yl_func,
                       coeffs=d,
                       verbose=verbose, vis=0, true_function=true)


    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    print(np.min(rhs), np.max(rhs))

    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-10
    a.solve(rtol=1.e-10)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1,50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d,a.y2d)
    e = v - b

    enorm = a.soln_grid.norm(e)
    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
          (enorm, a.relative_error, a.num_cycles))


    # plot the solution
    if make_plot:
        plt.clf()

        plt.figure(figsize=(10.0,4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()


        plt.subplot(122)

        plt.imshow(np.transpose(e[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_general_inhomogeneous_test.png")

    # store the output for later comparison
    bench = "mg_general_poisson_inhomogeneous"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()
    
    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file) )
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data,
                                 bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    
    # normal return -- error wrt true solution
    return enorm
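
A hedged sketch of a convergence check built on this test: it uses only test_general_poisson_inhomogeneous() as defined above plus numpy, and the expectation of roughly second-order convergence is the standard one for this cell-centered discretization.

import numpy as np

# run the test on a sequence of grids and estimate the convergence rate from
# the returned L2 errors against the exact solution
Ns = [16, 32, 64, 128]
errors = [test_general_poisson_inhomogeneous(N, verbose=0) for N in Ns]

# a second-order method should roughly quarter the error each time N doubles
rates = [np.log2(errors[i] / errors[i + 1]) for i in range(len(errors) - 1)]
print("errors:", errors)
print("measured convergence rates:", rates)
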
Example #4
def test_general_poisson_dirichlet(N,
                                   store_bench=False,
                                   comp_bench=False,
                                   make_plot=False,
                                   verbose=1):
    """
    test the general MG solver.  The return value
    here is the error compared to the exact solution, UNLESS
    comp_bench=True, in which case the return value is the
    error compared to the stored benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = bnd.BC(xlb="neumann", xrb="neumann", ylb="neumann", yrb="neumann")
    d.register_var("alpha", bc_c)
    d.register_var("beta", bc_c)
    d.register_var("gamma_x", bc_c)
    d.register_var("gamma_y", bc_c)
    d.create()

    a = d.get_var("alpha")
    a[:, :] = alpha(g.x2d, g.y2d)

    b = d.get_var("beta")
    b[:, :] = beta(g.x2d, g.y2d)

    gx = d.get_var("gamma_x")
    gx[:, :] = gamma_x(g.x2d, g.y2d)

    gy = d.get_var("gamma_y")
    gy[:, :] = gamma_y(g.x2d, g.y2d)

    # create the multigrid object
    a = MG.GeneralMG2d(nx,
                       ny,
                       xl_BC_type="dirichlet",
                       yl_BC_type="dirichlet",
                       xr_BC_type="dirichlet",
                       yr_BC_type="dirichlet",
                       coeffs=d,
                       verbose=verbose,
                       vis=0,
                       true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    # a.smooth(a.nlevels-1,50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    enorm = e.norm()
    print(
        " L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d"
        % (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()

        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v.v()),
                   interpolation="nearest",
                   origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e.v()),
                   interpolation="nearest",
                   origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_general_dirichlet_test.png")

    # store the output for later comparison
    bench = "mg_general_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench = io.read(compare_file)

        result = compare.compare(my_data, bench)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    # normal return -- error wrt true solution
    return enorm
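
A possible benchmark round trip for the Dirichlet test above, assuming the PYRO_HOME environment variable is set so that the $PYRO_HOME/multigrid/tests/ directory used by the function exists: store a benchmark, then check that a fresh run reproduces it.

# hedged sketch: store a benchmark, then verify a fresh run against it
test_general_poisson_dirichlet(128, store_bench=True, verbose=0)
result = test_general_poisson_dirichlet(128, comp_bench=True, verbose=0)
assert result == 0, "solution no longer matches the stored benchmark"
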
Example #5
def doit(solver_name, problem_name, param_file,
         other_commands=None,
         comp_bench=False, make_bench=False):

    msg.bold('pyro ...')

    tc = profile.TimerCollection()

    tm_main = tc.timer("main")
    tm_main.begin()

    # import desired solver under "solver" namespace
    solver = importlib.import_module(solver_name)

    #-------------------------------------------------------------------------
    # runtime parameters
    #-------------------------------------------------------------------------

    # parameter defaults
    rp = runparams.RuntimeParameters()
    rp.load_params("_defaults")
    rp.load_params(solver_name + "/_defaults")

    # problem-specific runtime parameters
    rp.load_params(solver_name + "/problems/_" + problem_name + ".defaults")

    # now read in the inputs file
    if not os.path.isfile(param_file):
        # check if the param file lives in the solver's problems directory
        param_file = solver_name + "/problems/" + param_file
        if not os.path.isfile(param_file):
            msg.fail("ERROR: inputs file does not exist")

    rp.load_params(param_file, no_new=1)

    # and any commandline overrides
    if other_commands is not None:
        rp.command_line_params(other_commands)

    # write out the inputs.auto
    rp.print_paramfile()


    #-------------------------------------------------------------------------
    # initialization
    #-------------------------------------------------------------------------

    # initialize the Simulation object -- this will hold the grid and
    # data and know about the runtime parameters and which problem we
    # are running
    sim = solver.Simulation(solver_name, problem_name, rp, timers=tc)

    sim.initialize()
    sim.preevolve()


    #-------------------------------------------------------------------------
    # evolve
    #-------------------------------------------------------------------------
    init_tstep_factor = rp.get_param("driver.init_tstep_factor")
    max_dt_change = rp.get_param("driver.max_dt_change")
    fix_dt = rp.get_param("driver.fix_dt")

    verbose = rp.get_param("driver.verbose")

    plt.ion()

    sim.cc_data.t = 0.0

    # output the 0th data
    basename = rp.get_param("io.basename")
    sim.cc_data.write("{}{:04d}".format(basename, sim.n))

    dovis = rp.get_param("vis.dovis")
    if dovis:
        plt.figure(num=1, figsize=(8,6), dpi=100, facecolor='w')
        sim.dovis()

    while not sim.finished():

        # fill boundary conditions
        sim.cc_data.fill_BC_all()

        # get the timestep
        if fix_dt > 0.0:
            sim.dt = fix_dt
        else:
            sim.compute_timestep()
            if sim.n == 0:
                sim.dt = init_tstep_factor*sim.dt
            else:
                sim.dt = min(max_dt_change*dt_old, sim.dt)
            dt_old = sim.dt

        if sim.cc_data.t + sim.dt > sim.tmax:
            sim.dt = sim.tmax - sim.cc_data.t

        # evolve for a single timestep
        sim.evolve()

        if verbose > 0: print("%5d %10.5f %10.5f" % (sim.n, sim.cc_data.t, sim.dt))

        # output
        if sim.do_output():
            if verbose > 0: msg.warning("outputting...")
            basename = rp.get_param("io.basename")
            sim.cc_data.write("{}{:04d}".format(basename, sim.n))

        # visualization
        if dovis:
            tm_vis = tc.timer("vis")
            tm_vis.begin()

            sim.dovis()
            store = rp.get_param("vis.store_images")

            if store == 1:
                basename = rp.get_param("io.basename")
                plt.savefig("{}{:04d}.png".format(basename, sim.n))

            tm_vis.end()

    tm_main.end()


    #-------------------------------------------------------------------------
    # benchmarks (for regression testing)
    #-------------------------------------------------------------------------
    # are we comparing to a benchmark?
    if comp_bench:
        compare_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("comparing to: %s " % (compare_file) )
        try:
            bench_grid, bench_data = patch.read(compare_file)
        except IOError:
            msg.warning("ERROR opening compare file")
            return "ERROR opening compare file"


        result = compare.compare(sim.cc_data.grid, sim.cc_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")


    # are we storing a benchmark?
    if make_bench:
        if not os.path.isdir(solver_name + "/tests/"):
            try:
                os.mkdir(solver_name + "/tests/")
            except OSError:
                msg.fail("ERROR: unable to create the solver's tests/ directory")

        bench_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("storing new benchmark: {}\n".format(bench_file))
        sim.cc_data.write(bench_file)


    #-------------------------------------------------------------------------
    # final reports
    #-------------------------------------------------------------------------
    if verbose > 0: rp.print_unused_params()
    if verbose > 0: tc.report()

    sim.finalize()

    if comp_bench:
        return result
    else:
        return None
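
A minimal command-line wrapper around doit() might look like the sketch below, appended to the module that defines it. The flag names and help strings are illustrative assumptions; only doit() and its keyword arguments come from the example above.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("solver", help="name of the solver module to import")
    parser.add_argument("problem", help="problem name (selects _<problem>.defaults)")
    parser.add_argument("param_file", help="inputs file with runtime parameters")
    parser.add_argument("--compare-benchmark", action="store_true")
    parser.add_argument("--make-benchmark", action="store_true")
    args = parser.parse_args()

    doit(args.solver, args.problem, args.param_file,
         comp_bench=args.compare_benchmark, make_bench=args.make_benchmark)
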
Example #6
def test_poisson_dirichlet(N,
                           store_bench=False,
                           comp_bench=False,
                           make_plot=False,
                           verbose=1):

    # test the multigrid solver
    nx = N
    ny = nx

    # create the multigrid object
    a = MG.CellCenterMG2d(nx,
                          ny,
                          xl_BC_type="dirichlet",
                          yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet",
                          yr_BC_type="dirichlet",
                          verbose=verbose)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1,50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d, a.y2d)
    e = v - b

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
          (e.norm(), a.relative_error, a.num_cycles))

    # plot it
    if make_plot:
        plt.figure(num=1, figsize=(5.0, 5.0), dpi=100, facecolor='w')

        plt.imshow(np.transpose(v[a.ilo:a.ihi + 1, a.jlo:a.jhi + 1]),
                   interpolation="nearest",
                   origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")

        plt.savefig("mg_test.png")

    # store the output for later comparison
    bench = "mg_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    return None
Example #7
def doit(solver_name, problem_name, param_file,
         other_commands=None,
         comp_bench=False, reset_bench_on_fail=False, make_bench=False):
    """The main driver to run pyro"""

    msg.bold('pyro ...')

    tc = profile.TimerCollection()

    tm_main = tc.timer("main")
    tm_main.begin()

    # import desired solver under "solver" namespace
    solver = importlib.import_module(solver_name)

    #-------------------------------------------------------------------------
    # runtime parameters
    #-------------------------------------------------------------------------

    # parameter defaults
    rp = runparams.RuntimeParameters()
    rp.load_params("_defaults")
    rp.load_params(solver_name + "/_defaults")

    # problem-specific runtime parameters
    rp.load_params(solver_name + "/problems/_" + problem_name + ".defaults")

    # now read in the inputs file
    if not os.path.isfile(param_file):
        # check if the param file lives in the solver's problems directory
        param_file = solver_name + "/problems/" + param_file
        if not os.path.isfile(param_file):
            msg.fail("ERROR: inputs file does not exist")

    rp.load_params(param_file, no_new=1)

    # and any commandline overrides
    if other_commands is not None:
        rp.command_line_params(other_commands)

    # write out the inputs.auto
    rp.print_paramfile()


    #-------------------------------------------------------------------------
    # initialization
    #-------------------------------------------------------------------------

    # initialize the Simulation object -- this will hold the grid and
    # data and know about the runtime parameters and which problem we
    # are running
    sim = solver.Simulation(solver_name, problem_name, rp, timers=tc)

    sim.initialize()
    sim.preevolve()


    #-------------------------------------------------------------------------
    # evolve
    #-------------------------------------------------------------------------
    verbose = rp.get_param("driver.verbose")

    plt.ion()

    sim.cc_data.t = 0.0

    # output the 0th data
    basename = rp.get_param("io.basename")
    sim.write("{}{:04d}".format(basename, sim.n))

    dovis = rp.get_param("vis.dovis")
    if dovis:
        plt.figure(num=1, figsize=(8, 6), dpi=100, facecolor='w')
        sim.dovis()

    while not sim.finished():

        # fill boundary conditions
        sim.cc_data.fill_BC_all()

        # get the timestep
        sim.compute_timestep()

        # evolve for a single timestep
        sim.evolve()

        if verbose > 0:
            print("%5d %10.5f %10.5f" % (sim.n, sim.cc_data.t, sim.dt))

        # output
        if sim.do_output():
            if verbose > 0:
                msg.warning("outputting...")
            basename = rp.get_param("io.basename")
            sim.write("{}{:04d}".format(basename, sim.n))

        # visualization
        if dovis:
            tm_vis = tc.timer("vis")
            tm_vis.begin()

            sim.dovis()
            store = rp.get_param("vis.store_images")

            if store == 1:
                basename = rp.get_param("io.basename")
                plt.savefig("{}{:04d}.png".format(basename, sim.n))

            tm_vis.end()

    # final output
    if verbose > 0:
        msg.warning("outputting...")
    basename = rp.get_param("io.basename")
    sim.write("{}{:04d}".format(basename, sim.n))

    tm_main.end()


    #-------------------------------------------------------------------------
    # benchmarks (for regression testing)
    #-------------------------------------------------------------------------
    result = 0
    # are we comparing to a benchmark?
    if comp_bench:
        compare_file = "{}/tests/{}{:04d}".format(
            solver_name, basename, sim.n)
        msg.warning("comparing to: {} ".format(compare_file))
        try:
            sim_bench = io.read(compare_file)
        except IOError:
            msg.warning("ERROR opening compare file")
            return "ERROR opening compare file"


        result = compare.compare(sim.cc_data, sim_bench.cc_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")


    # are we storing a benchmark?
    if make_bench or (result != 0 and reset_bench_on_fail):
        if not os.path.isdir(solver_name + "/tests/"):
            try:
                os.mkdir(solver_name + "/tests/")
            except OSError:
                msg.fail("ERROR: unable to create the solver's tests/ directory")

        bench_file = solver_name + "/tests/" + basename + "%4.4d" % (sim.n)
        msg.warning("storing new benchmark: {}\n".format(bench_file))
        sim.write(bench_file)


    #-------------------------------------------------------------------------
    # final reports
    #-------------------------------------------------------------------------
    if verbose > 0:
        rp.print_unused_params()
        tc.report()

    sim.finalize()

    if comp_bench:
        return result
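
A hedged sketch of how this variant of doit() could back a small regression suite; the (solver, problem, inputs) tuples are placeholders, not values taken from this file.

tests = [("advection", "smooth", "inputs.smooth"),
         ("compressible", "sod", "inputs.sod.x")]

failures = 0
for solver, problem, inputs in tests:
    # pass reset_bench_on_fail=True to overwrite the benchmark when a run fails
    result = doit(solver, problem, inputs, comp_bench=True)
    if result != 0:
        failures += 1

print("{} of {} regression tests failed".format(failures, len(tests)))
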
Example #8
tm_main.end()


#-----------------------------------------------------------------------------
# benchmarks (for regression testing)
#-----------------------------------------------------------------------------
# are we comparing to a benchmark?
if comp_bench:
    compare_file = solver_name + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("comparing to: %s " % (compare_file) )
    bench_grid, bench_data = patch.read(compare_file)

    result = compare.compare(sim.cc_data.grid, sim.cc_data, bench_grid, bench_data)
    
    if result == 0:
        msg.success("results match benchmark\n")
    else:
        msg.fail("ERROR: " + compare.errors[result] + "\n")


# are we storing a benchmark?
if make_bench:
    bench_file = solver_name + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("storing new benchmark: %s\n " % (bench_file) )
    sim.cc_data.write(bench_file)
    

#-----------------------------------------------------------------------------
# final reports
#-----------------------------------------------------------------------------
rp.print_unused_params()
Example #9
def test_vc_poisson_periodic(N,
                             store_bench=False,
                             comp_bench=False,
                             make_plot=False,
                             verbose=1):
    """
    test the variable-coefficient MG solver.  The return value
    here is the error compared to the exact solution, UNLESS
    comp_bench=True, in which case the return value is the
    error compared to the stored benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx

    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = bnd.BC(xlb="periodic",
                  xrb="periodic",
                  ylb="periodic",
                  yrb="periodic")
    d.register_var("c", bc_c)
    d.create()

    c = d.get_var("c")
    c[:, :] = alpha(g.x2d, g.y2d)

    # check whether the RHS sums to zero (necessary for periodic data)
    rhs = f(g.x2d, g.y2d)
    print("rhs sum: {}".format(np.sum(rhs[g.ilo:g.ihi + 1, g.jlo:g.jhi + 1])))

    # create the multigrid object
    a = MG.VarCoeffCCMG2d(nx,
                          ny,
                          xl_BC_type="periodic",
                          yl_BC_type="periodic",
                          xr_BC_type="periodic",
                          yr_BC_type="periodic",
                          coeffs=c,
                          coeffs_bc=bc_c,
                          verbose=verbose,
                          vis=0,
                          true_function=true)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1,10000)

    # get the solution
    v = a.get_solution()

    # get the true solution
    b = true(a.x2d, a.y2d)

    # compute the error from the analytic solution -- note that with
    # periodic BCs all around, there is nothing to normalize the
    # solution.  We subtract off the average of phi from the MG
    # solution (we do the same for the true solution to put them on
    # the same footing)
    e = v - np.sum(v.v()) / (nx * ny) - (
        b - np.sum(b[a.ilo:a.ihi + 1, a.jlo:a.jhi + 1]) / (nx * ny))

    enorm = e.norm()
    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
          (enorm, a.relative_error, a.num_cycles))

    # plot the solution
    if make_plot:
        plt.clf()

        plt.figure(figsize=(10.0, 4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v.v()),
                   interpolation="nearest",
                   origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()

        plt.subplot(122)

        plt.imshow(np.transpose(e.v()),
                   interpolation="nearest",
                   origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_vc_periodic_test.png")

    # store the output for later comparison
    bench = "mg_vc_poisson_periodic"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to {}".format(compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: {}\n".format(compare.errors[result]))

        return result

    # normal return -- error wrt true solution
    return enorm
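
The mean subtraction in the error computation above can be illustrated in isolation: with periodic boundaries the Poisson solution is only determined up to an additive constant, so two fields that differ by a constant offset agree exactly once their averages are removed. This is plain numpy, independent of the MG classes.

import numpy as np

phi = np.random.random((8, 8))
phi_shifted = phi + 3.7     # the "same" periodic solution, offset by a constant

diff = (phi - phi.mean()) - (phi_shifted - phi_shifted.mean())
print(np.abs(diff).max())   # ~1e-16: the arbitrary constant drops out
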
Example #10
def test_poisson_dirichlet(N, store_bench=False, comp_bench=False,
                           make_plot=False, verbose=1):
    
    # test the multigrid solver
    nx = N
    ny = nx


    # create the multigrid object
    a = MG.CellCenterMG2d(nx, ny,
                          xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                          xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                          verbose=verbose)

    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1,50000)

    # get the solution
    v = a.get_solution()

    # compute the error from the analytic solution
    b = true(a.x2d,a.y2d)
    e = v - b

    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
          (a.soln_grid.norm(e), a.relative_error, a.num_cycles))


    # plot it
    if make_plot:
        plt.figure(num=1, figsize=(5.0,5.0), dpi=100, facecolor='w')

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")

        plt.savefig("mg_test.png")

        
    # store the output for later comparison
    bench = "mg_poisson_dirichlet"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()
    
    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to: %s " % (compare_file) )
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data, bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: " + compare.errors[result] + "\n")

        return result

    return None
Example #11
def test_vc_poisson_periodic(N, store_bench=False, comp_bench=False,
                             make_plot=False, verbose=1):
    """
    test the variable-coefficient MG solver.  The return value
    here is the error compared to the exact solution, UNLESS
    comp_bench=True, in which case the return value is the
    error compared to the stored benchmark
    """

    # test the multigrid solver
    nx = N
    ny = nx


    # create the coefficient variable
    g = patch.Grid2d(nx, ny, ng=1)
    d = patch.CellCenterData2d(g)
    bc_c = patch.BCObject(xlb="periodic", xrb="periodic",
                          ylb="periodic", yrb="periodic")
    d.register_var("c", bc_c)
    d.create()

    c = d.get_var("c")
    c[:,:] = alpha(g.x2d, g.y2d)

    # check whether the RHS sums to zero (necessary for periodic data)
    rhs = f(g.x2d, g.y2d)
    print("rhs sum: {}".format(np.sum(rhs[g.ilo:g.ihi+1,g.jlo:g.jhi+1])))


    # create the multigrid object
    a = MG.VarCoeffCCMG2d(nx, ny,
                          xl_BC_type="periodic", yl_BC_type="periodic",
                          xr_BC_type="periodic", yr_BC_type="periodic",
                          coeffs=c, coeffs_bc=bc_c,
                          verbose=verbose, vis=0, true_function=true)


    # initialize the solution to 0
    a.init_zeros()

    # initialize the RHS using the function f
    rhs = f(a.x2d, a.y2d)
    a.init_RHS(rhs)

    # solve to a relative tolerance of 1.e-11
    a.solve(rtol=1.e-11)

    # alternately, we can just use smoothing by uncommenting the following
    #a.smooth(a.nlevels-1,10000)

    # get the solution
    v = a.get_solution()

    # get the true solution
    b = true(a.x2d,a.y2d)

    # compute the error from the analytic solution -- note that with
    # periodic BCs all around, there is nothing to normalize the
    # solution.  We subtract off the average of phi from the MG
    # solution (we do the same for the true solution to put them on
    # the same footing)
    e = v - np.sum(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1])/(nx*ny) - (b - np.sum(b[a.ilo:a.ihi+1,a.jlo:a.jhi+1])/(nx*ny))

    enorm = a.soln_grid.norm(e)
    print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
          (enorm, a.relative_error, a.num_cycles))


    # plot the solution
    if make_plot:
        plt.clf()

        plt.figure(figsize=(10.0,4.0), dpi=100, facecolor='w')

        plt.subplot(121)

        plt.imshow(np.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("nx = {}".format(nx))

        plt.colorbar()


        plt.subplot(122)

        plt.imshow(np.transpose(e[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
                   interpolation="nearest", origin="lower",
                   extent=[a.xmin, a.xmax, a.ymin, a.ymax])

        plt.xlabel("x")
        plt.ylabel("y")
        plt.title("error")

        plt.colorbar()

        plt.tight_layout()

        plt.savefig("mg_vc_periodic_test.png")


    # store the output for later comparison
    bench = "mg_vc_poisson_periodic"
    bench_dir = os.environ["PYRO_HOME"] + "/multigrid/tests/"

    my_data = a.get_solution_object()

    if store_bench:
        my_data.write("{}/{}".format(bench_dir, bench))

    # do we do a comparison?
    if comp_bench:
        compare_file = "{}/{}".format(bench_dir, bench)
        msg.warning("comparing to {}".format(compare_file))
        bench_grid, bench_data = patch.read(compare_file)

        result = compare.compare(my_data.grid, my_data,
                                 bench_grid, bench_data)

        if result == 0:
            msg.success("results match benchmark\n")
        else:
            msg.warning("ERROR: {}\n".format(compare.errors[result]))

        return result


    # normal return -- error wrt true solution
    return enorm
Example #12
pf.end()


#-----------------------------------------------------------------------------
# benchmarks (for regression testing)
#-----------------------------------------------------------------------------
# are we comparing to a benchmark?
if comp_bench:
    compare_file = solverName + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("comparing to: %s " % (compare_file) )
    bench_grid, bench_data = patch.read(compare_file)

    result = compare.compare(my_grid, my_data, bench_grid, bench_data)
    
    if result == 0:
        msg.success("results match benchmark\n")
    else:
        msg.fail("ERROR: " + compare.errors[result] + "\n")


# are we storing a benchmark?
if make_bench:
    bench_file = solverName + "/tests/" + basename + "%4.4d" % (n)
    msg.warning("storing new benchmark: %s\n " % (bench_file) )
    my_data.write(bench_file)
    

#-----------------------------------------------------------------------------
# final reports
#-----------------------------------------------------------------------------
rp.print_unused_params()