Example #1
def _process_common(args, mesh, soln, cfg):
    # Prefork to allow us to exec processes after MPI is initialised
    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork

        enable_prefork()

    # Import but do not initialise MPI
    from mpi4py import MPI

    # Manually initialise MPI
    MPI.Init()

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # Execute!
    solver.run()
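The pair of comments above ("Import but do not initialise MPI" followed by a manual MPI.Init()) is only consistent if mpi4py's automatic initialisation was disabled before the import, which mpi4py supports through its rc object. A minimal sketch of that pattern; the rc toggle is standard mpi4py, but its placement relative to this excerpt is an assumption about code not shown here:

import mpi4py
mpi4py.rc.initialize = False   # must run before 'from mpi4py import MPI'

from mpi4py import MPI

assert not MPI.Is_initialized()
MPI.Init()                     # now the manual Init() above is valid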
Example #2
def main():
    from mpi4py import MPI

    ap = ArgumentParser(prog="pyfr-sim", description="Runs a PyFR simulation")
    ap.add_argument("--verbose", "-v", action="count")
    ap.add_argument("--backend", "-b", default="cuda", help="Backend to use")
    ap.add_argument("--progress", "-p", action="store_true", help="show a progress bar")
    ap.add_argument("--nansweep", "-n", metavar="N", type=int, help="check for NaNs every N steps")

    sp = ap.add_subparsers(help="sub-command help")

    ap_run = sp.add_parser("run", help="run --help")
    ap_run.add_argument("mesh", help="mesh file")
    ap_run.add_argument("cfg", type=FileType("r"), help="config file")
    ap_run.set_defaults(process=process_run)

    ap_restart = sp.add_parser("restart", help="restart --help")
    ap_restart.add_argument("mesh", help="mesh file")
    ap_restart.add_argument("soln", help="solution file")
    ap_restart.add_argument("cfg", nargs="?", type=FileType("r"), help="new config file")
    ap_restart.set_defaults(process=process_restart)

    # Parse the arguments
    args = ap.parse_args()
    mesh, soln, cfg = args.process(args)

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # NaN sweeping
    if args.nansweep:

        def nansweep(intg):
            if intg.nsteps % args.nansweep == 0:
                if any(np.isnan(np.sum(s)) for s in intg.soln):
                    raise RuntimeError("NaNs detected at t = {}".format(intg.tcurr))

        solver.completed_step_handlers.append(nansweep)

    # Execute!
    solver.run()
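The nansweep handler relies on a reduction trick: np.sum propagates NaNs, so a single scalar np.isnan test detects a NaN anywhere in a solution array without building an elementwise mask. A self-contained demonstration:

import numpy as np

a = np.ones((4, 4))
assert not np.isnan(np.sum(a))

a[2, 3] = np.nan            # poison a single entry
assert np.isnan(np.sum(a))  # the NaN propagates through the sum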
Example #3
    def _process_common(self, args, mesh, soln, cfg):
        # Prefork to allow us to exec processes after MPI is initialised
        if hasattr(os, 'fork'):
            from pytools.prefork import enable_prefork

            enable_prefork()

        # Ensure MPI is suitably cleaned up
        register_finalize_handler()

        # Create a backend
        backend = get_backend(args.backend, cfg)

        # Get the mapping from physical ranks to MPI ranks
        rallocs = get_rank_allocation(mesh, cfg)

        # Construct the solver
        self.solver = get_solver(backend, rallocs, mesh, soln, cfg)
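The hasattr(os, 'fork') guard and enable_prefork() call address a real MPI pitfall: forking after MPI has initialised is unsafe with many MPI implementations, so pytools forks a small helper process early and proxies later subprocess launches through it. A minimal usage sketch; enable_prefork and call are pytools.prefork entry points, but the command shown is purely illustrative:

from pytools.prefork import call, enable_prefork

enable_prefork()             # fork the helper before MPI initialises

# Later subprocess-style launches are serviced by the preforked
# helper rather than by fork()ing the large MPI-attached process
retcode = call(['hostname'])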
Example #4
def main():
    from mpi4py import MPI

    ap = ArgumentParser(prog='pyfr-sim', description='Runs a PyFR simulation')
    ap.add_argument('--verbose', '-v', action='count')
    ap.add_argument('--backend', '-b', default='cuda', help='Backend to use')
    ap.add_argument('--progress',
                    '-p',
                    action='store_true',
                    help='show a progress bar')
    ap.add_argument('--nansweep',
                    '-n',
                    metavar='N',
                    type=int,
                    help='check for NaNs every N steps')

    sp = ap.add_subparsers(help='sub-command help')

    ap_run = sp.add_parser('run', help='run --help')
    ap_run.add_argument('mesh', help='mesh file')
    ap_run.add_argument('cfg', type=FileType('r'), help='config file')
    ap_run.set_defaults(process=process_run)

    ap_restart = sp.add_parser('restart', help='restart --help')
    ap_restart.add_argument('mesh', help='mesh file')
    ap_restart.add_argument('soln', help='solution file')
    ap_restart.add_argument('cfg',
                            nargs='?',
                            type=FileType('r'),
                            help='new config file')
    ap_restart.set_defaults(process=process_restart)

    # Parse the arguments
    args = ap.parse_args()
    mesh, soln, cfg = args.process(args)

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # NaN sweeping
    if args.nansweep:

        def nansweep(intg):
            if intg.nsteps % args.nansweep == 0:
                if any(np.isnan(np.sum(s)) for s in intg.soln):
                    raise RuntimeError('NaNs detected at t = {}'.format(
                        intg.tcurr))

        solver.completed_step_handlers.append(nansweep)

    # Execute!
    solver.run()
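All of these examples hook into the solver through completed_step_handlers: the integrator calls each registered handler with itself after every completed step, which is what lets the progress bar and the NaN sweep read tcurr and nsteps. A minimal sketch of that callback protocol; ToyIntegrator is illustrative and not PyFR's actual integrator class:

class ToyIntegrator:
    def __init__(self, tstart, tend, dt):
        self.tcurr, self.tend, self.dt = tstart, tend, dt
        self.nsteps = 0
        self.completed_step_handlers = []

    def run(self):
        while self.tcurr < self.tend:
            self.tcurr += self.dt
            self.nsteps += 1

            # Fire each registered handler, passing the integrator so
            # handlers can inspect tcurr, nsteps and so on
            for handler in self.completed_step_handlers:
                handler(self)

intg = ToyIntegrator(0.0, 1.0, 0.25)
intg.completed_step_handlers.append(
    lambda i: print('t = {:.2f}'.format(i.tcurr)))
intg.run()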
Example #5
def main():
    ap = ArgumentParser(prog='pyfr-sim', description='Runs a PyFR simulation')
    ap.add_argument('--verbose', '-v', action='count')
    ap.add_argument('--backend', '-b', default='cuda', help='Backend to use')
    ap.add_argument('--progress', '-p', action='store_true',
                    help='show a progress bar')
    ap.add_argument('--nansweep', '-n', metavar='N', type=int,
                    help='check for NaNs every N steps')

    sp = ap.add_subparsers(help='sub-command help')

    ap_run = sp.add_parser('run', help='run --help')
    ap_run.add_argument('mesh', help='mesh file')
    ap_run.add_argument('cfg', type=FileType('r'), help='config file')
    ap_run.set_defaults(process=process_run)

    ap_restart = sp.add_parser('restart', help='restart --help')
    ap_restart.add_argument('mesh', help='mesh file')
    ap_restart.add_argument('soln', help='solution file')
    ap_restart.add_argument('cfg', nargs='?', type=FileType('r'),
                            help='new config file')
    ap_restart.set_defaults(process=process_restart)

    # Parse the arguments
    args = ap.parse_args()
    mesh, soln, cfg = args.process(args)

    # Prefork to allow us to exec processes after MPI is initialised
    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork

        enable_prefork()

    # Import and hence initialise MPI
    from mpi4py import MPI

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # NaN sweeping
    if args.nansweep:
        def nansweep(intg):
            if intg.nsteps % args.nansweep == 0:
                if any(np.isnan(np.sum(s)) for s in intg.soln):
                    raise RuntimeError('NaNs detected at t = {}'
                                       .format(intg.tcurr))
        solver.completed_step_handlers.append(nansweep)

    # Execute!
    solver.run()
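None of the excerpts shows its imports. Assuming they come from PyFR's pyfr-sim script, the enclosing module would need roughly the following; the pyfr.* paths match older PyFR releases and should be read as assumptions rather than a current API:

import os

from argparse import ArgumentParser, FileType

import numpy as np

from pyfr.backends import get_backend
from pyfr.mpiutil import register_finalize_handler
from pyfr.progress_bar import ProgressBar
from pyfr.rank_allocator import get_rank_allocation
from pyfr.solvers import get_solver

process_run and process_restart, wired in via set_defaults, are presumably sibling functions in the same script that load the mesh, solution and config named on the command line. A typical invocation would then look like pyfr-sim -p run mesh.pyfrm config.ini, or pyfr-sim restart mesh.pyfrm solution.pyfrs to resume from a saved state (file names illustrative).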