Example #1
def _process_common(args, mesh, soln, cfg):
    # Prefork to allow us to exec processes after MPI is initialised
    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork

        enable_prefork()

    # Import but do not initialise MPI
    from mpi4py import MPI

    # Manually initialise MPI
    MPI.Init()

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # Execute!
    solver.run()

    # Finalise MPI
    MPI.Finalize()
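
Note that the "# Import but do not initialise MPI" comment above only holds if mpi4py's automatic initialisation has been switched off beforehand; Example #8 below does exactly this via mpi4py.rc. As a minimal, self-contained sketch of that deferred-initialisation pattern (an illustration, not the project's actual entry point):

import os

# Prefork before MPI is initialised, as in the examples on this page
if hasattr(os, 'fork'):
    from pytools.prefork import enable_prefork

    enable_prefork()

# Disable automatic initialisation/finalisation so that MPI.Init() below
# is the first MPI call made by this process
import mpi4py
mpi4py.rc.initialize = False
mpi4py.rc.finalize = False

from mpi4py import MPI

MPI.Init()
try:
    print('Hello from rank', MPI.COMM_WORLD.rank)
finally:
    MPI.Finalize()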
Example #2
def _process_common(args, mesh, soln, cfg):
    # Prefork to allow us to exec processes after MPI is initialised
    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork

        enable_prefork()

    # Import but do not initialise MPI
    from mpi4py import MPI

    # Manually initialise MPI
    MPI.Init()

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # Execute!
    solver.run()
Example #3
def __main__():

    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork
        enable_prefork()

    # Import mpi4py and manually initialise MPI
    from mpi4py import MPI
    MPI.Init()

    # Select the CUDA device based on this process' node-local rank
    print("Local rank", get_local_rank())
    os.environ.pop('CUDA_DEVICE', None)
    devid = get_local_rank()
    os.environ['CUDA_DEVICE'] = str(devid)

    # Initialise CUDA directly (rather than via pycuda.autoinit) and create
    # a context on the device selected above
    import pycuda.driver as cuda

    cuda.init()
    cudadevice = cuda.Device(devid)
    cudacontext = cudadevice.make_context()

    import atexit
    atexit.register(cudacontext.pop)

    # Run the main process
    main()

    # Finalise MPI
    MPI.Finalize()
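
The get_local_rank() helper used above is defined elsewhere in this project. One common way to derive a node-local rank with mpi4py (an assumption for illustration, not necessarily what this code does) is to split COMM_WORLD into per-node, shared-memory sub-communicators after MPI.Init():

from mpi4py import MPI

def get_local_rank():
    # Hypothetical helper: rank of this process within its own node,
    # obtained by splitting COMM_WORLD into shared-memory (per-node)
    # communicators; must be called after MPI has been initialised
    local_comm = MPI.COMM_WORLD.Split_type(MPI.COMM_TYPE_SHARED)
    return local_comm.rank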
Example #4
def _process_common(args, mesh, soln, cfg):
    # Prefork to allow us to exec processes after MPI is initialised
    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork

        enable_prefork()

    # Import but do not initialise MPI
    from mpi4py import MPI

    # Manually initialise MPI
    MPI.Init()

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # Execute!
    solver.run()
Example #5
    def _process_common(self, args, mesh, soln, cfg):
        # Prefork to allow us to exec processes after MPI is initialised
        if hasattr(os, 'fork'):
            from pytools.prefork import enable_prefork

            enable_prefork()

        # Ensure MPI is suitably cleaned up
        register_finalize_handler()

        # Create a backend
        backend = get_backend(args.backend, cfg)

        # Get the mapping from physical ranks to MPI ranks
        rallocs = get_rank_allocation(mesh, cfg)

        # Construct the solver
        self.solver = get_solver(backend, rallocs, mesh, soln, cfg)
Example #6
def main():
    ap = ArgumentParser(prog='pyfr-sim', description='Runs a PyFR simulation')
    ap.add_argument('--verbose', '-v', action='count')
    ap.add_argument('--backend', '-b', default='cuda', help='Backend to use')
    ap.add_argument('--progress', '-p', action='store_true',
                    help='show a progress bar')
    ap.add_argument('--nansweep', '-n', metavar='N', type=int,
                    help='check for NaNs every N steps')

    sp = ap.add_subparsers(help='sub-command help')

    ap_run = sp.add_parser('run', help='run --help')
    ap_run.add_argument('mesh', help='mesh file')
    ap_run.add_argument('cfg', type=FileType('r'), help='config file')
    ap_run.set_defaults(process=process_run)

    ap_restart = sp.add_parser('restart', help='restart --help')
    ap_restart.add_argument('mesh', help='mesh file')
    ap_restart.add_argument('soln', help='solution file')
    ap_restart.add_argument('cfg', nargs='?', type=FileType('r'),
                            help='new config file')
    ap_restart.set_defaults(process=process_restart)

    # Parse the arguments
    args = ap.parse_args()
    mesh, soln, cfg = args.process(args)

    # Prefork to allow us to exec processes after MPI is initialised
    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork

        enable_prefork()

    # Import and hence initialise MPI
    from mpi4py import MPI

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # NaN sweeping
    if args.nansweep:
        def nansweep(intg):
            if intg.nsteps % args.nansweep == 0:
                if any(np.isnan(np.sum(s)) for s in intg.soln):
                    raise RuntimeError('NaNs detected at t = {}'
                                       .format(intg.tcurr))
        solver.completed_step_handlers.append(nansweep)

    # Execute!
    solver.run()
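
For reference, a guess at how the pyfr-sim command line defined above would be invoked, based purely on the argparse configuration (file names are placeholders):

# Start a new simulation on the CUDA backend with a progress bar
pyfr-sim -b cuda -p run mesh.pyfrm config.ini

# Restart from a saved solution, checking for NaNs every 50 steps
pyfr-sim -n 50 restart mesh.pyfrm solution.pyfrs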
Example #7
def main():
    ap = ArgumentParser(prog='pyfr-sim', description='Runs a PyFR simulation')
    ap.add_argument('--verbose', '-v', action='count')
    ap.add_argument('--backend', '-b', default='cuda', help='Backend to use')
    ap.add_argument('--progress',
                    '-p',
                    action='store_true',
                    help='show a progress bar')
    ap.add_argument('--nansweep',
                    '-n',
                    metavar='N',
                    type=int,
                    help='check for NaNs every N steps')

    sp = ap.add_subparsers(help='sub-command help')

    ap_run = sp.add_parser('run', help='run --help')
    ap_run.add_argument('mesh', help='mesh file')
    ap_run.add_argument('cfg', type=FileType('r'), help='config file')
    ap_run.set_defaults(process=process_run)

    ap_restart = sp.add_parser('restart', help='restart --help')
    ap_restart.add_argument('mesh', help='mesh file')
    ap_restart.add_argument('soln', help='solution file')
    ap_restart.add_argument('cfg',
                            nargs='?',
                            type=FileType('r'),
                            help='new config file')
    ap_restart.set_defaults(process=process_restart)

    # Parse the arguments
    args = ap.parse_args()
    mesh, soln, cfg = args.process(args)

    # Prefork to allow us to exec processes after MPI is initialised
    if hasattr(os, 'fork'):
        from pytools.prefork import enable_prefork

        enable_prefork()

    # Import and hence initialise MPI
    from mpi4py import MPI

    # Ensure MPI is suitably cleaned up
    register_finalize_handler()

    # Create a backend
    backend = get_backend(args.backend, cfg)

    # Get the mapping from physical ranks to MPI ranks
    rallocs = get_rank_allocation(mesh, cfg)

    # Construct the solver
    solver = get_solver(backend, rallocs, mesh, soln, cfg)

    # If we are running interactively then create a progress bar
    if args.progress and MPI.COMM_WORLD.rank == 0:
        pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)

        # Register a callback to update the bar after each step
        callb = lambda intg: pb.advance_to(intg.tcurr)
        solver.completed_step_handlers.append(callb)

    # NaN sweeping
    if args.nansweep:

        def nansweep(intg):
            if intg.nsteps % args.nansweep == 0:
                if any(np.isnan(np.sum(s)) for s in intg.soln):
                    raise RuntimeError('NaNs detected at t = {}'.format(
                        intg.tcurr))

        solver.completed_step_handlers.append(nansweep)

    # Execute!
    solver.run()
Example #8
__author__ = "W.R.Saunders"
__copyright__ = "Copyright 2016, W.R.Saunders"
__license__ = "GPL"


import mpi4py
mpi4py.rc.initialize = False
mpi4py.rc.finalize = True
from mpi4py import MPI as _MPI
_is_init = _MPI.Is_initialized()

if _is_init:
    print("Warning MPI was initialised before prefork, this is not supported with OpenMPI.")

from pytools import prefork
prefork.enable_prefork()

__all__ = [ 'mpi',
            'access',
            'cell',
            'data',
            'domain',
            'halo',
            'host',
            'kernel',
            'loop',
            'method',
            'pairloop',
            'pio',
            'runtime',
            'state',