Example #1
def run_main(args):
    """Top-level function that controls what mode of simulation (standard/optimsation/benchmark etc...) is run.

    Args:
        args (dict): Namespace with input arguments from command line or api.
    """

    numbermodelruns = args.n
    inputdirectory = os.path.dirname(os.path.abspath(args.inputfile))
    inputfile = os.path.abspath(
        os.path.join(inputdirectory, os.path.basename(args.inputfile)))

    # Create a separate namespace that users can access in any Python code blocks in the input file
    usernamespace = {
        'c': c,
        'e0': e0,
        'm0': m0,
        'z0': z0,
        'number_model_runs': numbermodelruns,
        'input_directory': inputdirectory
    }

    # Process for Taguchi optimisation
    if args.opt_taguchi:
        if args.benchmark:
            raise GeneralError(
                'Taguchi optimisation should not be used with benchmarking mode'
            )
        from gprMax.optimisation_taguchi import run_opt_sim
        run_opt_sim(args, numbermodelruns, inputfile, usernamespace)

    # Process for benchmarking simulation
    elif args.benchmark:
        run_benchmark_sim(args, inputfile, usernamespace)

    # Process for standard simulation
    else:
        # Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
        if args.mpi:
            if args.benchmark:
                raise GeneralError(
                    'MPI should not be used with benchmarking mode')
            if numbermodelruns == 1:
                raise GeneralError(
                    'MPI is not beneficial when there is only one model to run'
                )
            run_mpi_sim(args, numbermodelruns, inputfile, usernamespace)
        # Standard behaviour - part of a job array on Open Grid Scheduler/Grid Engine with each model parallelised with OpenMP
        elif args.taskid:
            if args.benchmark:
                raise GeneralError(
                    'A job array should not be used with benchmarking mode')
            if numbermodelruns == 1:
                raise GeneralError(
                    'A job array is not beneficial when there is only one model to run'
                )
            run_job_array_sim(args, numbermodelruns, inputfile, usernamespace)
        # Standard behaviour - models run serially with each model parallelised with OpenMP
        else:
            run_std_sim(args, numbermodelruns, inputfile, usernamespace)
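
Example #1 reads n, inputfile, mpi, benchmark, taskid, and opt_taguchi off the args object. A minimal sketch of driving it from Python rather than the command line, using types.SimpleNamespace as a stand-in for the argparse.Namespace the CLI would normally build ('model.in' is a hypothetical path):

from types import SimpleNamespace

# Stand-in for the argparse.Namespace built by the CLI; every attribute
# read by run_main above must be present.
args = SimpleNamespace(
    inputfile='model.in',  # hypothetical input file
    n=1,                   # number of model runs
    mpi=False,
    benchmark=False,
    taskid=None,
    opt_taguchi=False,
)
# run_main(args)  # would dispatch to run_std_sim for this configuration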
Example #2
def run_main(args):
    """Top-level function that controls what mode of simulation (standard/optimsation/benchmark etc...) is run.

    Args:
        args (dict): Namespace with input arguments from command line or api.
    """
    
    with open_path_file(args.inputfile) as inputfile:

        # Get information about host machine
        hostinfo = get_host_info()
        hyperthreading = ', {} cores with Hyper-Threading'.format(hostinfo['logicalcores']) if hostinfo['hyperthreading'] else ''
        print('\nHost: {}; {} x {} ({} cores{}); {} RAM; {}'.format(hostinfo['machineID'], hostinfo['sockets'], hostinfo['cpuID'], hostinfo['physicalcores'], hyperthreading, human_size(hostinfo['ram'], a_kilobyte_is_1024_bytes=True), hostinfo['osversion']))

        # Create a separate namespace that users can access in any Python code blocks in the input file
        usernamespace = {'c': c, 'e0': e0, 'm0': m0, 'z0': z0, 'number_model_runs': args.n, 'inputfile': os.path.abspath(inputfile.name)}

        #######################################
        # Process for benchmarking simulation #
        #######################################
        if args.benchmark:
            if args.mpi or args.opt_taguchi or args.task or args.n > 1:
                raise GeneralError('Benchmarking mode cannot be combined with MPI, job array, or Taguchi optimisation modes, or multiple model runs.')
            run_benchmark_sim(args, inputfile, usernamespace)

        ####################################################
        # Process for simulation with Taguchi optimisation #
        ####################################################
        elif args.opt_taguchi:
            if args.mpi_worker: # Special case for MPI spawned workers - they do not need to enter the Taguchi optimisation mode
                run_mpi_sim(args, inputfile, usernamespace)
            else:
                from gprMax.optimisation_taguchi import run_opt_sim
                run_opt_sim(args, inputfile, usernamespace)

        ################################################
        # Process for standard simulation (CPU or GPU) #
        ################################################
        else:
            # Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            if args.mpi:
                if args.n == 1:
                    raise GeneralError('MPI is not beneficial when there is only one model to run')
                if args.task:
                    raise GeneralError('MPI cannot be combined with job array mode')
                run_mpi_sim(args, inputfile, usernamespace)
        
            # Standard behaviour - models run serially with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            else:
                if args.task and args.restart:
                    raise GeneralError('Job array and restart modes cannot be used together')
                run_std_sim(args, inputfile, usernamespace)
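
Examples #2 and later wrap the input in open_path_file, whose definition is not shown here. A minimal sketch consistent with how it is used above (a context manager yielding an open file object whose .name attribute is read); the real gprMax helper may do more:

import contextlib
import os

@contextlib.contextmanager
def open_path_file(path):
    # Hypothetical stand-in: resolve the path and yield an open text file,
    # closing it on exit, which matches the 'with ... as inputfile' usage
    # and the os.path.abspath(inputfile.name) call above.
    f = open(os.path.abspath(path))
    try:
        yield f
    finally:
        f.close()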
Example #3
def main():
    """This is the main function for gprMax."""
    
    # Print gprMax logo, version, and licensing/copyright information
    logo(gprMax.__version__ + ' (Bowmore)')

    # Parse command line arguments
    parser = argparse.ArgumentParser(prog='gprMax', description='Electromagnetic modelling software based on the Finite-Difference Time-Domain (FDTD) method')
    parser.add_argument('inputfile', help='path to and name of the input file')
    parser.add_argument('-n', default=1, type=int, help='number of times to run the input file')
    parser.add_argument('-mpi', action='store_true', default=False, help='switch on MPI task farm')
    parser.add_argument('-benchmark', action='store_true', default=False, help='switch on benchmarking mode')
    parser.add_argument('--geometry-only', action='store_true', default=False, help='only build model and produce geometry file(s)')
    parser.add_argument('--write-python', action='store_true', default=False, help='write an input file after any Python code blocks in the original input file have been processed')
    parser.add_argument('--opt-taguchi', action='store_true', default=False, help='optimise parameters using the Taguchi optimisation method')
    args = parser.parse_args()
    numbermodelruns = args.n
    inputdirectory = os.path.dirname(os.path.abspath(args.inputfile)) + os.sep
    inputfile = inputdirectory + os.path.basename(args.inputfile)
    
    # Create a separate namespace that users can access in any Python code blocks in the input file
    usernamespace = {'c': c, 'e0': e0, 'm0': m0, 'z0': z0, 'number_model_runs': numbermodelruns, 'inputdirectory': inputdirectory}

    # Process for Taguchi optimisation
    if args.opt_taguchi:
        if args.benchmark:
            raise GeneralError('Taguchi optimisation should not be used with benchmarking mode')
        from gprMax.optimisation_taguchi import run_opt_sim
        run_opt_sim(args, numbermodelruns, inputfile, usernamespace)

    # Process for benchmarking simulation
    elif args.benchmark:
        run_benchmark_sim(args, inputfile, usernamespace)

    # Process for standard simulation
    else:
        # Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
        if args.mpi:
            if args.benchmark:
                raise GeneralError('MPI should not be used with benchmarking mode')
            if numbermodelruns == 1:
                raise GeneralError('MPI is not beneficial when there is only one model to run')
            run_mpi_sim(args, numbermodelruns, inputfile, usernamespace)
        # Standard behaviour - models run serially with each model parallelised with OpenMP
        else:
            run_std_sim(args, numbermodelruns, inputfile, usernamespace)

        print('\nSimulation completed.\n{}\n'.format(68*'*'))
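
Every variant raises GeneralError when an invalid combination of modes is requested. A minimal sketch, assuming it is a plain Exception subclass (the real gprMax class may add its own message formatting):

class GeneralError(Exception):
    """Hypothetical stand-in for gprMax's GeneralError.

    Raised when the requested combination of simulation modes is invalid,
    e.g. MPI together with benchmarking.
    """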
Example #4
def run_main(args):
    """Top-level function that controls what mode of simulation (standard/optimsation/benchmark etc...) is run.

    Args:
        args (dict): Namespace with input arguments from command line or api.
    """

    numbermodelruns = args.n
    inputdirectory = os.path.dirname(os.path.abspath(args.inputfile))
    inputfile = os.path.abspath(os.path.join(inputdirectory, os.path.basename(args.inputfile)))

    # Create a separate namespace that users can access in any Python code blocks in the input file
    usernamespace = {'c': c, 'e0': e0, 'm0': m0, 'z0': z0, 'number_model_runs': numbermodelruns, 'input_directory': inputdirectory}

    # Process for Taguchi optimisation
    if args.opt_taguchi:
        if args.benchmark:
            raise GeneralError('Taguchi optimisation should not be used with benchmarking mode')
        from gprMax.optimisation_taguchi import run_opt_sim
        run_opt_sim(args, numbermodelruns, inputfile, usernamespace)

    # Process for benchmarking simulation
    elif args.benchmark:
        run_benchmark_sim(args, inputfile, usernamespace)

    # Process for standard simulation
    else:
        # Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
        if args.mpi:
            if args.benchmark:
                raise GeneralError('MPI should not be used with benchmarking mode')
            if numbermodelruns == 1:
                raise GeneralError('MPI is not beneficial when there is only one model to run')
            run_mpi_sim(args, numbermodelruns, inputfile, usernamespace)
        # Standard behaviour - part of a job array on Open Grid Scheduler/Grid Engine with each model parallelised with OpenMP
        elif args.taskid:
            if args.benchmark:
                raise GeneralError('A job array should not be used with benchmarking mode')
            if numbermodelruns == 1:
                raise GeneralError('A job array is not beneficial when there is only one model to run')
            run_job_array_sim(args, numbermodelruns, inputfile, usernamespace)
        # Standard behaviour - models run serially with each model parallelised with OpenMP
        else:
            run_std_sim(args, numbermodelruns, inputfile, usernamespace)
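
The dispatch order in Examples #1 and #4 is fixed: Taguchi optimisation is checked first, then benchmarking, then MPI, then job array, then serial execution. A sketch that makes the precedence explicit (error checks omitted; the returned labels are illustrative, not gprMax API):

def select_mode(args):
    # Mirrors the if/elif ladder above; returns the branch that would run.
    if args.opt_taguchi:
        return 'taguchi'
    if args.benchmark:
        return 'benchmark'
    if args.mpi:
        return 'mpi_task_farm'
    if args.taskid:
        return 'job_array'
    return 'standard'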
Example #5
def run_main(args):
    """
    Top-level function that controls what mode of simulation (standard/optimsation/benchmark etc...) is run.

    Args:
        args (dict): Namespace with input arguments from command line or api.
    """

    with open_path_file(args.inputfile) as inputfile:

        # Get information about host machine
        hostinfo = get_host_info()
        hyperthreading = ', {} cores with Hyper-Threading'.format(hostinfo['logicalcores']) if hostinfo['hyperthreading'] else ''
        print('\nHost: {}; {} x {} ({} cores{}); {} RAM; {}'.format(hostinfo['machineID'], hostinfo['sockets'], hostinfo['cpuID'], hostinfo['physicalcores'], hyperthreading, human_size(hostinfo['ram'], a_kilobyte_is_1024_bytes=True), hostinfo['osversion']))

        # Get information/setup Nvidia GPU(s)
        if args.gpu is not None:
            # Extract first item of list, either True to automatically determine device ID,
            # or an integer to manually specify device ID
            args.gpu = args.gpu[0]
            gpus = detect_gpus()

            # If a device ID is specified check it is valid
            if not isinstance(args.gpu, bool):
                if args.gpu > len(gpus) - 1:
                    raise GeneralError('GPU with device ID {} does not exist'.format(args.gpu))
                # Set args.gpu to GPU object to access elsewhere
                args.gpu = next(gpu for gpu in gpus if gpu.deviceID == args.gpu)

            # If no device ID is specified
            else:
                # If in MPI mode then set args.gpu to list of available GPUs
                if args.mpi:
                    if args.mpi - 1 > len(gpus):
                        raise GeneralError('Too many MPI tasks requested ({}). The number of MPI tasks requested can only be a maximum of the number of GPU(s) detected plus one, i.e. {} GPU worker tasks + 1 CPU master task'.format(args.mpi, len(gpus)))
                    args.gpu = gpus
                # If benchmarking mode then set args.gpu to list of available GPUs
                elif args.benchmark:
                    args.gpu = gpus
                # Otherwise set args.gpu to GPU object with default device ID (0) to access elsewhere
                else:
                    args.gpu = next(gpu for gpu in gpus if gpu.deviceID == 0)

        # Create a separate namespace that users can access in any Python code blocks in the input file
        usernamespace = {'c': c, 'e0': e0, 'm0': m0, 'z0': z0, 'number_model_runs': args.n, 'inputfile': os.path.abspath(inputfile.name)}

        #######################################
        # Process for benchmarking simulation #
        #######################################
        if args.benchmark:
            if args.mpi or args.opt_taguchi or args.task or args.n > 1:
                raise GeneralError('Benchmarking mode cannot be combined with MPI, job array, or Taguchi optimisation modes, or multiple model runs.')
            run_benchmark_sim(args, inputfile, usernamespace)

        ####################################################
        # Process for simulation with Taguchi optimisation #
        ####################################################
        elif args.opt_taguchi:
            if args.mpi_worker: # Special case for MPI spawned workers - they do not need to enter the Taguchi optimisation mode
                run_mpi_sim(args, inputfile, usernamespace)
            else:
                from gprMax.optimisation_taguchi import run_opt_sim
                run_opt_sim(args, inputfile, usernamespace)

        ################################################
        # Process for standard simulation (CPU or GPU) #
        ################################################
        else:
            # Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            if args.mpi:
                if args.n == 1:
                    raise GeneralError('MPI is not beneficial when there is only one model to run')
                if args.task:
                    raise GeneralError('MPI cannot be combined with job array mode')
                run_mpi_sim(args, inputfile, usernamespace)

            # Standard behaviour - models run serially with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            else:
                if args.task and args.restart:
                    raise GeneralError('Job array and restart modes cannot be used together')
                run_std_sim(args, inputfile, usernamespace)
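
Example #5 picks a GPU object out of the detected list by matching its deviceID attribute with a generator expression. The pattern in isolation, with a hypothetical GPU record standing in for whatever detect_gpus() returns (the source only shows that the objects expose deviceID):

from dataclasses import dataclass

@dataclass
class GPU:
    # Hypothetical stand-in for the objects returned by detect_gpus().
    deviceID: int

gpus = [GPU(0), GPU(1)]
requested = 1
# Same pattern as above. next() raises StopIteration when nothing matches,
# which is why Example #5 bounds-checks the requested ID first.
selected = next(gpu for gpu in gpus if gpu.deviceID == requested)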
Example #6
def run_main(args):
    """
    Top-level function that controls what mode of simulation (standard/optimsation/benchmark etc...) is run.

    Args:
        args (dict): Namespace with input arguments from command line or api.
    """

    # Print gprMax logo, version, and licensing/copyright information
    logo(__version__ + ' (' + codename + ')')

    with open_path_file(args.inputfile) as inputfile:

        # Get information about host machine
        hostinfo = get_host_info()
        hyperthreading = ', {} cores with Hyper-Threading'.format(
            hostinfo['logicalcores']) if hostinfo['hyperthreading'] else ''
        print('\nHost: {} | {} | {} x {} ({} cores{}) | {} RAM | {}'.format(
            hostinfo['hostname'], hostinfo['machineID'], hostinfo['sockets'],
            hostinfo['cpuID'], hostinfo['physicalcores'], hyperthreading,
            human_size(hostinfo['ram'],
                       a_kilobyte_is_1024_bytes=True), hostinfo['osversion']))

        # Get information/setup any Nvidia GPU(s)
        if args.gpu is not None:
            # Flatten a list of lists
            if any(isinstance(element, list) for element in args.gpu):
                args.gpu = [val for sublist in args.gpu for val in sublist]
            gpus, allgpustext = detect_check_gpus(args.gpu)
            print('GPU(s) detected: {}'.format(' | '.join(allgpustext)))

            # If in MPI mode or benchmarking provide list of GPU objects, otherwise
            # provide single GPU object
            if args.mpi or args.mpi_no_spawn or args.benchmark:
                args.gpu = gpus
            else:
                args.gpu = gpus[0]

        # Create a separate namespace that users can access in any Python code blocks in the input file
        usernamespace = {
            'c': c,
            'e0': e0,
            'm0': m0,
            'z0': z0,
            'number_model_runs': args.n,
            'inputfile': os.path.abspath(inputfile.name)
        }

        #######################################
        # Process for benchmarking simulation #
        #######################################
        if args.benchmark:
            if args.mpi or args.opt_taguchi or args.task or args.n > 1:
                raise GeneralError(
                    'Benchmarking mode cannot be combined with MPI, job array, or Taguchi optimisation modes, or multiple model runs.'
                )
            run_benchmark_sim(args, inputfile, usernamespace)

        ####################################################
        # Process for simulation with Taguchi optimisation #
        ####################################################
        elif args.opt_taguchi:
            if args.mpi_worker:  # Special case for MPI spawned workers - they do not need to enter the Taguchi optimisation mode
                run_mpi_sim(args, inputfile, usernamespace)
            else:
                from gprMax.optimisation_taguchi import run_opt_sim
                run_opt_sim(args, inputfile, usernamespace)

        ################################################
        # Process for standard simulation (CPU or GPU) #
        ################################################
        else:
            # Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            if args.mpi:
                if args.n == 1:
                    raise GeneralError(
                        'MPI is not beneficial when there is only one model to run'
                    )
                if args.task:
                    raise GeneralError(
                        'MPI cannot be combined with job array mode')
                run_mpi_sim(args, inputfile, usernamespace)

            # Alternate MPI configuration that does not use MPI spawn mechanism
            elif args.mpi_no_spawn:
                if args.n == 1:
                    raise GeneralError(
                        'MPI is not beneficial when there is only one model to run'
                    )
                if args.task:
                    raise GeneralError(
                        'MPI cannot be combined with job array mode')
                run_mpi_no_spawn_sim(args, inputfile, usernamespace)

            # Standard behaviour - models run serially with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            else:
                if args.task and args.restart:
                    raise GeneralError(
                        'Job array and restart modes cannot be used together')
                run_std_sim(args, inputfile, usernamespace)
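
The flattening step at the top of Example #6 normalises args.gpu when it arrives as a list of lists, as can happen when an argparse option with nargs='+' and action='append' is given more than once (that cause is an assumption; the source only shows the flatten). The idiom in isolation:

args_gpu = [[0, 1], [2]]  # e.g. '-gpu 0 1 -gpu 2' under the assumed parser setup
if any(isinstance(element, list) for element in args_gpu):
    args_gpu = [val for sublist in args_gpu for val in sublist]
print(args_gpu)  # [0, 1, 2]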
Example #7
def run_main(args):
    """Top-level function that controls what mode of simulation (standard/optimsation/benchmark etc...) is run.

    Args:
        args (dict): Namespace with input arguments from command line or api.
    """

    numbermodelruns = args.n
    with open_path_file(args.inputfile) as inputfile:

        # Get information about host machine
        hostinfo = get_host_info()
        print('\nHost: {}; {} ({} cores); {} RAM; {}'.format(
            hostinfo['machineID'], hostinfo['cpuID'], hostinfo['cpucores'],
            human_size(hostinfo['ram'], a_kilobyte_is_1024_bytes=True),
            hostinfo['osversion']))

        # Create a separate namespace that users can access in any Python code blocks in the input file
        usernamespace = {
            'c': c,
            'e0': e0,
            'm0': m0,
            'z0': z0,
            'number_model_runs': numbermodelruns,
            'input_directory': os.path.dirname(os.path.abspath(inputfile.name))
        }

        #######################################
        # Process for benchmarking simulation #
        #######################################
        if args.benchmark:
            run_benchmark_sim(args, inputfile, usernamespace)

        ####################################################
        # Process for simulation with Taguchi optimisation #
        ####################################################
        elif args.opt_taguchi:
            if args.benchmark:
                raise GeneralError(
                    'Taguchi optimisation should not be used with benchmarking mode'
                )
            from gprMax.optimisation_taguchi import run_opt_sim
            run_opt_sim(args, numbermodelruns, inputfile, usernamespace)

        ################################################
        # Process for standard simulation (CPU or GPU) #
        ################################################
        else:
            # Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            if args.mpi:
                if args.benchmark:
                    raise GeneralError(
                        'MPI should not be used with benchmarking mode')
                if numbermodelruns == 1:
                    raise GeneralError(
                        'MPI is not beneficial when there is only one model to run'
                    )
                run_mpi_sim(args, numbermodelruns, inputfile, usernamespace)

            # Standard behaviour - part of a job array on Open Grid Scheduler/Grid Engine with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            elif args.task:
                if args.benchmark:
                    raise GeneralError(
                        'A job array should not be used with benchmarking mode'
                    )
                run_job_array_sim(args, numbermodelruns, inputfile,
                                  usernamespace)

            # Standard behaviour - models run serially with each model parallelised with OpenMP (CPU) or CUDA (GPU)
            else:
                run_std_sim(args, numbermodelruns, inputfile, usernamespace)
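
Examples #2, #5, #6, and #7 all assume get_host_info() returns a dict, but they read different keys (Example #7 uses cpucores where the others use sockets, physicalcores, logicalcores, and hyperthreading). A sketch of the shape Example #7 consumes, with placeholder values; the real helper queries the hardware:

import platform

def get_host_info():
    # Hypothetical stand-in returning only the keys Example #7 reads.
    return {
        'machineID': platform.node(),
        'cpuID': platform.processor() or 'unknown CPU',
        'cpucores': 4,           # placeholder core count
        'ram': 8 * 1024 ** 3,    # bytes
        'osversion': platform.platform(),
    }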
Example #8
def main():
    """This is the main function for gprMax."""

    # Print gprMax logo, version, and licensing/copyright information
    logo(gprMax.__version__ + ' (Bowmore)')

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        prog='gprMax',
        description='Electromagnetic modelling software based on the '
                    'Finite-Difference Time-Domain (FDTD) method')
    parser.add_argument('inputfile', help='path to and name of the input file')
    parser.add_argument('-n',
                        default=1,
                        type=int,
                        help='number of times to run the input file')
    parser.add_argument('-mpi',
                        action='store_true',
                        default=False,
                        help='switch on MPI')
    parser.add_argument('--geometry-only',
                        action='store_true',
                        default=False,
                        help='only build model and produce geometry file(s)')
    parser.add_argument('--write-python',
                        action='store_true',
                        default=False,
                        help='write an input file after any Python code '
                             'blocks in the original input file have been '
                             'processed')
    parser.add_argument(
        '--opt-taguchi',
        action='store_true',
        default=False,
        help='optimise parameters using the Taguchi optimisation method')
    args = parser.parse_args()
    numbermodelruns = args.n
    inputdirectory = os.path.dirname(os.path.abspath(args.inputfile)) + os.sep
    inputfile = inputdirectory + os.path.basename(args.inputfile)

    # Create a separate namespace that users can access in any Python code blocks in the input file
    usernamespace = {
        'c': c,
        'e0': e0,
        'm0': m0,
        'z0': z0,
        'number_model_runs': numbermodelruns,
        'inputdirectory': inputdirectory
    }

    # Process for Taguchi optimisation
    if args.opt_taguchi:
        from gprMax.optimisation_taguchi import run_opt_sim
        run_opt_sim(args, numbermodelruns, inputfile, usernamespace)

    # Process for standard simulation
    else:
        if args.mpi:  # Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
            if numbermodelruns == 1:
                raise GeneralError(
                    'MPI is not beneficial when there is only one model to run'
                )
            run_mpi_sim(args, numbermodelruns, inputfile, usernamespace)
        else:  # Standard behaviour - models run serially with each model parallelised with OpenMP
            run_std_sim(args, numbermodelruns, inputfile, usernamespace)

        print('\nSimulation completed.\n{}\n'.format(68 * '*'))
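
Example #8's parser can be exercised directly by handing parse_args an argument list instead of letting it read sys.argv, which is a quick way to confirm what main() would see ('model.in' is a hypothetical path; only a subset of the arguments is rebuilt here):

import argparse

parser = argparse.ArgumentParser(prog='gprMax')
parser.add_argument('inputfile')
parser.add_argument('-n', default=1, type=int)
parser.add_argument('-mpi', action='store_true', default=False)
args = parser.parse_args(['model.in', '-n', '5', '-mpi'])
print(args.inputfile, args.n, args.mpi)  # model.in 5 True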