Example #1
def _perform_common_init_tasks():
    from hoomd_script import update
    from hoomd_script import group
    from hoomd_script import compute

    # create the sorter
    globals.sorter = update.sort()

    # create the default compute.thermo on the all group
    util._disable_status_lines = True
    all = group.all()
    compute._get_unique_thermo(group=all)
    util._disable_status_lines = False

    # set up Communicator, and register it with the System
    if hoomd.is_MPI_available():
        cpp_decomposition = globals.system_definition.getParticleData().getDomainDecomposition()
        if cpp_decomposition is not None:
            # create the c++ Communicator
            if not globals.exec_conf.isCUDAEnabled():
                cpp_communicator = hoomd.Communicator(globals.system_definition, cpp_decomposition)
            else:
                cpp_communicator = hoomd.CommunicatorGPU(globals.system_definition, cpp_decomposition)

            # set Communicator in C++ System
            globals.system.setCommunicator(cpp_communicator)
Example #2
    def __init__(self, group, P, r):
        util.print_status_line()

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData(
            ).getDomainDecomposition():
                globals.msg.error(
                    "constrain.sphere is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error initializing constraint force.")

        # initialize the base class
        _constraint_force.__init__(self)

        # create the c++ mirror class
        P = hoomd.make_scalar3(P[0], P[1], P[2])
        if not globals.exec_conf.isCUDAEnabled():
            self.cpp_force = hoomd.ConstraintSphere(globals.system_definition,
                                                    group.cpp_group, P, r)
        else:
            self.cpp_force = hoomd.ConstraintSphereGPU(
                globals.system_definition, group.cpp_group, P, r)

        globals.system.addCompute(self.cpp_force, self.force_name)

        # store metadata
        self.group = group
        self.P = P
        self.r = r
        self.metadata_fields = ['group', 'P', 'r']
Example #3
def _perform_common_init_tasks():
    from hoomd_script import update;
    from hoomd_script import group;
    from hoomd_script import compute;

    # create the sorter, using the evil import __main__ trick to provide the user with a default variable
    import __main__;
    __main__.sorter = update.sort();
    
    # create the default compute.thermo on the all group
    util._disable_status_lines = True;
    all = group.all();
    compute._get_unique_thermo(group=all);
    util._disable_status_lines = False;

    # set up Communicator, and register it with the System 
    if hoomd.is_MPI_available():
        cpp_decomposition = globals.system_definition.getParticleData().getDomainDecomposition();
        if cpp_decomposition is not None:
            # create the c++ Communicator
            if not globals.exec_conf.isCUDAEnabled():
                cpp_communicator = hoomd.Communicator(globals.system_definition, cpp_decomposition)
            else:
                cpp_communicator = hoomd.CommunicatorGPU(globals.system_definition, cpp_decomposition)

            # set Communicator in C++ System
            globals.system.setCommunicator(cpp_communicator)
Example #4
def _create_domain_decomposition(box):
    if not hoomd.is_MPI_available():
        return None

    # default values for arguments
    nx = ny = nz = 0
    linear = False

    if globals.options.nx is not None:
        nx = globals.options.nx
    if globals.options.ny is not None:
        ny = globals.options.ny
    if globals.options.nz is not None:
        nz = globals.options.nz
    if globals.options.linear is not None:
        linear = globals.options.linear

    if linear is True:
        # set up linear decomposition
        nz = globals.exec_conf.getNRanks()

    # if we are only running on one processor, we use optimized code paths
    # for single-GPU execution
    if globals.exec_conf.getNRanks() == 1:
        return None

    # initialize domain decomposition
    return hoomd.DomainDecomposition(globals.exec_conf, box.getL(), nx, ny, nz)
Example #5
    def __init__(self, x=True, y=True, z=True, tolerance=1.02, maxiter=1, period=1000, phase=-1):
        util.print_status_line();

        # initialize base class
        _updater.__init__(self);

        # balancing cannot be done without mpi
        if not hoomd.is_MPI_available():
            globals.msg.warning("Ignoring balance command, not supported in current configuration.\n")
            return

        # create the c++ mirror class
        if not globals.exec_conf.isCUDAEnabled():
            self.cpp_updater = hoomd.LoadBalancer(globals.system_definition, globals.decomposition.cpp_dd);
        else:
            self.cpp_updater = hoomd.LoadBalancerGPU(globals.system_definition, globals.decomposition.cpp_dd);

        self.setupUpdater(period,phase)

        # stash arguments to metadata
        self.metadata_fields = ['tolerance','maxiter','period','phase']
        self.period = period
        self.phase = phase

        # configure the parameters
        util._disable_status_lines = True
        self.set_params(x,y,z,tolerance, maxiter)
        util._disable_status_lines = False
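A minimal usage sketch for the balance updater above, assuming an MPI-enabled build and an "init.xml" input file (both are assumptions, not part of the example):

from hoomd_script import init, update
init.read_xml(filename="init.xml")           # assumed input file
# rebalance domains every 500 steps, allowing up to 5% measured imbalance
update.balance(tolerance=1.05, period=500)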
Example #6
    def __init__(self, group):
        util.print_status_line();

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("charge.pppm is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error initializing PPPM.")
       
        # initialize the base class
        force._force.__init__(self);
        # create the c++ mirror class

        # update the neighbor list
        neighbor_list = pair._update_global_nlist(0.0)
        neighbor_list.subscribe(lambda: self.log*0.0)
        if not globals.exec_conf.isCUDAEnabled():
            self.cpp_force = hoomd.PPPMForceCompute(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group);
        else:
            self.cpp_force = hoomd.PPPMForceComputeGPU(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group);
        
        globals.system.addCompute(self.cpp_force, self.force_name);
        
        # error check flag - must be set to true by set_params in order for the run() to commence
        self.params_set = False;
        
        # initialize the short range part of electrostatics
        util._disable_status_lines = True;
        self.ewald = pair.ewald(r_cut = 0.0);
        util._disable_status_lines = False;
Example #7
def _create_domain_decomposition(box):
    if not hoomd.is_MPI_available():
        return None

    # default values for arguments
    nx = ny = nz = 0
    linear = False

    if globals.options.nx is not None:
        nx = globals.options.nx
    if globals.options.ny is not None:
        ny = globals.options.ny
    if globals.options.nz is not None:
        nz = globals.options.nz
    if globals.options.linear is not None:
        linear = globals.options.linear

    if linear is True:
        # set up linear decomposition
        nz = globals.exec_conf.getNRanks()

    # if we are only running on one processor, we use optimized code paths
    # for single-GPU execution
    if globals.exec_conf.getNRanks() == 1:
        return None

    # initialize domain decomposition
    return hoomd.DomainDecomposition(globals.exec_conf, box.getL(), nx, ny, nz,
                                     not globals.options.onelevel)
Example #8
def get_partition():
    hoomd_script.context._verify_init()

    if hoomd.is_MPI_available():
        return globals.exec_conf.getPartition()
    else:
        return 0
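One common use of get_partition() (a helper from hoomd_script.comm) is to give each partition its own output file when a job is split with --nrank; a hedged sketch, where the file name and period are assumptions:

from hoomd_script import comm, dump, init
init.read_xml(filename="init.xml")
# one trajectory file per partition
dump.dcd(filename="trajectory.%d.dcd" % comm.get_partition(), period=1000)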
Example #9
def get_partition():
    hoomd_script.context._verify_init();

    if hoomd.is_MPI_available():
        return globals.exec_conf.getPartition()
    else:
        return 0;
Example #10
    def __init__(self, group):
        util.print_status_line();

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("charge.pppm is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error initializing PPPM.")

        # initialize the base class
        force._force.__init__(self);
        # create the c++ mirror class

        # update the neighbor list
        neighbor_list = pair._update_global_nlist(0.0)
        neighbor_list.subscribe(lambda: self.log*0.0)
        if not globals.exec_conf.isCUDAEnabled():
            self.cpp_force = hoomd.PPPMForceCompute(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group);
        else:
            self.cpp_force = hoomd.PPPMForceComputeGPU(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group);

        globals.system.addCompute(self.cpp_force, self.force_name);

        # error check flag - must be set to true by set_params in order for the run() to commence
        self.params_set = False;

        # initialize the short range part of electrostatics
        util._disable_status_lines = True;
        self.ewald = pair.ewald(r_cut = 0.0);
        util._disable_status_lines = False;
Example #11
    def __init__(self, filename="dump", period=None, phase=-1):
        util.print_status_line();

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("dump.pdb is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error writing PDB file.")


        # initialize base class
        analyze._analyzer.__init__(self);

        # create the c++ mirror class
        self.cpp_analyzer = hoomd.PDBDumpWriter(globals.system_definition, filename);

        if period is not None:
            self.setupAnalyzer(period, phase);
            self.enabled = True;
            self.prev_period = 1;
        elif filename != "dump":
            util._disable_status_lines = True;
            self.write(filename);
            util._disable_status_lines = False;
        else:
            self.enabled = False;
Example #12
    def __init__(self, filename="dump", period=None, phase=-1):
        util.print_status_line()

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData(
            ).getDomainDecomposition():
                globals.msg.error(
                    "dump.pdb is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error writing PDB file.")

        # initialize base class
        analyze._analyzer.__init__(self)

        # create the c++ mirror class
        self.cpp_analyzer = hoomd.PDBDumpWriter(globals.system_definition,
                                                filename)

        if period is not None:
            self.setupAnalyzer(period, phase)
            self.enabled = True
            self.prev_period = 1
        elif filename != "dump":
            util._disable_status_lines = True
            self.write(filename)
            util._disable_status_lines = False
        else:
            self.enabled = False
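Per the branches above, dump.pdb either registers a periodic analyzer or performs one immediate write when only a filename is given; a sketch assuming an "init.xml" input:

from hoomd_script import dump, init
init.read_xml(filename="init.xml")
dump.pdb(filename="frames.pdb", period=1000)   # periodic output
dump.pdb(filename="final.pdb")                 # single immediate write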
Example #13
    def __init__(self, group, P, r):
        util.print_status_line();

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("constrain.sphere is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error initializing constraint force.")

        # initialize the base class
        _constraint_force.__init__(self);

        # create the c++ mirror class
        P = hoomd.make_scalar3(P[0], P[1], P[2]);
        if not globals.exec_conf.isCUDAEnabled():
            self.cpp_force = hoomd.ConstraintSphere(globals.system_definition, group.cpp_group, P, r);
        else:
            self.cpp_force = hoomd.ConstraintSphereGPU(globals.system_definition, group.cpp_group, P, r);

        globals.system.addCompute(self.cpp_force, self.force_name);

        # store metadata
        self.group = group
        self.P = P
        self.r = r
        self.metadata_fields = ['group','P', 'r']
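A minimal single-process usage sketch for constrain.sphere; the particle type 'A' and the radius are assumptions, and a plain tuple works for P because __init__ indexes it:

from hoomd_script import constrain, group, init
init.read_xml(filename="init.xml")
tethered = group.type(type='A')   # assumed particle type
# pin the group to a sphere of radius 5 centered at the origin
constrain.sphere(group=tethered, P=(0, 0, 0), r=5.0)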
Example #14
def _perform_common_init_tasks():
    from hoomd_script import update
    from hoomd_script import group
    from hoomd_script import compute

    # create the sorter
    globals.sorter = update.sort()

    # create the default compute.thermo on the all group
    util._disable_status_lines = True
    all = group.all()
    compute._get_unique_thermo(group=all)
    util._disable_status_lines = False

    # set up Communicator, and register it with the System
    if hoomd.is_MPI_available():
        cpp_decomposition = globals.system_definition.getParticleData(
        ).getDomainDecomposition()
        if cpp_decomposition is not None:
            # create the c++ Communicator
            if not globals.exec_conf.isCUDAEnabled():
                cpp_communicator = hoomd.Communicator(
                    globals.system_definition, cpp_decomposition)
            else:
                cpp_communicator = hoomd.CommunicatorGPU(
                    globals.system_definition, cpp_decomposition)

            # set Communicator in C++ System
            globals.system.setCommunicator(cpp_communicator)
Example #15
def get_rank():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getRank()
        else:
            return hoomd.ExecutionConfiguration.guessRank(globals.msg)
    else:
        return 0
Example #16
def get_rank():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getRank()
        else:
            return hoomd.ExecutionConfiguration.guessRank(globals.msg)
    else:
        return 0;
Example #17
    def __init__(self,
                 r=None,
                 rx=None,
                 ry=None,
                 rz=None,
                 P=hoomd.make_scalar3(0, 0, 0),
                 group=None):
        util.print_status_line()
        period = 1

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData(
            ).getDomainDecomposition():
                globals.msg.error(
                    "constrain.ellipsoid is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error initializing updater.")

        # Error out if no radii are set
        if (r is None and rx is None and ry is None and rz is None):
            globals.msg.error(
                "no radii were defined in update.constraint_ellipsoid.\n\n")
            raise RuntimeError("Error initializing updater.")

        # initialize the base class
        _updater.__init__(self)

        # Set parameters
        P = hoomd.make_scalar3(P[0], P[1], P[2])
        if (r is not None): rx = ry = rz = r

        # create the c++ mirror class
        if not globals.exec_conf.isCUDAEnabled():
            if (group is not None):
                self.cpp_updater = hoomd.ConstraintEllipsoid(
                    globals.system_definition, group.cpp_group, P, rx, ry, rz)
            else:
                self.cpp_updater = hoomd.ConstraintEllipsoid(
                    globals.system_definition, globals.group_all.cpp_group, P,
                    rx, ry, rz)
        else:
            if (group is not None):
                self.cpp_updater = hoomd.ConstraintEllipsoidGPU(
                    globals.system_definition, group.cpp_group, P, rx, ry, rz)
            else:
                self.cpp_updater = hoomd.ConstraintEllipsoidGPU(
                    globals.system_definition, globals.group_all.cpp_group, P,
                    rx, ry, rz)
        self.setupUpdater(period)

        # store metadata
        self.group = group
        self.P = P
        self.rx = rx
        self.ry = ry
        self.rz = rz
        self.metadata_fields = ['group', 'P', 'rx', 'ry', 'rz']
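A hedged usage sketch for the ellipsoid constraint above; passing P explicitly as a tuple matches the indexing done in __init__, and the semi-axis values are assumptions:

from hoomd_script import init, update
init.read_xml(filename="init.xml")
# constrain all particles to an ellipsoid with semi-axes 5, 4, 3
update.constraint_ellipsoid(P=(0, 0, 0), rx=5.0, ry=4.0, rz=3.0)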
Example #18
    def __init__(self,
                 filename="dump",
                 period=None,
                 file1=None,
                 file2=None,
                 compress=True):
        util.print_status_line()
        globals.msg.warning(
            "dump.bin is deprecated and will be removed in the next release")

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData(
            ).getDomainDecomposition():
                globals.msg.error(
                    "dump.bin is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error writing restart data.")

        # initialize base class
        analyze._analyzer.__init__(self)

        # create the c++ mirror class
        self.cpp_analyzer = hoomd.HOOMDBinaryDumpWriter(
            globals.system_definition, filename)
        self.cpp_analyzer.enableCompression(compress)

        # handle the alternation setting
        # first, check that they are both set
        if (file1 is not None and file2 is None) or (file2 is not None
                                                     and file1 is None):
            globals.msg.error(
                "file1 and file2 must either both be set or both left as None.\n"
            )
            raise RuntimeError('Error initializing dump.bin')
        if file1 is not None:
            self.cpp_analyzer.setAlternatingWrites(file1, file2)
            if period is None:
                globals.msg.warning(
                    "Alternating file output set for dump.bin, but period is not set.\n"
                )
                globals.msg.warning("No output will be written.\n")

        globals.msg.warning("dump.bin does not support triclinic boxes.\n")
        globals.msg.warning(
            "dump.bin is deprecated and will be replaced in v1.1.0\n")

        if period is not None:
            self.setupAnalyzer(period)
            self.enabled = True
            self.prev_period = 1
        elif filename != "dump":
            util._disable_status_lines = True
            self.write(filename)
            util._disable_status_lines = False
        else:
            self.enabled = False
Example #19
def _create_exec_conf():
    # use a cached execution configuration if available
    if globals.exec_conf is not None:
        return globals.exec_conf

    mpi_available = hoomd.is_MPI_available();
    
    # set the openmp thread limits
    if globals.options.ncpu is not None:
        if globals.options.ncpu > hoomd.get_num_procs():
            globals.msg.warning("Requesting more CPU cores than there are available in the system\n");
        hoomd.set_num_threads(globals.options.ncpu);

    # if no command line options were specified, create a default ExecutionConfiguration
    if globals.options.mode is None:
        if mpi_available:
            if globals.options.nrank is not None:
                exec_conf = hoomd.ExecutionConfiguration(globals.options.min_cpu, globals.options.ignore_display, globals.msg, True, globals.options.nrank);
            else:
                exec_conf = hoomd.ExecutionConfiguration(globals.options.min_cpu, globals.options.ignore_display, globals.msg,True);
        else:
            exec_conf = hoomd.ExecutionConfiguration(globals.options.min_cpu, globals.options.ignore_display, globals.msg);
    else:
        # determine the GPU on which to execute
        if globals.options.gpu is not None:
            gpu_id = int(globals.options.gpu);
        else:
            gpu_id = -1;
        
        # create the specified configuration
        if globals.options.mode == "cpu":
            if mpi_available:
                if globals.options.nrank is not None:
                    exec_conf = hoomd.ExecutionConfiguration(hoomd.ExecutionConfiguration.executionMode.CPU, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg, True, globals.options.nrank); 
                else:
                    exec_conf = hoomd.ExecutionConfiguration(hoomd.ExecutionConfiguration.executionMode.CPU, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg, True);
            else:
                exec_conf = hoomd.ExecutionConfiguration(hoomd.ExecutionConfiguration.executionMode.CPU, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg);
        elif globals.options.mode == "gpu":
            if mpi_available:
                if globals.options.nrank is not None:
                    exec_conf = hoomd.ExecutionConfiguration(hoomd.ExecutionConfiguration.executionMode.GPU, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg, True, globals.options.nrank);
                else:
                    exec_conf = hoomd.ExecutionConfiguration(hoomd.ExecutionConfiguration.executionMode.GPU, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg, True);
            else:
                exec_conf = hoomd.ExecutionConfiguration(hoomd.ExecutionConfiguration.executionMode.GPU, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg);
        else:
            raise RuntimeError("Error initializing");
    
    # if gpu_error_checking is set, enable it on the GPU
    if globals.options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True);
    
    globals.exec_conf = exec_conf;

    return exec_conf;
Example #20
def get_partition():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getPartition()
        else:
            if globals.options.nrank is not None:
                return int(hoomd.ExecutionConfiguration.guessRank(globals.msg)/globals.options.nrank)
            else:
                return 0
    else:
        return 0;
Example #21
def get_num_ranks():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getNRanks()
        else:
            if globals.options.nrank is not None:
                return globals.options.nrank
            else:
                return hoomd.ExecutionConfiguration.getNRanksGlobal()
    else:
        return 1
Example #22
def get_num_ranks():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getNRanks();
        else:
            if globals.options.nrank is not None:
                return globals.options.nrank;
            else:
                return hoomd.ExecutionConfiguration.getNRanksGlobal()
    else:
        return 1;
Example #23
def get_rank():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getRank()
        else:
            if globals.options.nrank is not None:
                # recompute local rank
                return int(hoomd.ExecutionConfiguration.getRankGlobal() % globals.options.nrank)
            else:
                return hoomd.ExecutionConfiguration.getRankGlobal()
    else:
        return 0;
Example #24
def get_partition():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getPartition()
        else:
            if globals.options.nrank is not None:
                # re-compute partition number
                return int(hoomd.ExecutionConfiguration.getRankGlobal()/globals.options.nrank)
            else:
                return 0
    else:
        return 0;
Example #25
def get_rank():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getRank()
        else:
            if globals.options.nrank is not None:
                # recompute local rank
                return int(hoomd.ExecutionConfiguration.getRankGlobal() %
                           globals.options.nrank)
            else:
                return hoomd.ExecutionConfiguration.getRankGlobal()
    else:
        return 0
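get_rank() is typically used to gate side effects to a single process; a minimal sketch, assuming the helper lives in hoomd_script.comm as in the surrounding examples:

from hoomd_script import comm
if comm.get_rank() == 0:
    print("status message from the root rank only")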
Example #26
def get_partition():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getPartition()
        else:
            if globals.options.nrank is not None:
                # re-compute partition number
                return int(hoomd.ExecutionConfiguration.getRankGlobal() /
                           globals.options.nrank)
            else:
                return 0
    else:
        return 0
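A quick worked example of the arithmetic above, in plain Python: with 8 global ranks split by --nrank=4, the partition number is the integer quotient global_rank // nrank.

nrank = 4                      # ranks per partition (--nrank)
for global_rank in range(8):   # 8 MPI ranks in total
    # ranks 0-3 land in partition 0, ranks 4-7 in partition 1
    print("rank %d -> partition %d" % (global_rank, global_rank // nrank))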
Example #27
def get_partition():
    if hoomd.is_MPI_available():
        if init.is_initialized():
            return globals.exec_conf.getPartition()
        else:
            if globals.options.nrank is not None:
                return int(
                    hoomd.ExecutionConfiguration.guessRank(globals.msg) /
                    globals.options.nrank)
            else:
                return 0
    else:
        return 0
Example #28
def _create_domain_decomposition(box):
    if not hoomd.is_MPI_available():
        return None

    # if we are only running on one processor, we use optimized code paths
    # for single-GPU execution
    if globals.exec_conf.getNRanks() == 1:
        return None

    # okay, we want a decomposition but one isn't set, so make a default one
    if globals.decomposition is None:
        # this is happening transparently to the user, so hush this up
        util._disable_status_lines = True
        globals.decomposition = comm.decomposition()
        util._disable_status_lines = False

    return globals.decomposition._make_cpp_decomposition(box)
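The default comm.decomposition() created above can instead be declared explicitly before initialization; a hedged sketch, assuming comm.decomposition accepts nx/ny/nz keywords matching the grid options used elsewhere in this module:

from hoomd_script import comm, init
comm.decomposition(nx=2, ny=2, nz=1)   # request a 2x2x1 processor grid (assumed kwargs)
init.read_xml(filename="init.xml")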
Example #29
def _create_exec_conf():
    # use a cached execution configuration if available
    if globals.exec_conf is not None:
        return globals.exec_conf

    mpi_available = hoomd.is_MPI_available()

    # error out on nyx/flux if the auto mode is set
    if globals.options.mode == 'auto':
        if (re.match("flux*", platform.node()) is not None) or (re.match(
                "nyx*", platform.node()) is not None):
            globals.msg.error(
                "--mode=gpu or --mode=cpu must be specified on nyx/flux\n")
            raise RuntimeError("Error initializing")
        exec_mode = hoomd.ExecutionConfiguration.executionMode.AUTO
    elif globals.options.mode == "cpu":
        exec_mode = hoomd.ExecutionConfiguration.executionMode.CPU
    elif globals.options.mode == "gpu":
        exec_mode = hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults
    if globals.options.gpu is None:
        gpu_id = -1
    else:
        gpu_id = int(globals.options.gpu)

    if globals.options.nrank is None:
        nrank = 0
    else:
        nrank = int(globals.options.nrank)

    # create the specified configuration
    exec_conf = hoomd.ExecutionConfiguration(exec_mode, gpu_id,
                                             globals.options.min_cpu,
                                             globals.options.ignore_display,
                                             globals.msg, nrank)

    # if gpu_error_checking is set, enable it on the GPU
    if globals.options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    globals.exec_conf = exec_conf

    return exec_conf
Example #31
def _create_exec_conf():
    # use a cached execution configuration if available
    if globals.exec_conf is not None:
        return globals.exec_conf

    mpi_available = hoomd.is_MPI_available()

    # error out on nyx/flux if the auto mode is set
    if globals.options.mode == "auto":
        if (re.match("flux*", platform.node()) is not None) or (re.match("nyx*", platform.node()) is not None):
            globals.msg.error("--mode=gpu or --mode=cpu must be specified on nyx/flux\n")
            raise RuntimeError("Error initializing")
        exec_mode = hoomd.ExecutionConfiguration.executionMode.AUTO
    elif globals.options.mode == "cpu":
        exec_mode = hoomd.ExecutionConfiguration.executionMode.CPU
    elif globals.options.mode == "gpu":
        exec_mode = hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults
    if globals.options.gpu is None:
        gpu_id = -1
    else:
        gpu_id = int(globals.options.gpu)

    if globals.options.nrank is None:
        nrank = 0
    else:
        nrank = int(globals.options.nrank)

    # create the specified configuration
    exec_conf = hoomd.ExecutionConfiguration(
        exec_mode, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg, nrank
    )

    # if gpu_error_checking is set, enable it on the GPU
    if globals.options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    globals.exec_conf = exec_conf

    return exec_conf
Example #32
    def __init__(self, r_cut):
        util.print_status_line();

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("wall.lj is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error setting up wall potential.")

        # initialize the base class
        force._force.__init__(self);

        # create the c++ mirror class
        self.cpp_force = hoomd.LJWallForceCompute(globals.system_definition, r_cut);

        # variable for tracking which particle type coefficients have been set
        self.particle_types_set = [];

        globals.system.addCompute(self.cpp_force, self.force_name);
Example #33
    def __init__(self, group, nlist=None):
        util.print_status_line()

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData(
            ).getDomainDecomposition():
                globals.msg.error(
                    "charge.pppm is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error initializing PPPM.")

        # initialize the base class
        force._force.__init__(self)
        # create the c++ mirror class

        # PPPM doesn't really need a neighbor list, so subscribe a callback that returns None
        if nlist is None:
            self.nlist = nl._subscribe_global_nlist(lambda: None)
        else:  # otherwise, subscribe the specified neighbor list
            self.nlist = nlist
            self.nlist.subscribe(lambda: None)
            self.nlist.update_rcut()

        if not globals.exec_conf.isCUDAEnabled():
            self.cpp_force = hoomd.PPPMForceCompute(globals.system_definition,
                                                    self.nlist.cpp_nlist,
                                                    group.cpp_group)
        else:
            self.cpp_force = hoomd.PPPMForceComputeGPU(
                globals.system_definition, self.nlist.cpp_nlist,
                group.cpp_group)

        globals.system.addCompute(self.cpp_force, self.force_name)

        # error check flag - must be set to true by set_params in order for the run() to commence
        self.params_set = False

        # initialize the short range part of electrostatics
        util._disable_status_lines = True
        self.ewald = pair.ewald(r_cut=0.0, nlist=self.nlist)
        util._disable_status_lines = False
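A hedged usage sketch for charge.pppm as constructed above; the grid sizes, interpolation order, and cutoff are assumptions, and set_params is what flips the params_set flag checked before run():

from hoomd_script import charge, group, init
init.read_xml(filename="init.xml")
charged = group.charged()
pppm = charge.pppm(group=charged)
pppm.set_params(Nx=32, Ny=32, Nz=32, order=4, rcut=2.0)   # assumed values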
Example #34
def _create_exec_conf():
    # use a cached execution configuration if available
    if globals.exec_conf is not None:
        return globals.exec_conf

    mpi_available = hoomd.is_MPI_available();

    # error out on nyx/flux if the auto mode is set
    if globals.options.mode == 'auto':
        host = _get_proc_name()
        if "flux" in host or "nyx" in host:
            globals.msg.error("--mode=gpu or --mode=cpu must be specified on nyx/flux\n");
            raise RuntimeError("Error initializing");
        exec_mode = hoomd.ExecutionConfiguration.executionMode.AUTO;
    elif globals.options.mode == "cpu":
        exec_mode = hoomd.ExecutionConfiguration.executionMode.CPU;
    elif globals.options.mode == "gpu":
        exec_mode = hoomd.ExecutionConfiguration.executionMode.GPU;
    else:
        raise RuntimeError("Invalid mode");

    # convert None options to defaults
    if globals.options.gpu is None:
        gpu_id = -1;
    else:
        gpu_id = int(globals.options.gpu);

    if globals.options.nrank is None:
        nrank = 0;
    else:
        nrank = int(globals.options.nrank);

    # create the specified configuration
    exec_conf = hoomd.ExecutionConfiguration(exec_mode, gpu_id, globals.options.min_cpu, globals.options.ignore_display, globals.msg, nrank);

    # if gpu_error_checking is set, enable it on the GPU
    if globals.options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True);

    globals.exec_conf = exec_conf;

    return exec_conf;
Example #35
    def __init__(self, filename="dump", period=None, file1=None, file2=None, compress=True, phase=-1):
        util.print_status_line();
        globals.msg.warning("dump.bin is deprecated and will be removed in the next release");

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("dump.bin is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error writing restart data.")

        # initialize base class
        analyze._analyzer.__init__(self);

        # create the c++ mirror class
        self.cpp_analyzer = hoomd.HOOMDBinaryDumpWriter(globals.system_definition, filename);
        self.cpp_analyzer.enableCompression(compress)

        # handle the alternation setting
        # first, check that they are both set
        if (file1 is not None and file2 is None) or (file2 is not None and file1 is None):
            globals.msg.error("file1 and file2 must either both be set or both left as None.\n");
            raise RuntimeError('Error initializing dump.bin');
        if file1 is not None:
            self.cpp_analyzer.setAlternatingWrites(file1, file2)
            if period is None:
                globals.msg.warning("Alternating file output set for dump.bin, but period is not set.\n");
                globals.msg.warning("No output will be written.\n");

        globals.msg.warning("dump.bin does not support triclinic boxes.\n");
        globals.msg.warning("dump.bin is deprecated and will be replaced in v1.1.0\n");

        if period is not None:
            self.setupAnalyzer(period, phase);
            self.enabled = True;
            self.prev_period = 1;
        elif filename != "dump":
            util._disable_status_lines = True;
            self.write(filename);
            util._disable_status_lines = False;
        else:
            self.enabled = False;
Example #36
    def __init__(self, T, period=1):
        util.print_status_line();

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("update.rescale_temp not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error setting up updater.")
 
        # initialize base class
        _updater.__init__(self);
        
        # setup the variant inputs
        T = variant._setup_variant_input(T);
        
        # create the compute thermo
        thermo = compute._get_unique_thermo(group=globals.group_all);
        
        # create the c++ mirror class
        self.cpp_updater = hoomd.TempRescaleUpdater(globals.system_definition, thermo.cpp_compute, T.cpp_variant);
        self.setupUpdater(period);
Example #37
    def __init__(self, T, period=1):
        util.print_status_line()

        # Error out in MPI simulations
        if hoomd.is_MPI_available():
            if globals.system_definition.getParticleData().getDomainDecomposition():
                globals.msg.error("update.rescale_temp not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error setting up updater.")

        # initialize base class
        _updater.__init__(self)

        # setup the variant inputs
        T = variant._setup_variant_input(T)

        # create the compute thermo
        thermo = compute._get_unique_thermo(group=globals.group_all)

        # create the c++ mirror class
        self.cpp_updater = hoomd.TempRescaleUpdater(globals.system_definition, thermo.cpp_compute, T.cpp_variant)
        self.setupUpdater(period)
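Because T passes through variant._setup_variant_input above, a variant works in place of a constant; a sketch ramping the set point, where the end points and period are assumptions:

from hoomd_script import init, update, variant
init.read_xml(filename="init.xml")
# ramp the target temperature from 1.0 to 2.0 over the first 10000 steps
T_ramp = variant.linear_interp(points=[(0, 1.0), (10000, 2.0)])
update.rescale_temp(T=T_ramp, period=100)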
Example #38
    def __init__(self,
                 x=True,
                 y=True,
                 z=True,
                 tolerance=1.02,
                 maxiter=1,
                 period=1000,
                 phase=-1):
        util.print_status_line()

        # initialize base class
        _updater.__init__(self)

        # balancing cannot be done without mpi
        if not hoomd.is_MPI_available():
            globals.msg.warning(
                "Ignoring balance command, not supported in current configuration.\n"
            )
            return

        # create the c++ mirror class
        if not globals.exec_conf.isCUDAEnabled():
            self.cpp_updater = hoomd.LoadBalancer(globals.system_definition,
                                                  globals.decomposition.cpp_dd)
        else:
            self.cpp_updater = hoomd.LoadBalancerGPU(
                globals.system_definition, globals.decomposition.cpp_dd)

        self.setupUpdater(period, phase)

        # stash arguments to metadata
        self.metadata_fields = ['tolerance', 'maxiter', 'period', 'phase']
        self.period = period
        self.phase = phase

        # configure the parameters
        util._disable_status_lines = True
        self.set_params(x, y, z, tolerance, maxiter)
        util._disable_status_lines = False
Example #39
    def __init__(self, r_cut):
        util.print_status_line()

        # Error out in MPI simulations
        if (hoomd.is_MPI_available()):
            if globals.system_definition.getParticleData(
            ).getDomainDecomposition():
                globals.msg.error(
                    "wall.lj is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error setting up wall potential.")

        # initialize the base class
        force._force.__init__(self)

        # create the c++ mirror class
        self.cpp_force = hoomd.LJWallForceCompute(globals.system_definition,
                                                  r_cut)

        # variable for tracking which particle type coefficients have been set
        self.particle_types_set = []

        globals.system.addCompute(self.cpp_force, self.force_name)
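A minimal sketch for the LJ wall compute above, assuming set_coeff follows the usual hoomd_script per-type coefficient pattern (the coefficient values and the particle type are assumptions):

from hoomd_script import init, wall
init.read_xml(filename="init.xml")   # the walls themselves come from the input file
lj_wall = wall.lj(r_cut=3.0)
lj_wall.set_coeff('A', epsilon=1.0, sigma=1.0, alpha=1.0)   # assumed interface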
Example #40
def _parse_command_line():
    parser = OptionParser();
    parser.add_option("--mode", dest="mode", help="Execution mode (cpu or gpu)");
    parser.add_option("--gpu", dest="gpu", help="GPU on which to execute");
    parser.add_option("--ncpu", dest="ncpu", help="Number of CPU cores on which to execute");
    parser.add_option("--gpu_error_checking", dest="gpu_error_checking", action="store_true", default=False, help="Enable error checking on the GPU");
    parser.add_option("--minimize-cpu-usage", dest="min_cpu", action="store_true", default=False, help="Enable to keep the CPU usage of HOOMD to a bare minimum (will degrade overall performance somewhat)");
    parser.add_option("--ignore-display-gpu", dest="ignore_display", action="store_true", default=False, help="Attempt to avoid running on the display GPU");
    parser.add_option("--notice-level", dest="notice_level", help="Minimum level of notice messages to print");
    parser.add_option("--msg-file", dest="msg_file", help="Name of file to write messages to");
    parser.add_option("--shared-msg-file", dest="shared_msg_file", help="(MPI only) Name of shared file to write message to (append partition #)");
    parser.add_option("--nrank", dest="nrank", help="(MPI) Number of ranks to include in a partition");
    parser.add_option("--nx", dest="nx", help="(MPI) Number of domains along the x-direction");
    parser.add_option("--ny", dest="ny", help="(MPI) Number of domains along the y-direction");
    parser.add_option("--nz", dest="nz", help="(MPI) Number of domains along the z-direction");
    parser.add_option("--linear", dest="linear", action="store_true", default=False, help="(MPI only) Force a slab (1D) decomposition along the z-direction");
    parser.add_option("--onelevel", dest="onelevel", action="store_true", default=False, help="(MPI only) Disable two-level (node-local) decomposition");
    parser.add_option("--user", dest="user", help="User options");

    (cmd_options, args) = parser.parse_args();

    # check for valid mode setting
    if cmd_options.mode is not None:
        if not (cmd_options.mode == "cpu" or cmd_options.mode == "gpu"):
            parser.error("--mode must be either cpu or gpu");

    # check for sane options
    if cmd_options.mode == "cpu" and (cmd_options.gpu is not None):
        parser.error("--mode=cpu cannot be specified along with --gpu")

    if cmd_options.mode == "gpu" and (cmd_options.ncpu is not None):
        parser.error("--mode=gpu cannot be specified along with --ncpu")

    # set the mode to gpu if the gpu # was set
    if cmd_options.gpu is not None and cmd_options.mode is None:
        cmd_options.mode = "gpu"

    # set the mode to cpu if the ncpu was set
    if cmd_options.ncpu is not None and cmd_options.mode is None:
        cmd_options.mode = "cpu"

    # convert ncpu to an integer
    if cmd_options.ncpu is not None:
        try:
            cmd_options.ncpu = int(cmd_options.ncpu);
        except ValueError:
            parser.error('--ncpu must be an integer')

    # convert gpu to an integer
    if cmd_options.gpu:
        try:
            cmd_options.gpu = int(cmd_options.gpu);
        except ValueError:
            parser.error('--gpu must be an integer')

    # convert notice_level to an integer
    if cmd_options.notice_level is not None:
        try:
            cmd_options.notice_level = int(cmd_options.notice_level);
        except ValueError:
            parser.error('--notice-level must be an integer')

    # Convert nx to an integer
    if cmd_options.nx is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --nx option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.nx = int(cmd_options.nx);
        except ValueError:
            parser.error('--nx must be an integer')

    # Convert ny to an integer
    if cmd_options.ny is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --ny option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.ny = int(cmd_options.ny);
        except ValueError:
            parser.error('--ny must be an integer')

    # Convert nz to an integer
    if cmd_options.nz is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --nz option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.nz = int(cmd_options.nz);
        except ValueError:
            parser.error('--nz must be an integer')

    # copy command line options over to global options
    globals.options.mode = cmd_options.mode;
    globals.options.gpu = cmd_options.gpu;
    globals.options.ncpu = cmd_options.ncpu;
    globals.options.gpu_error_checking = cmd_options.gpu_error_checking;
    globals.options.min_cpu = cmd_options.min_cpu;
    globals.options.ignore_display = cmd_options.ignore_display;

    globals.options.nx = cmd_options.nx;
    globals.options.ny = cmd_options.ny;
    globals.options.nz = cmd_options.nz;
    globals.options.linear = cmd_options.linear
    globals.options.onelevel = cmd_options.onelevel

    if cmd_options.notice_level is not None:
        globals.options.notice_level = cmd_options.notice_level;
        globals.msg.setNoticeLevel(globals.options.notice_level);

    if cmd_options.msg_file is not None:
        globals.options.msg_file = cmd_options.msg_file;
        globals.msg.openFile(globals.options.msg_file);

    if cmd_options.shared_msg_file is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("Shared log files are only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        globals.options.shared_msg_file = cmd_options.shared_msg_file;
        globals.msg.setSharedFile(globals.options.shared_msg_file);

    if cmd_options.nrank is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --nrank option is only avaible in MPI builds.\n");
            raise RuntimeError('Error setting option');
        # check validity
        nrank = int(cmd_options.nrank)
        if (hoomd.ExecutionConfiguration.getNRanksGlobal() % nrank):
            globals.msg.error("Total number of ranks is not a multiple of --nrank\n");
            raise RuntimeError('Error checking option');
        globals.options.nrank = nrank

    if cmd_options.user is not None:
        globals.options.user = shlex.split(cmd_options.user);
Example #41
def barrier_all():
    if hoomd.is_MPI_available():
        hoomd.mpi_barrier_world();
Example #42
def get_num_ranks():
    hoomd_script.context._verify_init();
    if hoomd.is_MPI_available():
        return globals.exec_conf.getNRanks();
    else:
        return 1;
Example #43
def barrier_all():
    if hoomd.is_MPI_available():
        hoomd.mpi_barrier_world()
Example #44
def barrier():
    hoomd_script.context._verify_init()

    if hoomd.is_MPI_available():
        globals.exec_conf.barrier()
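barrier() is the usual way to order file I/O across ranks; a sketch in which write_parameters is a hypothetical helper defined here, not part of hoomd_script:

from hoomd_script import comm

def write_parameters():
    # hypothetical helper that writes a small shared file
    open("params.txt", "w").write("T=1.0\n")

if comm.get_rank() == 0:
    write_parameters()
comm.barrier()   # all ranks wait here until rank 0 is done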
Example #45
def barrier():
    hoomd_script.context._verify_init();

    if hoomd.is_MPI_available():
        globals.exec_conf.barrier()
Example #46
def get_num_ranks():
    hoomd_script.context._verify_init()
    if hoomd.is_MPI_available():
        return globals.exec_conf.getNRanks()
    else:
        return 1
Example #47
def run_upto(step, **keywords):
    # determine the number of steps to run
    step = int(step);
    cur_step = globals.system.getCurrentTimeStep();

    if cur_step >= step:
        globals.msg.warning("Requesting run up to a time step that has already passed, doing nothing\n");
        return;

    n_steps = step - cur_step;

    _util._disable_status_lines = True;
    run(n_steps, **keywords);
    _util._disable_status_lines = False;

## Get the current simulation time step
#
# \returns current simulation time step
def get_step():
    # check if initialization has occurred
    if not init.is_initialized():
        globals.msg.error("Cannot get step before initialization\n");
        raise RuntimeError('Error getting step');

    return globals.system.getCurrentTimeStep();

# Check to see if we are built without MPI support and the user used mpirun
if (not hoomd.is_MPI_available()) and ('OMPI_COMM_WORLD_RANK' in os.environ or 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ):
    print('HOOMD-blue is built without MPI support, but seems to have been launched with mpirun');
    print('exiting now to prevent many sequential jobs from starting');
    raise RuntimeError('Error launching hoomd')
Example #48
def _get_proc_name():
    if hoomd.is_MPI_available():
        return hoomd.get_mpi_proc_name()
    else:
        return platform.node()
Example #50
def _parse_command_line():
    parser = OptionParser();
    parser.add_option("--mode", dest="mode", help="Execution mode (cpu or gpu)", default='auto');
    parser.add_option("--gpu", dest="gpu", help="GPU on which to execute");
    parser.add_option("--gpu_error_checking", dest="gpu_error_checking", action="store_true", default=False, help="Enable error checking on the GPU");
    parser.add_option("--minimize-cpu-usage", dest="min_cpu", action="store_true", default=False, help="Enable to keep the CPU usage of HOOMD to a bare minimum (will degrade overall performance somewhat)");
    parser.add_option("--ignore-display-gpu", dest="ignore_display", action="store_true", default=False, help="Attempt to avoid running on the display GPU");
    parser.add_option("--notice-level", dest="notice_level", help="Minimum level of notice messages to print");
    parser.add_option("--msg-file", dest="msg_file", help="Name of file to write messages to");
    parser.add_option("--shared-msg-file", dest="shared_msg_file", help="(MPI only) Name of shared file to write message to (append partition #)");
    parser.add_option("--nrank", dest="nrank", help="(MPI) Number of ranks to include in a partition");
    parser.add_option("--nx", dest="nx", help="(MPI) Number of domains along the x-direction");
    parser.add_option("--ny", dest="ny", help="(MPI) Number of domains along the y-direction");
    parser.add_option("--nz", dest="nz", help="(MPI) Number of domains along the z-direction");
    parser.add_option("--linear", dest="linear", action="store_true", default=False, help="(MPI only) Force a slab (1D) decomposition along the z-direction");
    parser.add_option("--onelevel", dest="onelevel", action="store_true", default=False, help="(MPI only) Disable two-level (node-local) decomposition");
    parser.add_option("--user", dest="user", help="User options");

    (cmd_options, args) = parser.parse_args();

    # check for valid mode setting
    if cmd_options.mode is not None:
        if not (cmd_options.mode == "cpu" or cmd_options.mode == "gpu" or cmd_options.mode == "auto"):
            parser.error("--mode must be either cpu, gpu, or auto");

    # check for sane options
    if cmd_options.mode == "cpu" and (cmd_options.gpu is not None):
        parser.error("--mode=cpu cannot be specified along with --gpu")

    # set the mode to gpu if the gpu # was set
    if cmd_options.gpu is not None and cmd_options.mode == 'auto':
        cmd_options.mode = "gpu"

    # convert gpu to an integer
    if cmd_options.gpu is not None:
        try:
            cmd_options.gpu = int(cmd_options.gpu);
        except ValueError:
            parser.error('--gpu must be an integer')

    # convert notice_level to an integer
    if cmd_options.notice_level is not None:
        try:
            cmd_options.notice_level = int(cmd_options.notice_level);
        except ValueError:
            parser.error('--notice-level must be an integer')

    # Convert nx to an integer
    if cmd_options.nx is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --nx option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.nx = int(cmd_options.nx);
        except ValueError:
            parser.error('--nx must be an integer')

    # Convert ny to an integer
    if cmd_options.ny is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --ny option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.ny = int(cmd_options.ny);
        except ValueError:
            parser.error('--ny must be an integer')

    # Convert nz to an integer
    if cmd_options.nz is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --nz option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.nz = int(cmd_options.nz);
        except ValueError:
            parser.error('--nz must be an integer')

    # copy command line options over to global options
    globals.options.mode = cmd_options.mode;
    globals.options.gpu = cmd_options.gpu;
    globals.options.gpu_error_checking = cmd_options.gpu_error_checking;
    globals.options.min_cpu = cmd_options.min_cpu;
    globals.options.ignore_display = cmd_options.ignore_display;

    globals.options.nx = cmd_options.nx;
    globals.options.ny = cmd_options.ny;
    globals.options.nz = cmd_options.nz;
    globals.options.linear = cmd_options.linear
    globals.options.onelevel = cmd_options.onelevel

    if cmd_options.notice_level is not None:
        globals.options.notice_level = cmd_options.notice_level;
        globals.msg.setNoticeLevel(globals.options.notice_level);

    if cmd_options.msg_file is not None:
        globals.options.msg_file = cmd_options.msg_file;
        globals.msg.openFile(globals.options.msg_file);

    if cmd_options.shared_msg_file is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("Shared log files are only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        globals.options.shared_msg_file = cmd_options.shared_msg_file;
        globals.msg.setSharedFile(globals.options.shared_msg_file);

    if cmd_options.nrank is not None:
        if not hoomd.is_MPI_available():
            globals.msg.error("The --nrank option is only avaible in MPI builds.\n");
            raise RuntimeError('Error setting option');
        # check validity
        nrank = int(cmd_options.nrank)
        if (hoomd.ExecutionConfiguration.getNRanksGlobal() % nrank):
            globals.msg.error("Total number of ranks is not a multiple of --nrank\n");
            raise RuntimeError('Error checking option');
        globals.options.nrank = nrank

    if cmd_options.user is not None:
        globals.options.user = shlex.split(cmd_options.user);
Example #51
def get_num_ranks():
    if hoomd.is_MPI_available():
        return globals.exec_conf.getNRanks()
    else:
        return 1
Example #52
#
# Example:
# ~~~~~
# from hoomd_script import *
# init.read_xml("init.xml");
# # setup....
# run(30000);  # warm up and auto-tune kernel block sizes
# option.set_autotuner_params(enable=False);  # prevent block sizes from further autotuning
# cuda_profile_start();
# run(100);
# ~~~~~
def cuda_profile_start():
    hoomd.cuda_profile_start()


## Stop CUDA profiling
# \sa cuda_profile_start();
def cuda_profile_stop():
    hoomd.cuda_profile_stop()


# Check to see if we are built without MPI support and the user used mpirun
if (not hoomd.is_MPI_available()) and ('OMPI_COMM_WORLD_RANK' in os.environ
                                       or 'MV2_COMM_WORLD_LOCAL_RANK'
                                       in os.environ):
    print(
        'HOOMD-blue is built without MPI support, but seems to have been launched with mpirun'
    )
    print('exiting now to prevent many sequential jobs from starting')
    raise RuntimeError('Error launching hoomd')
Example #53
def _create_exec_conf():
    # use a cached execution configuration if available
    if globals.exec_conf is not None:
        return globals.exec_conf

    mpi_available = hoomd.is_MPI_available()

    # set the openmp thread limits
    if globals.options.ncpu is not None:
        if globals.options.ncpu > hoomd.get_num_procs():
            globals.msg.warning(
                "Requesting more CPU cores than there are available in the system\n"
            )
        hoomd.set_num_threads(globals.options.ncpu)

    # if no command line options were specified, create a default ExecutionConfiguration
    if globals.options.mode is None:
        if mpi_available:
            if globals.options.nrank is not None:
                exec_conf = hoomd.ExecutionConfiguration(
                    globals.options.min_cpu, globals.options.ignore_display,
                    globals.msg, True, globals.options.nrank)
            else:
                exec_conf = hoomd.ExecutionConfiguration(
                    globals.options.min_cpu, globals.options.ignore_display,
                    globals.msg, True)
        else:
            exec_conf = hoomd.ExecutionConfiguration(
                globals.options.min_cpu, globals.options.ignore_display,
                globals.msg)
    else:
        # determine the GPU on which to execute
        if globals.options.gpu is not None:
            gpu_id = int(globals.options.gpu)
        else:
            gpu_id = -1

        # create the specified configuration
        if globals.options.mode == "cpu":
            if mpi_available:
                if globals.options.nrank is not None:
                    exec_conf = hoomd.ExecutionConfiguration(
                        hoomd.ExecutionConfiguration.executionMode.CPU, gpu_id,
                        globals.options.min_cpu,
                        globals.options.ignore_display, globals.msg, True,
                        globals.options.nrank)
                else:
                    exec_conf = hoomd.ExecutionConfiguration(
                        hoomd.ExecutionConfiguration.executionMode.CPU, gpu_id,
                        globals.options.min_cpu,
                        globals.options.ignore_display, globals.msg, True)
            else:
                exec_conf = hoomd.ExecutionConfiguration(
                    hoomd.ExecutionConfiguration.executionMode.CPU, gpu_id,
                    globals.options.min_cpu, globals.options.ignore_display,
                    globals.msg)
        elif globals.options.mode == "gpu":
            if mpi_available:
                if globals.options.nrank is not None:
                    exec_conf = hoomd.ExecutionConfiguration(
                        hoomd.ExecutionConfiguration.executionMode.GPU, gpu_id,
                        globals.options.min_cpu,
                        globals.options.ignore_display, globals.msg, True,
                        globals.options.nrank)
                else:
                    exec_conf = hoomd.ExecutionConfiguration(
                        hoomd.ExecutionConfiguration.executionMode.GPU, gpu_id,
                        globals.options.min_cpu,
                        globals.options.ignore_display, globals.msg, True)
            else:
                exec_conf = hoomd.ExecutionConfiguration(
                    hoomd.ExecutionConfiguration.executionMode.GPU, gpu_id,
                    globals.options.min_cpu, globals.options.ignore_display,
                    globals.msg)
        else:
            raise RuntimeError("Error initializing")

    # if gpu_error_checking is set, enable it on the GPU
    if globals.options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    globals.exec_conf = exec_conf

    return exec_conf
Example #54
def get_num_ranks():
    if hoomd.is_MPI_available():
        return globals.exec_conf.getNRanks();
    else:
        return 1;