Example 1
    def __init__(self, r_cut, nlist):
        hoomd.util.print_status_line();

        # Error out in MPI simulations
        if (_hoomd.is_MPI_available()):
            if hoomd.context.current.system_definition.getParticleData().getDomainDecomposition():
                hoomd.context.msg.error("pair.cgcmm is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error setting up pair potential.")

        # initialize the base class
        hoomd.md.force._force.__init__(self);

        # this class extends force, so we need to store the r_cut explicitly as a member
        # to be used in get_rcut
        # the authors of this potential also did not incorporate pairwise cutoffs, so we just use
        # the same number for everything
        self.r_cut = r_cut

        # setup the coefficient matrix
        self.pair_coeff = hoomd.md.pair.coeff();

        self.nlist = nlist
        self.nlist.subscribe(lambda:self.get_rcut())
        self.nlist.update_rcut()

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _cgcmm.CGCMMForceCompute(hoomd.context.current.system_definition, self.nlist.cpp_nlist, r_cut);
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _cgcmm.CGCMMForceComputeGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, r_cut);
            self.cpp_force.setBlockSize(128);

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
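
For orientation, a minimal usage sketch of the constructor above. This is a hedged sketch, not the canonical API: the import path (hoomd.cgcmm), the coefficient names, and the exponent selector string are assumptions not taken from the snippet.

# Hedged usage sketch; module path and coefficient names are assumptions.
import hoomd
import hoomd.md
import hoomd.cgcmm  # assumed import path for the CGCMM component

hoomd.context.initialize("--mode=cpu")  # single rank: CGCMM errors out under MPI decomposition
hoomd.init.create_lattice(unitcell=hoomd.lattice.sc(a=2.0), n=5)
nl = hoomd.md.nlist.cell()
cg = hoomd.cgcmm.pair.cgcmm(r_cut=3.0, nlist=nl)  # one global r_cut; no pairwise cutoffs
cg.pair_coeff.set('A', 'A', epsilon=1.0, sigma=1.0, alpha=1.0,
                  exponents='lj12_4')  # kwarg name and value are assumptions
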
Example 2
def _perform_common_init_tasks():
    # create the sorter
    hoomd.context.current.sorter = hoomd.update.sort()

    # create the default compute.thermo on the all group
    hoomd.util.quiet_status()
    all = hoomd.group.all()
    hoomd.compute._get_unique_thermo(group=all)
    hoomd.util.unquiet_status()

    # set up Communicator, and register it with the System
    if _hoomd.is_MPI_available():
        cpp_decomposition = hoomd.context.current.system_definition.getParticleData(
        ).getDomainDecomposition()
        if cpp_decomposition is not None:
            # create the c++ Communicator
            if not hoomd.context.exec_conf.isCUDAEnabled():
                cpp_communicator = _hoomd.Communicator(
                    hoomd.context.current.system_definition, cpp_decomposition)
            else:
                cpp_communicator = _hoomd.CommunicatorGPU(
                    hoomd.context.current.system_definition, cpp_decomposition)

            # set Communicator in C++ System
            hoomd.context.current.system.setCommunicator(cpp_communicator)
Example 3
    def __init__(self, x=True, y=True, z=True, tolerance=1.02, maxiter=1, period=1000, phase=0):
        hoomd.util.print_status_line();

        # initialize base class
        _updater.__init__(self);

        # balancing cannot be done without mpi
        if not _hoomd.is_MPI_available() or hoomd.context.current.decomposition is None:
            hoomd.context.msg.warning("Ignoring balance command, not supported in current configuration.\n")
            return

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_updater = _hoomd.LoadBalancer(hoomd.context.current.system_definition, hoomd.context.current.decomposition.cpp_dd);
        else:
            self.cpp_updater = _hoomd.LoadBalancerGPU(hoomd.context.current.system_definition, hoomd.context.current.decomposition.cpp_dd);

        self.setupUpdater(period,phase)

        # stash arguments to metadata
        self.metadata_fields = ['tolerance','maxiter','period','phase']
        self.period = period
        self.phase = phase

        # configure the parameters
        hoomd.util.quiet_status()
        self.set_params(x,y,z,tolerance, maxiter)
        hoomd.util.unquiet_status()
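
A minimal usage sketch for this updater, assuming it is exposed as hoomd.update.balance (the warning text above calls it the "balance command"); the input file name is hypothetical. Under a serial or single-rank run the constructor simply warns and returns.

import hoomd

hoomd.context.initialize()
hoomd.init.read_gsd('init.gsd')  # hypothetical input file
hoomd.update.balance(x=True, y=True, z=False, tolerance=1.05, period=5000)
hoomd.run(1e5)
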
Example 4
    def __init__(self, x=True, y=True, z=True, tolerance=1.02, maxiter=1, period=1000, phase=0):
        hoomd.util.print_status_line();

        # initialize base class
        _updater.__init__(self);

        # balancing cannot be done without mpi
        if not _hoomd.is_MPI_available() or hoomd.context.current.decomposition is None:
            hoomd.context.msg.warning("Ignoring balance command, not supported in current configuration.\n")
            return

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_updater = _hoomd.LoadBalancer(hoomd.context.current.system_definition, hoomd.context.current.decomposition.cpp_dd);
        else:
            self.cpp_updater = _hoomd.LoadBalancerGPU(hoomd.context.current.system_definition, hoomd.context.current.decomposition.cpp_dd);

        self.setupUpdater(period,phase)

        # stash arguments to metadata
        self.metadata_fields = ['tolerance','maxiter','period','phase']
        self.period = period
        self.phase = phase

        # configure the parameters
        hoomd.util.quiet_status()
        self.set_params(x,y,z,tolerance, maxiter)
        hoomd.util.unquiet_status()
Example 5
    def __init__(self, communicator, notice_level, msg_file, shared_msg_file):

        # metadata stuff
        hoomd.meta._metadata.__init__(self)
        self.metadata_fields = ['gpu_ids', 'mode', 'num_ranks']
        if _hoomd.is_TBB_available():
            self.metadata_fields.append('num_threads')

        # check shared_msg_file
        if shared_msg_file is not None:
            if not _hoomd.is_MPI_available():
                raise RuntimeError(
                    "Shared log files are only available in MPI builds.\n")

        # MPI communicator
        if communicator is None:
            self._comm = hoomd.comm.Communicator()
        else:
            self._comm = communicator

        # c++ messenger object
        self.cpp_msg = _create_messenger(self.comm.cpp_mpi_conf, notice_level,
                                         msg_file, shared_msg_file)

        # output the version info on initialization
        self.cpp_msg.notice(1, _hoomd.output_version_info())

        # c++ execution configuration mirror class
        self.cpp_exec_conf = None

        # name of the message file
        self._msg_file = msg_file
Example 6
    def __init__(self, r_cut, nlist):
        hoomd.util.print_status_line();

        # Error out in MPI simulations
        if (_hoomd.is_MPI_available()):
            if hoomd.context.current.system_definition.getParticleData().getDomainDecomposition():
                hoomd.context.msg.error("pair.cgcmm is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error setting up pair potential.")

        # initialize the base class
        hoomd.md.force._force.__init__(self);

        # this class extends force, so we need to store the r_cut explicitly as a member
        # to be used in get_rcut
        # the authors of this potential also did not incorporate pairwise cutoffs, so we just use
        # the same number for everything
        self.r_cut = r_cut

        # setup the coefficient matrix
        self.pair_coeff = hoomd.md.pair.coeff();

        self.nlist = nlist
        self.nlist.subscribe(lambda:self.get_rcut())
        self.nlist.update_rcut()

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _cgcmm.CGCMMForceCompute(hoomd.context.current.system_definition, self.nlist.cpp_nlist, r_cut);
        else:
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);
            self.cpp_force = _cgcmm.CGCMMForceComputeGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, r_cut);
            self.cpp_force.setBlockSize(128);

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
Example 7
def _create_messenger(mpi_config, options):
    global msg

    # use a cached messenger if available
    if msg is not None:
        return msg

    msg = _hoomd.Messenger(mpi_config)

    # try to detect if we're running inside an MPI job
    inside_mpi_job = mpi_config.getNRanksGlobal() > 1
    if ('OMPI_COMM_WORLD_RANK' in os.environ or
        'MV2_COMM_WORLD_LOCAL_RANK' in os.environ or
        'PMI_RANK' in os.environ or
        'ALPS_APP_PE' in os.environ):
        inside_mpi_job = True

    # only open python stdout/stderr in non-MPI runs
    if not inside_mpi_job:
        msg.openPython();

    if options.notice_level is not None:
        msg.setNoticeLevel(options.notice_level);

    if options.msg_file is not None:
        msg.openFile(options.msg_file);

    if options.shared_msg_file is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error("Shared log files are only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        msg.setSharedFile(options.shared_msg_file);

    return msg
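
The launcher-detection heuristic above can be read in isolation: a run is treated as an MPI job either when the global rank count exceeds one or when any of the environment variables set by common MPI launchers is present. A standalone sketch of the same check:

import os

# Variables exported by common MPI launchers, as probed above.
_MPI_LAUNCHER_VARS = ('OMPI_COMM_WORLD_RANK',       # Open MPI
                      'MV2_COMM_WORLD_LOCAL_RANK',  # MVAPICH2
                      'PMI_RANK',                   # generic PMI
                      'ALPS_APP_PE')                # Cray ALPS

inside_mpi_job = any(v in os.environ for v in _MPI_LAUNCHER_VARS)
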
Example 8
    def __init__(self, file, type, nlist):
        c = hoomd.cite.article(
            cite_key='lin2017',
            author=['L Yang', 'F Zhang', 'K M Ho', 'C Z Wang', 'A Travesset'],
            title='Implementation of EAM and FS potentials in HOOMD-blue',
            journal='Computer Physics Communications',
            volume=0,
            number=0,
            pages='0--0',
            year='2017',
            doi='0',
            feature='EAM')
        hoomd.cite._ensure_global_bib().add(c)

        hoomd.util.print_status_line()

        # Error out in MPI simulations
        if (_hoomd.is_MPI_available()):
            if hoomd.context.current.system_definition.getParticleData(
            ).getDomainDecomposition():
                hoomd.context.msg.error(
                    "pair.eam is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error setting up pair potential.")

        # initialize the base class
        force._force.__init__(self)
        # Translate type
        if (type == 'Alloy'): type_of_file = 0
        elif (type == 'FS'): type_of_file = 1
        else: raise RuntimeError('Unknown EAM input file type')

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _metal.EAMForceCompute(
                hoomd.context.current.system_definition, file, type_of_file)
        else:
            self.cpp_force = _metal.EAMForceComputeGPU(
                hoomd.context.current.system_definition, file, type_of_file)

        # After loading the EAMForceCompute we know r_cut from the EAM potential's file; we need to update the neighbor list.
        self.r_cut_new = self.cpp_force.get_r_cut()
        self.nlist = nlist
        self.nlist.subscribe(lambda: self.get_rcut())
        self.nlist.update_rcut()

        # Load the neighbor list into the compute.
        self.cpp_force.set_neighbor_list(self.nlist.cpp_nlist)
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(
                _md.NeighborList.storageMode.full)

        hoomd.context.msg.notice(
            2, "Set r_cut = " + str(self.r_cut_new) +
            " from potential's file '" + str(file) + "'.\n")

        hoomd.context.current.system.addCompute(self.cpp_force,
                                                self.force_name)
        self.pair_coeff = hoomd.md.pair.coeff()
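
A minimal usage sketch for this EAM pair force, assuming it is exposed as hoomd.metal.pair.eam; the GSD and potential file names are hypothetical. Unlike most pair potentials, no pair_coeff setup is needed because r_cut and the coefficients come from the potential file.

import hoomd
import hoomd.md
import hoomd.metal  # assumed import path for the metal component

hoomd.context.initialize("--mode=cpu")  # single rank: pair.eam errors out under MPI decomposition
hoomd.init.read_gsd('al_fcc.gsd')       # hypothetical input file
nl = hoomd.md.nlist.cell()
eam = hoomd.metal.pair.eam(file='Al99.eam.alloy', type='Alloy', nlist=nl)
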
Example 9
    def __init__(self, mpi_comm=None, nrank=None):

        # check nrank
        if nrank is not None:
            if not _hoomd.is_MPI_available():
                raise RuntimeError("The nrank option is only available in MPI builds.\n")

        mpi_available = _hoomd.is_MPI_available();

        self.cpp_mpi_conf = None

        # create the specified configuration
        if mpi_comm is None:
            self.cpp_mpi_conf = _hoomd.MPIConfiguration();
        else:
            if not mpi_available:
                raise RuntimeError("mpi_comm is not supported in serial builds");

            handled = False;

            # pass in pointer to MPI_Comm object provided by mpi4py
            try:
                import mpi4py
                if isinstance(mpi_comm, mpi4py.MPI.Comm):
                    addr = mpi4py.MPI._addressof(mpi_comm);
                    self.cpp_mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr);
                    handled = True
            except ImportError:
                # silently ignore when mpi4py is missing
                pass

            # undocumented case: handle plain integers as pointers to MPI_Comm objects
            if not handled and isinstance(mpi_comm, int):
                self.cpp_mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm);
                handled = True

            if not handled:
                raise RuntimeError("Invalid mpi_comm object: {}".format(mpi_comm));

        if nrank is not None:
            # check validity
            if (self.cpp_mpi_conf.getNRanksGlobal() % nrank):
                raise RuntimeError('Total number of ranks is not a multiple of --nrank');

            # split the communicator into partitions
            self.cpp_mpi_conf.splitPartitions(nrank)
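
A sketch of the mpi4py path through this constructor: split MPI_COMM_WORLD so each world rank becomes an independent single-rank simulation, then hand the sub-communicator to HOOMD. It assumes the class is exposed as hoomd.comm.Communicator, as Example 5 suggests, and mirrors the mpi_comm example in the initialize() docstring in Example 31.

from mpi4py import MPI
import hoomd

world = MPI.COMM_WORLD
# One sub-communicator per world rank -> independent simulations.
sub = world.Split(world.Get_rank(), 0)
comm = hoomd.comm.Communicator(mpi_comm=sub)
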
Example 10
def barrier_all():
    """ Perform a MPI barrier synchronization across the whole MPI run.

    Note:
        Does nothing in in non-MPI builds.
    """
    if _hoomd.is_MPI_available():
        _hoomd.mpi_barrier_world()
Example 11
def barrier_all():
    """ Perform a MPI barrier synchronization across the whole MPI run.

    Note:
        Does nothing in in non-MPI builds.
    """
    if _hoomd.is_MPI_available():
        _hoomd.mpi_barrier_world();
Example 12
    def __init__(self, x=None, y=None, z=None, nx=None, ny=None, nz=None, linear=False, onelevel=False):

        # check that the context has been initialized
        if hoomd.context.current is None:
            raise RuntimeError("Cannot initialize decomposition without context.initialize() first")

        # check that system is not initialized
        if hoomd.context.current.system is not None:
            hoomd.context.current.device.cpp_msg.error("comm.decomposition: cannot modify decomposition after system is initialized. Call before init.*\n")
            raise RuntimeError("Cannot create decomposition after system is initialized. Call before init.*")

        # make sure MPI is enabled if any arguments are not None
        if (x or y or z or nx or ny or nz) and (not _hoomd.is_MPI_available()):
            raise RuntimeError("the x, y, z, nx, ny, nz options are only available in MPI builds")

        self._onelevel = onelevel  # cache this for later when we can make the cpp object

        # check that there are ranks available for decomposition
        if hoomd.context.current.device.comm.cpp_mpi_conf.getNRanks() == 1:
            hoomd.context.current.device.cpp_msg.warning("Only 1 rank in system, ignoring decomposition to use optimized code pathways.\n")
            return
        else:
            self.x = []
            self.y = []
            self.z = []
            self.nx = 0
            self.ny = 0
            self.nz = 0
            self.uniform_x = True
            self.uniform_y = True
            self.uniform_z = True

            self.set_params(x,y,z,nx,ny,nz)

            """
            # do a one time update of the cuts to the global values if a global is set
            if not self.x and self.nx == 0 and hoomd.context.options.nx is not None:
                self.nx = hoomd.context.options.nx
                self.uniform_x = True
            if not self.y and self.ny == 0 and hoomd.context.options.ny is not None:
                self.ny = hoomd.context.options.ny
                self.uniform_y = True
            if not self.z and self.nz == 0:
                if linear:
                    self.nz = hoomd.context.current.device.cpp_mpi_conf.getNRanks()
                    self.uniform_z = True
                elif hoomd.context.options.nz is not None:
                    self.nz = hoomd.context.options.nz
                    self.uniform_z = True
            """

            # set the global decomposition to this class
            if hoomd.context.current.decomposition is not None:
                hoomd.context.current.device.cpp_msg.warning("comm.decomposition: overriding currently defined domain decomposition\n")

            hoomd.context.current.decomposition = self
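
A usage sketch, assuming an 8-rank MPI job; per the checks in the constructor above, it must run after context.initialize() and before init.* creates the system. The input file name is hypothetical.

import hoomd

hoomd.context.initialize()
hoomd.comm.decomposition(nx=2, ny=2, nz=2)  # 2x2x2 grid over 8 ranks
hoomd.init.read_gsd('init.gsd')             # hypothetical input file
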
Example 13
def barrier():
    """ Perform a MPI barrier synchronization across all ranks in the partition.

    Note:
        Does nothing in in non-MPI builds.
    """
    hoomd.context._verify_init()

    if _hoomd.is_MPI_available():
        hoomd.context.exec_conf.barrier()
Example 14
def barrier():
    """ Perform a MPI barrier synchronization across all ranks in the partition.

    Note:
        Does nothing in in non-MPI builds.
    """
    hoomd.context._verify_init();

    if _hoomd.is_MPI_available():
        hoomd.context.exec_conf.barrier()
Example 15
def _create_exec_conf():
    global exec_conf, options, msg

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    mpi_available = _hoomd.is_MPI_available()

    # error out on nyx/flux if the auto mode is set
    if options.mode == 'auto':
        host = _get_proc_name()
        if "flux" in host or "nyx" in host:
            msg.error(
                "--mode=gpu or --mode=cpu must be specified on nyx/flux\n")
            raise RuntimeError("Error initializing")
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults
    if options.gpu is None:
        gpu_id = -1
    else:
        gpu_id = int(options.gpu)

    if options.nrank is None:
        nrank = 0
    else:
        nrank = int(options.nrank)

    # create the specified configuration
    exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_id,
                                              options.min_cpu,
                                              options.ignore_display, msg,
                                              nrank)

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf
Example 16
    def __init__(self, file, type, nlist):
        c = hoomd.cite.article(cite_key = 'lin2017',
                         author=['L Yang', 'F Zhang', 'K M Ho', 'C Z Wang','A Travesset'],
                         title = 'Implementation of EAM and FS potentials in HOOMD-blue',
                         journal = 'Computer Physics Communications',
                         volume = 0,
                         number = 0,
                         pages = '0--0',
                         year = '2017',
                         doi = '0',
                         feature = 'EAM')
        hoomd.cite._ensure_global_bib().add(c)

        hoomd.util.print_status_line();

        # Error out in MPI simulations
        if (_hoomd.is_MPI_available()):
            if hoomd.context.current.system_definition.getParticleData().getDomainDecomposition():
                hoomd.context.msg.error("pair.eam is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error setting up pair potential.")

        # initialize the base class
        force._force.__init__(self);
        # Translate type
        if(type == 'Alloy'): type_of_file = 0;
        elif(type == 'FS'): type_of_file = 1;
        else: raise RuntimeError('Unknown EAM input file type');

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _metal.EAMForceCompute(hoomd.context.current.system_definition, file, type_of_file);
        else:
            self.cpp_force = _metal.EAMForceComputeGPU(hoomd.context.current.system_definition, file, type_of_file);

        # After loading the EAMForceCompute we know r_cut from the EAM potential's file; we need to update the neighbor list.
        self.r_cut_new = self.cpp_force.get_r_cut();
        self.nlist = nlist
        self.nlist.subscribe(lambda : self.get_rcut())
        self.nlist.update_rcut()

        # Load the neighbor list into the compute.
        self.cpp_force.set_neighbor_list(self.nlist.cpp_nlist);
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(_md.NeighborList.storageMode.full);

        hoomd.context.msg.notice(2, "Set r_cut = " + str(self.r_cut_new) + " from potential's file '" + str(file) + "'.\n");

        hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
        self.pair_coeff = hoomd.md.pair.coeff();
Example 17
def _create_domain_decomposition(box):
    if not _hoomd.is_MPI_available():
        return None

    # if we are only running on one processor, we use optimized code paths
    # for single-GPU execution
    if hoomd.context.current.device.cpp_exec_conf.getNRanks() == 1:
        return None

    # okay, we want a decomposition but one isn't set, so make a default one
    if hoomd.context.current.decomposition is None:
        # this is happening transparently to the user, so hush this up
        hoomd.context.current.decomposition = hoomd.comm.decomposition()

    return hoomd.context.current.decomposition._make_cpp_decomposition(box)
Example 18
def get_num_ranks():
    """ Get the number of ranks in this partition.

    Returns:
        The number of MPI ranks in this partition.

    Note:
        Returns 1 in non-MPI builds.
    """

    hoomd.context._verify_init()
    if _hoomd.is_MPI_available():
        return hoomd.context.exec_conf.getNRanks()
    else:
        return 1
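
These rank queries combine naturally with the barrier helpers above to gate per-partition I/O; a short sketch, assuming these module-level helpers live in hoomd.comm:

import hoomd

hoomd.context.initialize()
if hoomd.comm.get_rank() == 0:
    # only one rank per partition writes
    print("partition size:", hoomd.comm.get_num_ranks())
hoomd.comm.barrier()  # everyone waits until rank 0 is done
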
Example 19
    def num_ranks(self):
        """ Get the number of ranks in this partition.

        Returns:
            The number of MPI ranks in this partition.

        Note:
            Returns 1 in non-MPI builds.
        """

        hoomd.context._verify_init();
        if _hoomd.is_MPI_available():
            return self.cpp_mpi_conf.getNRanks();
        else:
            return 1;
Example 20
def get_partition():
    """ Get the current partition index.

    Returns:
        Index of the current partition.

    Note:
        Always returns 0 in non-MPI builds.
    """
    hoomd.context._verify_init();

    if _hoomd.is_MPI_available():
        return hoomd.context.exec_conf.getPartition()
    else:
        return 0;
Example 21
def get_partition():
    """ Get the current partition index.

    Returns:
        Index of the current partition.

    Note:
        Always returns 0 in non-MPI builds.
    """
    hoomd.context._verify_init()

    if _hoomd.is_MPI_available():
        return hoomd.context.exec_conf.getPartition()
    else:
        return 0
Example 22
def get_num_ranks():
    """ Get the number of ranks in this partition.

    Returns:
        The number of MPI ranks in this partition.

    Note:
        Returns 1 in non-MPI builds.
    """

    hoomd.context._verify_init();
    if _hoomd.is_MPI_available():
        return hoomd.context.exec_conf.getNRanks();
    else:
        return 1;
Example 23
def get_rank():
    """ Get the current rank.

    Returns:
        Index of the current rank in this partition.

    Note:
        Always returns 0 in non-MPI builds.
    """

    hoomd.context._verify_init()

    if _hoomd.is_MPI_available():
        return hoomd.context.mpi_conf.getRank()
    else:
        return 0
Example 24
def _create_mpi_conf(mpi_comm, options):
    global mpi_conf

    # use a cached MPI configuration if available
    if mpi_conf is not None:
        return mpi_conf

    mpi_available = _hoomd.is_MPI_available();

    # create the specified configuration
    if mpi_comm is None:
        mpi_conf = _hoomd.MPIConfiguration();
    else:
        if not mpi_available:
            raise RuntimeError("mpi_comm is not supported in serial builds");

        handled = False;

        # pass in pointer to MPI_Comm object provided by mpi4py
        try:
            import mpi4py
            if isinstance(mpi_comm, mpi4py.MPI.Comm):
                addr = mpi4py.MPI._addressof(mpi_comm);
                mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr);
                handled = True
        except ImportError:
            # silently ignore when mpi4py is missing
            pass

        # undocumented case: handle plain integers as pointers to MPI_Comm objects
        if not handled and isinstance(mpi_comm, int):
            mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm);
            handled = True

        if not handled:
            raise RuntimeError("Invalid mpi_comm object: {}".format(mpi_comm));

    if options.nrank is not None:
        # check validity
        nrank = options.nrank
        if (mpi_conf.getNRanksGlobal() % nrank):
            raise RuntimeError('Total number of ranks is not a multiple of --nrank');

        # split the communicator into partitions
        mpi_conf.splitPartitions(nrank)

    return mpi_conf
Example 25
def _create_domain_decomposition(box):
    if not _hoomd.is_MPI_available():
        return None

    # if we are only running on one processor, we use optimized code paths
    # for single-GPU execution
    if hoomd.context.exec_conf.getNRanks() == 1:
        return None

    # okay, we want a decomposition but one isn't set, so make a default one
    if hoomd.context.current.decomposition is None:
        # this is happening transparently to the user, so hush this up
        hoomd.util.quiet_status()
        hoomd.context.current.decomposition = hoomd.comm.decomposition()
        hoomd.util.unquiet_status()

    return hoomd.context.current.decomposition._make_cpp_decomposition(box)
Example 26
    def __init__(self, group, r=None, rx=None, ry=None, rz=None, P=(0, 0, 0)):
        hoomd.util.print_status_line()
        period = 1

        # Error out in MPI simulations
        if (_hoomd.is_MPI_available()):
            if context.current.system_definition.getParticleData(
            ).getDomainDecomposition():
                context.msg.error(
                    "constrain.ellipsoid is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error initializing updater.")

        # Error out if no radii are set
        if (r is None and rx is None and ry is None and rz is None):
            context.msg.error(
                "no radii were defined in update.constraint_ellipsoid.\n\n")
            raise RuntimeError("Error initializing updater.")

        # initialize the base class
        _updater.__init__(self)

        # Set parameters
        P = _hoomd.make_scalar3(P[0], P[1], P[2])
        if (r is not None): rx = ry = rz = r

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_updater = _md.ConstraintEllipsoid(
                hoomd.context.current.system_definition, group.cpp_group, P,
                rx, ry, rz)
        else:
            self.cpp_updater = _md.ConstraintEllipsoidGPU(
                hoomd.context.current.system_definition, group.cpp_group, P,
                rx, ry, rz)

        self.setupUpdater(period)

        # store metadata
        self.group = group
        self.P = P
        self.rx = rx
        self.ry = ry
        self.rz = rz
        self.metadata_fields = ['group', 'P', 'rx', 'ry', 'rz']
Example 27
def _create_exec_conf():
    global exec_conf, options, msg

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    mpi_available = _hoomd.is_MPI_available();

    # error out on nyx/flux if the auto mode is set
    if options.mode == 'auto':
        host = _get_proc_name()
        if "flux" in host or "nyx" in host:
            msg.error("--mode=gpu or --mode=cpu must be specified on nyx/flux\n");
            raise RuntimeError("Error initializing");
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO;
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU;
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU;
    else:
        raise RuntimeError("Invalid mode");

    # convert None options to defaults
    if options.gpu is None:
        gpu_id = -1;
    else:
        gpu_id = int(options.gpu);

    if options.nrank is None:
        nrank = 0;
    else:
        nrank = int(options.nrank);

    # create the specified configuration
    exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_id, options.min_cpu, options.ignore_display, msg, nrank);

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True);

    return exec_conf;
Example 28
def _perform_common_init_tasks():
    # create the sorter
    hoomd.context.current.sorter = hoomd.update.sort();

    # create the default compute.thermo on the all group
    hoomd.util.quiet_status();
    all = hoomd.group.all();
    hoomd.compute._get_unique_thermo(group=all);
    hoomd.util.unquiet_status();

    # set up Communicator, and register it with the System
    if _hoomd.is_MPI_available():
        cpp_decomposition = hoomd.context.current.system_definition.getParticleData().getDomainDecomposition();
        if cpp_decomposition is not None:
            # create the c++ Communicator
            if not hoomd.context.exec_conf.isCUDAEnabled():
                cpp_communicator = _hoomd.Communicator(hoomd.context.current.system_definition, cpp_decomposition)
            else:
                cpp_communicator = _hoomd.CommunicatorGPU(hoomd.context.current.system_definition, cpp_decomposition)

            # set Communicator in C++ System
            hoomd.context.current.system.setCommunicator(cpp_communicator)
Example 29
    def __init__(self, group, r=None, rx=None, ry=None, rz=None, P=(0,0,0)):
        hoomd.util.print_status_line();
        period = 1;

        # Error out in MPI simulations
        if (_hoomd.is_MPI_available()):
            if context.current.system_definition.getParticleData().getDomainDecomposition():
                context.msg.error("constrain.ellipsoid is not supported in multi-processor simulations.\n\n")
                raise RuntimeError("Error initializing updater.")

        # Error out if no radii are set
        if (r is None and rx is None and ry is None and rz is None):
            context.msg.error("no radii were defined in update.constraint_ellipsoid.\n\n")
            raise RuntimeError("Error initializing updater.")

        # initialize the base class
        _updater.__init__(self);

        # Set parameters
        P = _hoomd.make_scalar3(P[0], P[1], P[2]);
        if (r is not None): rx = ry = rz = r

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_updater = _md.ConstraintEllipsoid(hoomd.context.current.system_definition, group.cpp_group, P, rx, ry, rz);
        else:
            self.cpp_updater = _md.ConstraintEllipsoidGPU(hoomd.context.current.system_definition, group.cpp_group, P, rx, ry, rz);

        self.setupUpdater(period);

        # store metadata
        self.group = group
        self.P = P
        self.rx = rx
        self.ry = ry
        self.rz = rz
        self.metadata_fields = ['group','P', 'rx', 'ry', 'rz']
Example 30
    util.unquiet_status();

def get_step():
    """ Get the current simulation time step.

    Returns:
        The current simulation time step.

    Example::

            print(hoomd.get_step())
    """

    # check if initialization has occurred
    if not init.is_initialized():
        context.msg.error("Cannot get step before initialization\n");
        raise RuntimeError('Error getting step');

    return context.current.system.getCurrentTimeStep();

# Check to see if we are built without MPI support and the user used mpirun
if (not _hoomd.is_MPI_available()
    and (    'OMPI_COMM_WORLD_RANK' in os.environ
          or 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ
          or 'PMI_RANK' in os.environ
          or 'ALPS_APP_PE' in os.environ)
   ):
    print('HOOMD-blue is built without MPI support, but seems to have been launched with mpirun');
    print('exiting now to prevent many sequential jobs from starting');
    raise RuntimeError('Error launching hoomd')
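
A sketch of the get_step() contract documented above: it raises before initialization, and otherwise reports the step counter advanced by run() (integrator setup omitted for brevity; the input file name is hypothetical).

import hoomd

hoomd.context.initialize()
hoomd.init.read_gsd('init.gsd')  # calling get_step() before this raises
hoomd.run(1000)
print(hoomd.get_step())          # 1000, assuming the system started at step 0
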
Example 31
def initialize(args=None, memory_traceback=False, mpi_comm=None):
    R""" Initialize the execution context

    Args:
        args (str): Arguments to parse. When *None*, parse the arguments passed on the command line.
        memory_traceback (bool): If true, enable memory allocation tracking (*only for debugging/profiling purposes*)
        mpi_comm: Accepts an mpi4py communicator. Use this argument to perform many independent hoomd simulations
                  where you communicate between those simulations using your own mpi4py code.

    :py:func:`hoomd.context.initialize()` parses the command line arguments given, sets the options and initializes MPI and GPU execution
    (if any). By default, :py:func:`hoomd.context.initialize()` reads arguments given on the command line. Provide a string to :py:func:`hoomd.context.initialize()`
    to set the launch configuration within the job script.

    :py:func:`hoomd.context.initialize()` can be called more than once in a script. However, the execution parameters are fixed on the first call
    and *args* is ignored. Subsequent calls to :py:func:`hoomd.context.initialize()` create a new :py:class:`SimulationContext` and set it current. This
    behavior is primarily to support use of hoomd in Jupyter notebooks, so that a new clean simulation context is
    set when rerunning the notebook within an existing kernel.

    Example::

        from hoomd import *
        context.initialize();
        context.initialize("--mode=gpu --nrank=64");
        context.initialize("--mode=cpu --nthreads=64");

        world = MPI.COMM_WORLD
        comm = world.Split(world.Get_rank(), 0)
        hoomd.context.initialize(mpi_comm=comm)

    """
    global mpi_conf, exec_conf, msg, options, current, _prev_args

    if mpi_conf is not None or exec_conf is not None:
        if args != _prev_args:
            msg.warning("Ignoring new options, cannot change execution mode after initialization.\n");
        current = SimulationContext();
        return current

    _prev_args = args;

    options = hoomd.option.options();
    hoomd.option._parse_command_line(args);

    # Check to see if we are built without MPI support and the user used mpirun
    if (not _hoomd.is_MPI_available() and not options.single_mpi
        and (    'OMPI_COMM_WORLD_RANK' in os.environ
              or 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ
              or 'PMI_RANK' in os.environ
              or 'ALPS_APP_PE' in os.environ)
       ):
        print('HOOMD-blue is built without MPI support, but seems to have been launched with mpirun');
        print('exiting now to prevent many sequential jobs from starting');
        raise RuntimeError('Error launching hoomd')

    # create the MPI configuration
    mpi_conf = _create_mpi_conf(mpi_comm, options)

    # set options on messenger object
    msg = _create_messenger(mpi_conf, options)

    # output the version info on initialization
    msg.notice(1, _hoomd.output_version_info())

    # ensure creation of global bibliography to print HOOMD base citations
    cite._ensure_global_bib()

    # create the parallel execution configuration
    exec_conf = _create_exec_conf(mpi_conf, msg, options);

    # set memory tracing option
    exec_conf.setMemoryTracing(memory_traceback)

    current = SimulationContext();
    return current
Example 32
    def __init__(self, file, type, nlist):
        c = hoomd.cite.article(
            cite_key='morozov2011',
            author=[
                'I V Morozov', 'A M Kazennova', 'R G Bystryia', 'G E Normana',
                'V V Pisareva', 'V V Stegailova'
            ],
            title=
            'Molecular dynamics simulations of the relaxation processes in the condensed matter on GPUs',
            journal='Computer Physics Communications',
            volume=182,
            number=9,
            pages='1974--1978',
            year='2011',
            doi='10.1016/j.cpc.2010.12.026',
            feature='EAM')
        hoomd.cite._ensure_global_bib().add(c)

        hoomd.util.print_status_line()

        # Error out in MPI simulations
        if (_hoomd.is_MPI_available()):
            if hoomd.context.current.system_definition.getParticleData(
            ).getDomainDecomposition():
                hoomd.context.msg.error(
                    "pair.eam is not supported in multi-processor simulations.\n\n"
                )
                raise RuntimeError("Error setting up pair potential.")

        # initialize the base class
        force._force.__init__(self)
        # Translate type
        if (type == 'Alloy'): type_of_file = 0
        elif (type == 'FS'): type_of_file = 1
        else: raise RuntimeError('Unknown EAM input file type')

        # create the c++ mirror class
        if not hoomd.context.exec_conf.isCUDAEnabled():
            self.cpp_force = _metal.EAMForceCompute(
                hoomd.context.current.system_definition, file, type_of_file)
        else:
            self.cpp_force = _metal.EAMForceComputeGPU(
                hoomd.context.current.system_definition, file, type_of_file)

        # After loading the EAMForceCompute we know r_cut from the EAM
        # potential's file; we need to update the neighbor list.
        self.r_cut_new = self.cpp_force.get_r_cut()
        self.nlist = nlist
        self.nlist.subscribe(lambda: self.get_rcut())
        self.nlist.update_rcut()

        # Load the neighbor list into the compute.
        self.cpp_force.set_neighbor_list(self.nlist.cpp_nlist)
        if hoomd.context.exec_conf.isCUDAEnabled():
            self.nlist.cpp_nlist.setStorageMode(
                _md.NeighborList.storageMode.full)

        hoomd.context.msg.notice(
            2, "Set r_cut = " + str(self.r_cut_new) +
            " from potential's file '" + str(file) + "'.\n")

        hoomd.context.current.system.addCompute(self.cpp_force,
                                                self.force_name)
        self.pair_coeff = coeff()
Example 33
def _parse_command_line(arg_string=None):
    parser = OptionParser()
    parser.add_option("--mode",
                      dest="mode",
                      help="Execution mode (cpu or gpu)",
                      default='auto')
    parser.add_option("--gpu", dest="gpu", help="GPU on which to execute")
    parser.add_option("--gpu_error_checking",
                      dest="gpu_error_checking",
                      action="store_true",
                      default=False,
                      help="Enable error checking on the GPU")
    parser.add_option(
        "--minimize-cpu-usage",
        dest="min_cpu",
        action="store_true",
        default=False,
        help=
        "Enable to keep the CPU usage of HOOMD to a bare minimum (will degrade overall performance somewhat)"
    )
    parser.add_option("--ignore-display-gpu",
                      dest="ignore_display",
                      action="store_true",
                      default=False,
                      help="Attempt to avoid running on the display GPU")
    parser.add_option("--notice-level",
                      dest="notice_level",
                      help="Minimum level of notice messages to print")
    parser.add_option("--msg-file",
                      dest="msg_file",
                      help="Name of file to write messages to")
    parser.add_option(
        "--shared-msg-file",
        dest="shared_msg_file",
        help=
        "(MPI only) Name of shared file to write message to (append partition #)"
    )
    parser.add_option("--nrank",
                      dest="nrank",
                      help="(MPI) Number of ranks to include in a partition")
    parser.add_option("--nx",
                      dest="nx",
                      help="(MPI) Number of domains along the x-direction")
    parser.add_option("--ny",
                      dest="ny",
                      help="(MPI) Number of domains along the y-direction")
    parser.add_option("--nz",
                      dest="nz",
                      help="(MPI) Number of domains along the z-direction")
    parser.add_option(
        "--linear",
        dest="linear",
        action="store_true",
        default=False,
        help="(MPI only) Force a slab (1D) decomposition along the z-direction"
    )
    parser.add_option(
        "--onelevel",
        dest="onelevel",
        action="store_true",
        default=False,
        help="(MPI only) Disable two-level (node-local) decomposition")
    parser.add_option("--single-mpi",
                      dest="single_mpi",
                      action="store_true",
                      help="Allow single-threaded HOOMD builds in MPI jobs")
    parser.add_option("--user", dest="user", help="User options")
    parser.add_option("--nthreads",
                      dest="nthreads",
                      help="Number of TBB threads")

    input_args = None
    if arg_string is not None:
        input_args = shlex.split(arg_string)

    (cmd_options, args) = parser.parse_args(args=input_args)

    # check for valid mode setting
    if cmd_options.mode is not None:
        if not (cmd_options.mode == "cpu" or cmd_options.mode == "gpu"
                or cmd_options.mode == "auto"):
            parser.error("--mode must be either cpu, gpu, or auto")

    # check for sane options
    if cmd_options.mode == "cpu" and (cmd_options.gpu is not None):
        parser.error("--mode=cpu cannot be specified along with --gpu")

    # set the mode to gpu if the gpu # was set
    if cmd_options.gpu is not None and cmd_options.mode == 'auto':
        cmd_options.mode = "gpu"

    # convert gpu to an integer
    if cmd_options.gpu is not None:
        try:
            cmd_options.gpu = int(cmd_options.gpu)
        except ValueError:
            parser.error('--gpu must be an integer')

    # convert notice_level to an integer
    if cmd_options.notice_level is not None:
        try:
            cmd_options.notice_level = int(cmd_options.notice_level)
        except ValueError:
            parser.error('--notice-level must be an integer')

    # Convert nx to an integer
    if cmd_options.nx is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error(
                "The --nx option is only avaible in MPI builds.\n")
            raise RuntimeError('Error setting option')
        try:
            cmd_options.nx = int(cmd_options.nx)
        except ValueError:
            parser.error('--nx must be an integer')

    # Convert ny to an integer
    if cmd_options.ny is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error(
                "The --ny option is only avaible in MPI builds.\n")
            raise RuntimeError('Error setting option')
        try:
            cmd_options.ny = int(cmd_options.ny)
        except ValueError:
            parser.error('--ny must be an integer')

    # Convert nz to an integer
    if cmd_options.nz is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error(
                "The --nz option is only avaible in MPI builds.\n")
            raise RuntimeError('Error setting option')
        try:
            cmd_options.nz = int(cmd_options.nz)
        except ValueError:
            parser.error('--nz must be an integer')

    # Convert nthreads to an integer
    if cmd_options.nthreads is not None:
        if not _hoomd.is_TBB_available():
            hoomd.context.msg.error(
                "The --nthreads option is only avaible in TBB-enabled builds.\n"
            )
            raise RuntimeError('Error setting option')
        try:
            cmd_options.nthreads = int(cmd_options.nthreads)
        except ValueError:
            parser.error('--nthreads must be an integer')

    # copy command line options over to global options
    hoomd.context.options.mode = cmd_options.mode
    hoomd.context.options.gpu = cmd_options.gpu
    hoomd.context.options.gpu_error_checking = cmd_options.gpu_error_checking
    hoomd.context.options.min_cpu = cmd_options.min_cpu
    hoomd.context.options.ignore_display = cmd_options.ignore_display

    hoomd.context.options.nx = cmd_options.nx
    hoomd.context.options.ny = cmd_options.ny
    hoomd.context.options.nz = cmd_options.nz
    hoomd.context.options.linear = cmd_options.linear
    hoomd.context.options.onelevel = cmd_options.onelevel
    hoomd.context.options.single_mpi = cmd_options.single_mpi
    hoomd.context.options.nthreads = cmd_options.nthreads

    if cmd_options.notice_level is not None:
        hoomd.context.options.notice_level = cmd_options.notice_level
        hoomd.context.msg.setNoticeLevel(hoomd.context.options.notice_level)

    if cmd_options.msg_file is not None:
        hoomd.context.options.msg_file = cmd_options.msg_file
        hoomd.context.msg.openFile(hoomd.context.options.msg_file)

    if cmd_options.shared_msg_file is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error(
                "Shared log files are only available in MPI builds.\n")
            raise RuntimeError('Error setting option')
        hoomd.context.options.shared_msg_file = cmd_options.shared_msg_file
        hoomd.context.msg.setSharedFile(hoomd.context.options.shared_msg_file)

    if cmd_options.nrank is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error(
                "The --nrank option is only avaible in MPI builds.\n")
            raise RuntimeError('Error setting option')
        # check validity
        nrank = int(cmd_options.nrank)
        if (_hoomd.ExecutionConfiguration.getNRanksGlobal() % nrank):
            hoomd.context.msg.error(
                "Total number of ranks is not a multiple of --nrank\n")
            raise RuntimeError('Error checking option')
        hoomd.context.options.nrank = nrank

    if cmd_options.user is not None:
        hoomd.context.options.user = shlex.split(cmd_options.user)
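
Since initialize() feeds its args string through shlex.split() before this parser runs, passing a string is equivalent to giving the same flags on the command line; a sketch:

import shlex

# What _parse_command_line("--mode=gpu --gpu=0 --notice-level=3") sees:
argv = shlex.split("--mode=gpu --gpu=0 --notice-level=3")
# -> ['--mode=gpu', '--gpu=0', '--notice-level=3'], the same tokens the
#    OptionParser receives when the flags are given on the command line.
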
Example 34
def _parse_command_line(arg_string=None):
    parser = OptionParser();
    parser.add_option("--mode", dest="mode", help="Execution mode (cpu or gpu)", default='auto');
    parser.add_option("--gpu", dest="gpu", help="GPU or comma-separated list of GPUs on which to execute");
    parser.add_option("--gpu_error_checking", dest="gpu_error_checking", action="store_true", default=False, help="Enable error checking on the GPU");
    parser.add_option("--minimize-cpu-usage", dest="min_cpu", action="store_true", default=False, help="Enable to keep the CPU usage of HOOMD to a bare minimum (will degrade overall performance somewhat)");
    parser.add_option("--ignore-display-gpu", dest="ignore_display", action="store_true", default=False, help="Attempt to avoid running on the display GPU");
    parser.add_option("--notice-level", dest="notice_level", help="Minimum level of notice messages to print");
    parser.add_option("--msg-file", dest="msg_file", help="Name of file to write messages to");
    parser.add_option("--shared-msg-file", dest="shared_msg_file", help="(MPI only) Name of shared file to write message to (append partition #)");
    parser.add_option("--nrank", dest="nrank", help="(MPI) Number of ranks to include in a partition");
    parser.add_option("--nx", dest="nx", help="(MPI) Number of domains along the x-direction");
    parser.add_option("--ny", dest="ny", help="(MPI) Number of domains along the y-direction");
    parser.add_option("--nz", dest="nz", help="(MPI) Number of domains along the z-direction");
    parser.add_option("--linear", dest="linear", action="store_true", default=False, help="(MPI only) Force a slab (1D) decomposition along the z-direction");
    parser.add_option("--onelevel", dest="onelevel", action="store_true", default=False, help="(MPI only) Disable two-level (node-local) decomposition");
    parser.add_option("--single-mpi", dest="single_mpi", action="store_true", help="Allow single-threaded HOOMD builds in MPI jobs");
    parser.add_option("--user", dest="user", help="User options");
    parser.add_option("--nthreads", dest="nthreads", help="Number of TBB threads");

    input_args = None;
    if arg_string is not None:
        input_args = shlex.split(arg_string);

    (cmd_options, args) = parser.parse_args(args=input_args);

    # check for valid mode setting
    if cmd_options.mode is not None:
        if not (cmd_options.mode == "cpu" or cmd_options.mode == "gpu" or cmd_options.mode == "auto"):
            parser.error("--mode must be either cpu, gpu, or auto");

    # check for sane options
    if cmd_options.mode == "cpu" and (cmd_options.gpu is not None):
        parser.error("--mode=cpu cannot be specified along with --gpu")

    # set the mode to gpu if the gpu # was set
    if cmd_options.gpu is not None and cmd_options.mode == 'auto':
        cmd_options.mode = "gpu"

    # convert gpu to an integer
    if cmd_options.gpu is not None:
        try:
            cmd_options.gpu = [int(gpu) for gpu in str(cmd_options.gpu).split(',')]
        except ValueError:
            parser.error('--gpu must be an integer or comma-separated list of integers')

    # convert notice_level to an integer
    if cmd_options.notice_level is not None:
        try:
            cmd_options.notice_level = int(cmd_options.notice_level);
        except ValueError:
            parser.error('--notice-level must be an integer')

    # Convert nx to an integer
    if cmd_options.nx is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error("The --nx option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.nx = int(cmd_options.nx);
        except ValueError:
            parser.error('--nx must be an integer')

    # Convert ny to an integer
    if cmd_options.ny is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error("The --ny option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.ny = int(cmd_options.ny);
        except ValueError:
            parser.error('--ny must be an integer')

    # Convert nz to an integer
    if cmd_options.nz is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error("The --nz option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.nz = int(cmd_options.nz);
        except ValueError:
            parser.error('--nz must be an integer')

    # Convert nthreads to an integer
    if cmd_options.nthreads is not None:
        if not _hoomd.is_TBB_available():
            hoomd.context.msg.error("The --nthreads option is only available in TBB-enabled builds.\n");
            raise RuntimeError('Error setting option');
        try:
            cmd_options.nthreads = int(cmd_options.nthreads);
        except ValueError:
            parser.error('--nthreads must be an integer')


    # copy command line options over to global options
    hoomd.context.options.mode = cmd_options.mode;
    hoomd.context.options.gpu = cmd_options.gpu;
    hoomd.context.options.gpu_error_checking = cmd_options.gpu_error_checking;
    hoomd.context.options.min_cpu = cmd_options.min_cpu;
    hoomd.context.options.ignore_display = cmd_options.ignore_display;

    hoomd.context.options.nx = cmd_options.nx;
    hoomd.context.options.ny = cmd_options.ny;
    hoomd.context.options.nz = cmd_options.nz;
    hoomd.context.options.linear = cmd_options.linear
    hoomd.context.options.onelevel = cmd_options.onelevel
    hoomd.context.options.single_mpi = cmd_options.single_mpi
    hoomd.context.options.nthreads = cmd_options.nthreads

    if cmd_options.notice_level is not None:
        hoomd.context.options.notice_level = cmd_options.notice_level;
        hoomd.context.msg.setNoticeLevel(hoomd.context.options.notice_level);

    if cmd_options.msg_file is not None:
        hoomd.context.options.msg_file = cmd_options.msg_file;
        hoomd.context.msg.openFile(hoomd.context.options.msg_file);

    if cmd_options.shared_msg_file is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error("Shared log files are only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        hoomd.context.options.shared_msg_file = cmd_options.shared_msg_file;
        hoomd.context.msg.setSharedFile(hoomd.context.options.shared_msg_file);

    if cmd_options.nrank is not None:
        if not _hoomd.is_MPI_available():
            hoomd.context.msg.error("The --nrank option is only available in MPI builds.\n");
            raise RuntimeError('Error setting option');
        # check validity
        nrank = int(cmd_options.nrank)
        if (_hoomd.ExecutionConfiguration.getNRanksGlobal() % nrank):
            hoomd.context.msg.error("Total number of ranks is not a multiple of --nrank\n");
            raise RuntimeError('Error checking option');
        hoomd.context.options.nrank = nrank

    if cmd_options.user is not None:
        hoomd.context.options.user = shlex.split(cmd_options.user);
Example 35
def initialize(args=None, memory_traceback=False, mpi_comm=None):
    R""" Initialize the execution context

    Args:
        args (str): Arguments to parse. When *None*, parse the arguments passed on the command line.
        memory_traceback (bool): If true, enable memory allocation tracking (*only for debugging/profiling purposes*)
        mpi_comm: Accepts an mpi4py communicator. Use this argument to perform many independent hoomd simulations
                  where you communicate between those simulations using your own mpi4py code.

    :py:func:`hoomd.context.initialize()` parses the command line arguments given, sets the options and initializes MPI and GPU execution
    (if any). By default, :py:func:`hoomd.context.initialize()` reads arguments given on the command line. Provide a string to :py:func:`hoomd.context.initialize()`
    to set the launch configuration within the job script.

    :py:func:`hoomd.context.initialize()` can be called more than once in a script. However, the execution parameters are fixed on the first call
    and *args* is ignored. Subsequent calls to :py:func:`hoomd.context.initialize()` create a new :py:class:`SimulationContext` and set it current. This
    behavior is primarily to support use of hoomd in Jupyter notebooks, so that a new clean simulation context is
    set when rerunning the notebook within an existing kernel.

    Example::

        from hoomd import *
        context.initialize();
        context.initialize("--mode=gpu --nrank=64");
        context.initialize("--mode=cpu --nthreads=64");

        world = MPI.COMM_WORLD
        comm = world.Split(world.Get_rank(), 0)
        hoomd.context.initialize(mpi_comm=comm)

    """
    global exec_conf, msg, options, current, _prev_args

    if exec_conf is not None:
        if args != _prev_args:
            msg.warning("Ignoring new options, cannot change execution mode after initialization.\n");
        current = SimulationContext();
        return current

    _prev_args = args;

    options = hoomd.option.options();
    hoomd.option._parse_command_line(args);

    # Check to see if we are built without MPI support and the user used mpirun
    if (not _hoomd.is_MPI_available() and not options.single_mpi
        and (    'OMPI_COMM_WORLD_RANK' in os.environ
              or 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ
              or 'PMI_RANK' in os.environ
              or 'ALPS_APP_PE' in os.environ)
       ):
        print('HOOMD-blue is built without MPI support, but seems to have been launched with mpirun');
        print('exiting now to prevent many sequential jobs from starting');
        raise RuntimeError('Error launching hoomd')

    # output the version info on initialization
    msg.notice(1, _hoomd.output_version_info())

    # ensure creation of global bibliography to print HOOMD base citations
    cite._ensure_global_bib()

    exec_conf = _create_exec_conf(mpi_comm);

    # set memory tracing option
    exec_conf.setMemoryTracing(memory_traceback)

    current = SimulationContext();
    return current
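Because the execution parameters are fixed on the first call, later calls only swap in a fresh context. A minimal sketch of that re-initialization behavior (assuming a CPU build)::

    import hoomd

    ctx1 = hoomd.context.initialize("--mode=cpu")
    ctx2 = hoomd.context.initialize("--mode=gpu")  # warns; args ignored after the first call
    assert ctx1 is not ctx2  # each call returns a new, current SimulationContext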
Example no. 36
0
def _create_exec_conf(mpi_comm):
    global exec_conf, options, msg

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    mpi_available = _hoomd.is_MPI_available()

    if options.mode == 'auto':
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults
    if options.gpu is None:
        gpu_id = -1
    else:
        gpu_id = int(options.gpu)

    if options.nrank is None:
        nrank = 0
    else:
        nrank = int(options.nrank)

    # create the specified configuration
    if mpi_comm is None:
        exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_id,
                                                  options.min_cpu,
                                                  options.ignore_display, msg,
                                                  nrank)
    else:
        if not mpi_available:
            msg.error(
                "mpi_comm provided, but MPI support was disabled at compile time\n"
            )
            raise RuntimeError("mpi_comm is not supported in serial builds")

        handled = False

        # pass in pointer to MPI_Comm object provided by mpi4py
        try:
            from mpi4py import MPI  # `import mpi4py` alone does not bind mpi4py.MPI
            if isinstance(mpi_comm, MPI.Comm):
                addr = MPI._addressof(mpi_comm)
                exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(
                    exec_mode, gpu_id, options.min_cpu, options.ignore_display,
                    msg, nrank, addr)
                handled = True
        except ImportError:
            # silently ignore when mpi4py is missing
            pass

        # undocumented case: handle plain integers as pointers to MPI_Comm objects
        if not handled and isinstance(mpi_comm, int):
            exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(
                exec_mode, gpu_id, options.min_cpu, options.ignore_display,
                msg, nrank, mpi_comm)
            handled = True

        if not handled:
            msg.error("unknown mpi_comm object: {}.\n".format(mpi_comm))
            raise RuntimeError("Invalid mpi_comm object")

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf
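The mpi4py branch works by handing C++ the raw address of the communicator handle. A standalone sketch of that conversion, runnable under ``mpirun`` with mpi4py installed::

    from mpi4py import MPI

    # split COMM_WORLD so each half could drive an independent simulation
    world = MPI.COMM_WORLD
    comm = world.Split(color=world.Get_rank() % 2, key=0)

    # _addressof() returns the integer address of the underlying MPI_Comm;
    # this is the `addr` that _make_exec_conf_mpi_comm dereferences above
    addr = MPI._addressof(comm)
    print(isinstance(addr, int))  # True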
Example no. 37
0
def _create_exec_conf(mpi_comm):
    global exec_conf, options, msg

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    mpi_available = _hoomd.is_MPI_available();

    if options.mode == 'auto':
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO;
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU;
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU;
    else:
        raise RuntimeError("Invalid mode");

    # convert None options to defaults
    if options.gpu is None:
        gpu_id = [];
    else:
        gpu_id = options.gpu;

    if options.nrank is None:
        nrank = 0;
    else:
        nrank = int(options.nrank);

    # copy the Python list of GPU ids into a C++ std::vector<int>
    gpu_vec = _hoomd.std_vector_int()
    for gpuid in gpu_id:
        gpu_vec.append(gpuid)

    # create the specified configuration
    if mpi_comm is None:
        exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_vec, options.min_cpu, options.ignore_display, msg, nrank);
    else:
        if not mpi_available:
            msg.error("mpi_comm provided, but MPI support was disabled at compile time\n");
            raise RuntimeError("mpi_comm is not supported in serial builds");

        handled = False;

        # pass in pointer to MPI_Comm object provided by mpi4py
        try:
            from mpi4py import MPI  # `import mpi4py` alone does not bind mpi4py.MPI
            if isinstance(mpi_comm, MPI.Comm):
                addr = MPI._addressof(mpi_comm);
                exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(exec_mode, gpu_vec, options.min_cpu, options.ignore_display, msg, nrank, addr);
                handled = True
        except ImportError:
            # silently ignore when mpi4py is missing
            pass

        # undocumented case: handle plain integers as pointers to MPI_Comm objects
        if not handled and isinstance(mpi_comm, int):
            exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(exec_mode, gpu_vec, options.min_cpu, options.ignore_display, msg, nrank, mpi_comm);
            handled = True

        if not handled:
            msg.error("unknown mpi_comm object: {}.\n".format(mpi_comm));
            raise RuntimeError("Invalid mpi_comm object");

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True);

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf;
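This second variant takes a list of GPU ids instead of a single id, for multi-GPU execution. A hedged sketch of how a comma-separated ``--gpu`` value could become that list (the option string is hypothetical)::

    opt = "0,1,2"  # e.g. from a hypothetical `--gpu=0,1,2`
    gpu_id = [int(tok) for tok in opt.split(',')]
    print(gpu_id)  # [0, 1, 2], appended one by one into _hoomd.std_vector_int()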
Example no. 38
0

def get_step():
    """ Get the current simulation time step.

    Returns:
        The current simulation time step.

    Example::

        print(hoomd.get_step())
    """

    # check if initialization has occurred
    if not init.is_initialized():
        context.msg.error("Cannot get step before initialization\n");
        raise RuntimeError('Error getting step');

    return context.current.system.getCurrentTimeStep();
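
A short usage sketch; the lattice system and run length are placeholders::

    import hoomd

    hoomd.context.initialize("--mode=cpu")
    hoomd.init.create_lattice(unitcell=hoomd.lattice.sc(a=1.0), n=5)  # placeholder system
    hoomd.run(100)
    print(hoomd.get_step())  # 100 on a freshly created system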

# Check to see if we are built without MPI support and the user used mpirun
if (not _hoomd.is_MPI_available()
    and (    'OMPI_COMM_WORLD_RANK' in os.environ
          or 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ
          or 'PMI_RANK' in os.environ
          or 'ALPS_APP_PE' in os.environ)
   ):
    print('HOOMD-blue is built without MPI support, but seems to have been launched with mpirun');
    print('exiting now to prevent many sequential jobs from starting');
    raise RuntimeError('Error launching hoomd')
Example no. 39
0
def _get_proc_name():
    if _hoomd.is_MPI_available():
        return _hoomd.get_mpi_proc_name()
    else:
        return platform.node()
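
For reference, mpi4py exposes the same underlying MPI call (``MPI_Get_processor_name``) that the MPI branch presumably wraps::

    from mpi4py import MPI

    # on an MPI launch this reports the host each rank landed on,
    # analogous to _hoomd.get_mpi_proc_name()
    print(MPI.Get_processor_name())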