Beispiel #1
0
def _create_exec_conf():
    """Create and cache the global ``ExecutionConfiguration``.

    Reads the module-level ``options`` (mode, gpu, nrank, min_cpu,
    ignore_display, gpu_error_checking, nthreads) and ``msg`` objects,
    builds a ``_hoomd.ExecutionConfiguration``, and stores it in the
    module-level ``exec_conf`` so later calls return the cached object.

    Returns:
        The cached or newly created execution configuration.

    Raises:
        RuntimeError: if ``options.mode`` is invalid, or if ``auto`` mode
            is requested on the nyx/flux clusters (which require an
            explicit mode).
    """
    global exec_conf, options, msg

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    # error out on nyx/flux if the auto mode is set: these clusters need
    # an explicit --mode so jobs land on the intended hardware
    if options.mode == 'auto':
        host = _get_proc_name()
        if "flux" in host or "nyx" in host:
            msg.error(
                "--mode=gpu or --mode=cpu must be specified on nyx/flux\n")
            raise RuntimeError("Error initializing")
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults: -1 means "auto-select GPU",
    # 0 ranks means "use all ranks in the communicator"
    gpu_id = -1 if options.gpu is None else int(options.gpu)
    nrank = 0 if options.nrank is None else int(options.nrank)

    # create the specified configuration and cache it in the global
    exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_id,
                                              options.min_cpu,
                                              options.ignore_display, msg,
                                              nrank)

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf
Beispiel #2
0
    def __init__(self,
                 num_cpu_threads=None,
                 communicator=None,
                 msg_file=None,
                 notice_level=2):
        """Initialize a CPU execution device.

        Builds a CPU-mode ``ExecutionConfiguration`` (with an empty GPU
        id list) on top of the base device, then applies the optional
        thread-count override.
        """
        super().__init__(communicator, notice_level, msg_file)

        cpu_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
        self._cpp_exec_conf = _hoomd.ExecutionConfiguration(
            cpu_mode, [], self.communicator.cpp_mpi_conf, self._cpp_msg)

        # only touch the thread count when the caller asked for one
        if num_cpu_threads is not None:
            self.num_cpu_threads = num_cpu_threads
    def __init__(self,
                 nthreads=None,
                 communicator=None,
                 msg_file=None,
                 shared_msg_file=None,
                 notice_level=2):
        """Initialize an auto-mode execution device.

        Sets up the base device, configures the TBB thread count, and
        creates an AUTO-mode ``ExecutionConfiguration`` with an empty
        GPU id vector.
        """
        _device.__init__(self, communicator, notice_level, msg_file,
                         shared_msg_file)

        _init_nthreads(nthreads)

        auto_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
        empty_gpus = _hoomd.std_vector_int()
        self.cpp_exec_conf = _hoomd.ExecutionConfiguration(
            auto_mode, empty_gpus, False, False, self.comm.cpp_mpi_conf,
            self.cpp_msg)
Beispiel #4
0
    def __init__(self,
                 gpu_ids=None,
                 num_cpu_threads=None,
                 communicator=None,
                 msg_file=None,
                 notice_level=2):
        """Initialize a GPU execution device.

        Builds a GPU-mode ``ExecutionConfiguration`` for the requested
        GPU ids (``None`` selects automatically via an empty list), then
        applies the optional thread-count override.
        """
        super().__init__(communicator, notice_level, msg_file)

        # convert None options to defaults: empty list means auto-select
        ids = [] if gpu_ids is None else gpu_ids

        gpu_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
        self._cpp_exec_conf = _hoomd.ExecutionConfiguration(
            gpu_mode, ids, self.communicator.cpp_mpi_conf, self._cpp_msg)

        # only touch the thread count when the caller asked for one
        if num_cpu_threads is not None:
            self.num_cpu_threads = num_cpu_threads
Beispiel #5
0
def _create_exec_conf(mpi_config, msg, options):
    """Create and cache the global ``ExecutionConfiguration``.

    Args:
        mpi_config: MPI configuration object passed through to the C++
            ``ExecutionConfiguration`` constructor.
        msg: Messenger object used for output.
        options: Option namespace providing ``mode``, ``gpu``,
            ``min_cpu``, ``ignore_display``, ``gpu_error_checking`` and
            ``nthreads``.

    Returns:
        The cached or newly created execution configuration (stored in
        the module-level ``exec_conf``).

    Raises:
        RuntimeError: if ``options.mode`` is not one of
            ``'auto'``/``'cpu'``/``'gpu'``.
    """
    global exec_conf

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    if options.mode == 'auto':
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults (empty id list = auto-select)
    gpu_id = [] if options.gpu is None else options.gpu

    # copy the python list into the C++ vector type
    gpu_vec = _hoomd.std_vector_int()
    for gpuid in gpu_id:
        gpu_vec.append(gpuid)

    # create the specified configuration
    # BUG FIX: the original referenced the undefined name ``mpi_conf``;
    # the parameter is named ``mpi_config``.
    exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_vec,
                                              options.min_cpu,
                                              options.ignore_display,
                                              mpi_config, msg)

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf
    def __init__(self,
                 gpu_ids=None,
                 communicator=None,
                 msg_file=None,
                 shared_msg_file=None,
                 notice_level=2):
        """Initialize a GPU execution device.

        Sets up the base device, copies the requested GPU ids (``None``
        becomes an empty, auto-selecting list) into the C++ vector type,
        and creates the GPU-mode ``ExecutionConfiguration``.
        """
        _device.__init__(self, communicator, notice_level, msg_file,
                         shared_msg_file)

        # convert None options to defaults and copy into the C++ vector
        gpu_vec = _hoomd.std_vector_int()
        for gid in ([] if gpu_ids is None else gpu_ids):
            gpu_vec.append(gid)

        gpu_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
        self.cpp_exec_conf = _hoomd.ExecutionConfiguration(
            gpu_mode, gpu_vec, False, False, self.comm.cpp_mpi_conf,
            self.cpp_msg)
Beispiel #7
0
def _create_exec_conf(mpi_comm):
    """Create and cache the global ``ExecutionConfiguration``.

    Args:
        mpi_comm: ``None`` for the default communicator, an
            ``mpi4py.MPI.Comm`` instance, or (undocumented) a plain
            integer holding the address of an ``MPI_Comm`` object.

    Returns:
        The cached or newly created execution configuration (stored in
        the module-level ``exec_conf``).

    Raises:
        RuntimeError: on an invalid ``options.mode``, when ``mpi_comm``
            is given in a serial (non-MPI) build, or when ``mpi_comm``
            is of an unrecognized type.
    """
    global exec_conf, options, msg

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    mpi_available = _hoomd.is_MPI_available()

    if options.mode == 'auto':
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults: -1 means "auto-select GPU",
    # 0 ranks means "use all ranks in the communicator"
    gpu_id = -1 if options.gpu is None else int(options.gpu)
    nrank = 0 if options.nrank is None else int(options.nrank)

    # create the specified configuration
    if mpi_comm is None:
        exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_id,
                                                  options.min_cpu,
                                                  options.ignore_display, msg,
                                                  nrank)
    else:
        if not mpi_available:
            msg.error(
                "mpi_comm provided, but MPI support was disabled at compile time\n"
            )
            raise RuntimeError("mpi_comm is not supported in serial builds")

        handled = False

        # pass in pointer to MPI_Comm object provided by mpi4py
        try:
            # BUG FIX: ``import mpi4py`` alone does not import the
            # ``mpi4py.MPI`` submodule; import it explicitly so the
            # attribute accesses below cannot raise AttributeError.
            import mpi4py.MPI
            if isinstance(mpi_comm, mpi4py.MPI.Comm):
                addr = mpi4py.MPI._addressof(mpi_comm)
                exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(
                    exec_mode, gpu_id, options.min_cpu, options.ignore_display,
                    msg, nrank, addr)
                handled = True
        except ImportError:
            # silently ignore when mpi4py is missing; fall through to the
            # integer-pointer case below
            pass

        # undocumented case: handle plain integers as pointers to MPI_Comm objects
        if not handled and isinstance(mpi_comm, int):
            exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(
                exec_mode, gpu_id, options.min_cpu, options.ignore_display,
                msg, nrank, mpi_comm)
            handled = True

        if not handled:
            msg.error("unknown mpi_comm object: {}.\n".format(mpi_comm))
            raise RuntimeError("Invalid mpi_comm object")

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf