def __init__(self, nthreads=None, communicator=None, msg_file=None,
             shared_msg_file=None, notice_level=2):
    _device.__init__(self, communicator, notice_level, msg_file,
                     shared_msg_file)

    # set the number of TBB threads before the execution configuration is built
    _init_nthreads(nthreads)

    # AUTO mode with an empty GPU id list; the two False flags correspond to
    # min_cpu and ignore_display in the other constructor calls below
    self.cpp_exec_conf = _hoomd.ExecutionConfiguration(
        _hoomd.ExecutionConfiguration.executionMode.AUTO,
        _hoomd.std_vector_int(),
        False,
        False,
        self.comm.cpp_mpi_conf,
        self.cpp_msg)
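
# Usage sketch (illustrative, not part of the original source): assuming the
# __init__ above belongs to the auto-selecting device class exported as
# hoomd.device.auto_select (the class name and module path are assumptions),
# every argument is optional, and the TBB thread count and messenger
# behaviour can be set at construction time.

import hoomd

device = hoomd.device.auto_select(nthreads=4,      # cap TBB threads (TBB builds only)
                                  notice_level=1,  # quieter console output
                                  msg_file="run.log")
# the wrapped C++ ExecutionConfiguration is what the rest of the package uses
exec_conf = device.cpp_exec_conf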
def _create_exec_conf(mpi_config, msg, options):
    global exec_conf

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    if options.mode == 'auto':
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults
    if options.gpu is None:
        gpu_id = []
    else:
        gpu_id = options.gpu

    # copy the requested GPU ids into a C++ vector
    gpu_vec = _hoomd.std_vector_int()
    for gpuid in gpu_id:
        gpu_vec.append(gpuid)

    # create the specified configuration
    exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_vec,
                                              options.min_cpu,
                                              options.ignore_display,
                                              mpi_config, msg)

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf
def __init__(self, gpu_ids=None, communicator=None, msg_file=None,
             shared_msg_file=None, notice_level=2):
    _device.__init__(self, communicator, notice_level, msg_file,
                     shared_msg_file)

    # convert None options to defaults
    if gpu_ids is None:
        gpu_id = []
    else:
        gpu_id = gpu_ids

    # copy the requested GPU ids into a C++ vector
    gpu_vec = _hoomd.std_vector_int()
    for gpuid in gpu_id:
        gpu_vec.append(gpuid)

    # GPU mode; min_cpu and ignore_display are both disabled
    self.cpp_exec_conf = _hoomd.ExecutionConfiguration(
        _hoomd.ExecutionConfiguration.executionMode.GPU,
        gpu_vec,
        False,
        False,
        self.comm.cpp_mpi_conf,
        self.cpp_msg)
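
# Usage sketch (illustrative, not part of the original source): assuming the
# __init__ above belongs to the GPU device class exported as hoomd.device.gpu
# (the class name and module path are assumptions), gpu_ids selects specific
# devices, and None passes an empty id vector so the C++ layer chooses.

import hoomd

single = hoomd.device.gpu()               # let the execution configuration pick a GPU
multi = hoomd.device.gpu(gpu_ids=[0, 1])  # run on two specific GPUs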
def _create_exec_conf(mpi_comm):
    global exec_conf, options, msg

    # use a cached execution configuration if available
    if exec_conf is not None:
        return exec_conf

    mpi_available = _hoomd.is_MPI_available()

    if options.mode == 'auto':
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.AUTO
    elif options.mode == "cpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.CPU
    elif options.mode == "gpu":
        exec_mode = _hoomd.ExecutionConfiguration.executionMode.GPU
    else:
        raise RuntimeError("Invalid mode")

    # convert None options to defaults
    if options.gpu is None:
        gpu_id = []
    else:
        gpu_id = options.gpu

    if options.nrank is None:
        nrank = 0
    else:
        nrank = int(options.nrank)

    # copy the requested GPU ids into a C++ vector
    gpu_vec = _hoomd.std_vector_int()
    for gpuid in gpu_id:
        gpu_vec.append(gpuid)

    # create the specified configuration
    if mpi_comm is None:
        exec_conf = _hoomd.ExecutionConfiguration(exec_mode, gpu_vec,
                                                  options.min_cpu,
                                                  options.ignore_display,
                                                  msg, nrank)
    else:
        if not mpi_available:
            msg.error("mpi_comm provided, but MPI support was disabled "
                      "at compile time\n")
            raise RuntimeError("mpi_comm is not supported in serial builds")

        handled = False

        # pass in pointer to MPI_Comm object provided by mpi4py
        try:
            import mpi4py.MPI
            if isinstance(mpi_comm, mpi4py.MPI.Comm):
                addr = mpi4py.MPI._addressof(mpi_comm)
                exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(
                    exec_mode, gpu_vec, options.min_cpu,
                    options.ignore_display, msg, nrank, addr)
                handled = True
        except ImportError:
            # silently ignore when mpi4py is missing
            pass

        # undocumented case: handle plain integers as pointers to MPI_Comm objects
        if not handled and isinstance(mpi_comm, int):
            exec_conf = _hoomd.ExecutionConfiguration._make_exec_conf_mpi_comm(
                exec_mode, gpu_vec, options.min_cpu, options.ignore_display,
                msg, nrank, mpi_comm)
            handled = True

        if not handled:
            msg.error("unknown mpi_comm object: {}.\n".format(mpi_comm))
            raise RuntimeError("Invalid mpi_comm object")

    # if gpu_error_checking is set, enable it on the GPU
    if options.gpu_error_checking:
        exec_conf.setCUDAErrorChecking(True)

    if _hoomd.is_TBB_available():
        # set the number of TBB threads as necessary
        if options.nthreads is not None:
            exec_conf.setNumThreads(options.nthreads)

    return exec_conf
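
# Sketch of the mpi4py hand-off used above (illustrative, not part of the
# original source): mpi4py reports the address of the underlying MPI_Comm
# handle, and that integer is what the mpi4py branch forwards to
# _make_exec_conf_mpi_comm. Splitting COMM_WORLD is one way to obtain a
# communicator other than the world communicator.

from mpi4py import MPI

world = MPI.COMM_WORLD
# e.g. split the job into two partitions on rank parity
sub_comm = world.Split(color=world.rank % 2)
addr = MPI._addressof(sub_comm)  # integer address of the C MPI_Comm handle
# a plain integer like addr is also what the undocumented int branch above
# forwards directly to _make_exec_conf_mpi_comm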