def processCommandLine(self, options):
    """Apply parsed command-line options to the simulation environment.

    Not processed here:
      -p, --print-num-params, --print-all-params
      --on-io-error

    Parameters:
      options: parsed option namespace (loglevel, profiling_enabled,
               num_fftw_threads, gpu32, gpu64, ...).
    """
    # Loglevel: -l
    logger.logger.setLevel(logger.LOGLEVELS[options.loglevel][0])
    for line in self.banner:
        logger.info(line)

    # Profiling: --prof
    if options.profiling_enabled:
        self.enableProfiling(True)
        logger.info("Profiling enabled")

    # Number of fftw threads: -t
    if options.num_fftw_threads != 1:
        self.setFFTWThreads(options.num_fftw_threads)

    # GPU enable: -g, -G
    if options.gpu32 and options.gpu64:
        logger.warn("Ignoring -g because -G was given")

    def determine_gpu_id(arg):
        # 'auto': pick the first available GPU reported by nvidia-smi.
        if arg == "auto":
            try:
                gpus = nvidia_smi.run()
                available = nvidia_smi.available(gpus)
                # BUGFIX: was 'len(available == 0)', which raised TypeError
                # (swallowed by the bare except) so auto-detection always
                # fell back to device -1.
                if len(available) == 0:
                    logger.warn("Could not find any available GPUs (out of %s detected GPUs)." % len(gpus))
                    return -1
                return available[0]
            except Exception:
                # nvidia-smi missing or its output unparsable: let the
                # driver choose a device.
                return -1
        else:
            # id specified by user
            return int(arg)

    if options.gpu64:
        cuda_mode, cuda_dev = magneto.CUDA_64, determine_gpu_id(options.gpu64)
    elif options.gpu32:
        cuda_mode, cuda_dev = magneto.CUDA_32, determine_gpu_id(options.gpu32)
    else:
        cuda_mode, cuda_dev = magneto.CUDA_DISABLED, -1

    if cuda_mode != magneto.CUDA_DISABLED and not self.haveCudaSupport():
        logger.error("Can't enable GPU computation (-g or -G flags): CUDA libraries could not be loaded.")
        logger.error("-> Falling back to CPU.")
    self.enableCuda(cuda_mode, cuda_dev)

    cuda_support = "yes" if self.haveCudaSupport() else "no"
    logger.info("CUDA GPU support: %s", cuda_support)
    logger.info("Running on host %s", socket.gethostname())
def __init__(self, run, params):
    """Select parameter sets for local execution.

    Uses the optional -p / 'prange' command-line option; defaults to
    selecting every parameter set.

    Parameters:
      run:    simulation run callable/object forwarded to the base class.
      params: parameter sets forwarded to the base class.
    """
    super(LocalController, self).__init__(run, params)
    idx_range = getattr(cfg.options, 'prange', range(self.num_params))
    # BUGFIX: previously out-of-range indices were warned about but still
    # passed to select(); now they are excluded, consistent with
    # EnvironmentVariableController which selects [] on an invalid index.
    valid = []
    for i in idx_range:
        if 0 <= i < self.num_params:
            valid.append(i)
        else:
            logger.warn("Controller: No such parameter set with index %s!" % i)
    if len(valid) == 0:
        logger.warn("Controller: No parameter sets selected!")
    self.select(valid)
def __init__(self, run, params, env, offset=0):
    """Select the parameter set whose index is given by an environment variable.

    Parameters:
      run:    simulation run callable/object forwarded to the base class.
      params: parameter sets forwarded to the base class.
      env:    name of the environment variable holding the (1-based or
              offset) parameter index.
      offset: value subtracted from the environment variable to get a
              0-based index (default 0).

    Raises:
      KeyError / ValueError if the variable is missing or not an integer
      (logged, then re-raised).
    """
    super(EnvironmentVariableController, self).__init__(run, params)
    try:
        p_idx = int(os.environ[env]) - offset
    except (KeyError, ValueError):
        logger.error("Could not read environment variable '%s'." % env)
        raise
    # BUGFIX: the range check was '0 >= p_idx < self.num_params', which
    # accepted negative indices and rejected every valid index > 0.
    if 0 <= p_idx < self.num_params:
        self.select([p_idx])
    else:
        logger.warn("Controller: No such parameter set with index %s!" % p_idx)
        self.select([])
def determine_gpu_id(arg):
    """Resolve a GPU selection argument to a CUDA device id.

    Parameters:
      arg: "auto" to pick the first available GPU via nvidia-smi, or an
           integer (or integer string) naming a device explicitly.

    Returns:
      The device id, or -1 if auto-detection fails (meaning: let the
      driver choose).
    """
    if arg == "auto":
        try:
            gpus = nvidia_smi.run()
            available = nvidia_smi.available(gpus)
            # BUGFIX: was 'len(available == 0)', which raised TypeError
            # (swallowed by the bare except), so auto-detection always
            # returned -1.
            if len(available) == 0:
                logger.warn("Could not find any available GPUs (out of %s detected GPUs)." % len(gpus))
                return -1
            return available[0]
        except Exception:
            # nvidia-smi missing or unparsable output: fall back to -1.
            return -1
    else:
        # id specified by user
        return int(arg)
def determine_gpu_id(arg):
    """Resolve a GPU selection argument to a CUDA device id.

    Parameters:
      arg: "auto" to pick the first available GPU via nvidia-smi, or an
           integer (or integer string) naming a device explicitly.

    Returns:
      The device id, or -1 if auto-detection fails (meaning: let the
      driver choose).
    """
    if arg == "auto":
        try:
            gpus = nvidia_smi.run()
            available = nvidia_smi.available(gpus)
            # BUGFIX: was 'len(available == 0)', which raised TypeError
            # (swallowed by the bare except), so auto-detection always
            # returned -1.
            if len(available) == 0:
                logger.warn(
                    "Could not find any available GPUs (out of %s detected GPUs)." % len(gpus))
                return -1
            return available[0]
        except Exception:
            # nvidia-smi missing or unparsable output: fall back to -1.
            return -1
    else:
        # id specified by user
        return int(arg)
def create_controller(run, params, *args, **kwargs):
    """
    Build and return the controller matching the environment the script
    runs in:

    TODO: Explain.
    """
    # Case 1: Report to stdout, then exit, if requested by command
    #         line options.
    want_num = cfg.options.print_num_params
    want_all = cfg.options.print_all_params
    if want_num or want_all:
        return PrintParametersController(run, params, want_num, want_all)

    # Case 2: Use environment variable to select parameter set
    env_name = kwargs.pop("env", None)
    env_offset = kwargs.pop("env_offset", 0)
    if env_name:
        if env_name in os.environ:
            return EnvironmentVariableController(run, params, env_name, env_offset)
        logger.warn(
            "Environment variable '%s' not found, ignoring 'env' "
            "parameter in controller creation" % env_name
        )

    # Case 3: Sun grid engine controller
    if kwargs.pop("sun_grid_engine", False):
        if "SGE_TASK_ID" in os.environ:
            return SunGridEngineController(run, params)
        logger.warn(
            "Environment variable 'SGE_TASK_ID' not found, "
            "ignoring 'sun_grid_engine' parameter in controller creation"
        )

    # Case 4: This controller is used when the script was executed
    #         locally. It optionally uses the -p argument passed to
    #         the simulation script.
    return LocalController(run, params)
def Controller(run, params, *args, **kwargs):
    """Deprecated alias that forwards every argument to create_controller()."""
    # Kept only for backward compatibility with older simulation scripts.
    logger.warn("The 'Controller' function is deprecated, please use "
                "'create_controller' instead.")
    return create_controller(run, params, *args, **kwargs)
def __init__(self, mesh, step_size):
    """Set up a fixed-step classic Runge-Kutta (RK4) evolver on *mesh*.

    Parameters:
      mesh:      simulation mesh forwarded to the base evolver.
      step_size: fixed integration step, stored as float.
    """
    super(RungeKutta4, self).__init__(mesh)
    logger.warn("RungeKutta4 evolver: Performance hint: Don't use me.")
    self.step_size = float(step_size)
def __init__(self, mesh, step_size):
    """Set up a fixed-step forward-Euler evolver on *mesh*.

    Parameters:
      mesh:      simulation mesh forwarded to the base evolver.
      step_size: fixed integration step, stored as float.
    """
    super(Euler, self).__init__(mesh)
    logger.warn("Euler evolver: Performance hint: Don't use me.")
    self.step_size = float(step_size)
def processCommandLine(self, options):
    """Apply parsed command-line options to the simulation environment.

    Not processed here:
      -p, --print-num-params, --print-all-params
      --on-io-error

    Parameters:
      options: parsed option namespace (loglevel, profiling_enabled,
               num_fftw_threads, gpu32, gpu64, ...).
    """
    # Loglevel: -l
    logger.logger.setLevel(logger.LOGLEVELS[options.loglevel][0])
    for line in self.banner:
        logger.info(line)

    # Profiling: --prof
    if options.profiling_enabled:
        self.enableProfiling(True)
        logger.info("Profiling enabled")

    # Number of fftw threads: -t
    if options.num_fftw_threads != 1:
        self.setFFTWThreads(options.num_fftw_threads)

    # GPU enable: -g, -G
    if options.gpu32 and options.gpu64:
        logger.warn("Ignoring -g because -G was given")

    def determine_gpu_id(arg):
        # 'auto': pick the first available GPU reported by nvidia-smi.
        if arg == "auto":
            try:
                gpus = nvidia_smi.run()
                available = nvidia_smi.available(gpus)
                # BUGFIX: was 'len(available == 0)', which raised TypeError
                # (swallowed by the bare except) so auto-detection always
                # fell back to device -1.
                if len(available) == 0:
                    logger.warn(
                        "Could not find any available GPUs (out of %s detected GPUs)." % len(gpus))
                    return -1
                return available[0]
            except Exception:
                # nvidia-smi missing or its output unparsable: let the
                # driver choose a device.
                return -1
        else:
            # id specified by user
            return int(arg)

    if options.gpu64:
        cuda_mode, cuda_dev = magneto.CUDA_64, determine_gpu_id(
            options.gpu64)
    elif options.gpu32:
        cuda_mode, cuda_dev = magneto.CUDA_32, determine_gpu_id(
            options.gpu32)
    else:
        cuda_mode, cuda_dev = magneto.CUDA_DISABLED, -1

    if cuda_mode != magneto.CUDA_DISABLED and not self.haveCudaSupport():
        logger.error(
            "Can't enable GPU computation (-g or -G flags): CUDA libraries could not be loaded."
        )
        logger.error("-> Falling back to CPU.")
    self.enableCuda(cuda_mode, cuda_dev)

    cuda_support = "yes" if self.haveCudaSupport() else "no"
    logger.info("CUDA GPU support: %s", cuda_support)
    logger.info("Running on host %s", socket.gethostname())