def __init__(self, config, name):
	"""Initialize the Condor backend: executors, JDL writer and pool/broker config."""
	# Helper that manages per-job sandbox directories
	self._sandbox_helper = SandboxHelper(config)
	self._error_log_fn = config.get_work_path('error.tar')
	# cancel also purges the local sandbox of the affected jobs
	cancel_executor = CancelAndPurgeJobs(config, CondorCancelJobs(config),
		LocalPurgeJobs(config, self._sandbox_helper))
	BasicWMS.__init__(self, config, name,
		check_executor=CheckJobsMissingState(config, CondorCheckJobs(config)),
		cancel_executor=cancel_executor)
	# persistent random task id - tags jobs belonging to this task
	self._task_id = config.get('task id', md5_hex(str(time.time())), persistent=True)  # FIXME
	# finalize config state by reading values or setting to defaults
	# load keys for condor pool ClassAds
	self._jdl_writer = CondorJDLWriter(config)
	self._universe = config.get('universe', 'vanilla', on_change=None)
	self._pool_req_dict = config.get_dict('poolArgs req', {})[0]
	self._pool_work_dn = None
	self._proc_factory = None
	# filled in later by _init_pool_interface
	(self._submit_exec, self._transfer_exec) = (None, None)
	# prepare interfaces for local/remote/ssh pool access
	self._remote_type = config.get_enum('remote Type', PoolType, PoolType.LOCAL)
	self._init_pool_interface(config)
	# Sandbox base path where individual job data is stored, staged and returned to
	self._sandbox_dn = config.get_path('sandbox path',
		config.get_work_path('sandbox'), must_exist=False)
	# broker for selecting sites - FIXME: this looks wrong... pool != site
	self._pool_host_list = config.get_list(['poolhostlist', 'pool host list'], [])
	self._broker_site = config.get_plugin('site broker', 'UserBroker', cls=Broker,
		bind_kwargs={'tags': [self]},
		pargs=('sites', 'sites', lambda: self._pool_host_list))
	# only 'hard' and 'ignore' wall time handling are supported by this backend
	self._wall_time_mode = config.get_enum('wall time mode', WallTimeMode, WallTimeMode.ignore,
		subset=[WallTimeMode.hard, WallTimeMode.ignore])
	self._blacklist_nodes = config.get_list(['blacklist nodes'], [], on_change=None)
	self._user_requirements = config.get('user requirements', '', on_change=None)
def __init__(self, config, name, submit_exec, check_executor, cancel_executor,
		nodes_finder=None, queues_finder=None):
	"""Initialize a local batch system backend with the given executors and finders."""
	config.set('broker', 'RandomBroker')
	config.set_int('wait idle', 20)
	config.set_int('wait work', 5)
	self._submit_exec = submit_exec
	self._sandbox_helper = SandboxHelper(config)
	# cancelling jobs also purges their local sandboxes
	BasicWMS.__init__(self, config, name, check_executor=check_executor,
		cancel_executor=CancelAndPurgeJobs(config, cancel_executor,
			LocalPurgeJobs(config, self._sandbox_helper)))

	def _get_nodes_list():
		# returns None when no nodes finder is configured
		if nodes_finder:
			return lmap(lambda x: x['name'], nodes_finder.discover())
	self._broker_site = config.get_plugin('site broker', 'UserBroker', cls=Broker,
		bind_kwargs={'inherit': True, 'tags': [self]},
		pargs=('sites', 'sites', _get_nodes_list))

	def _get_queues_list():
		# map queue name -> queue properties; None when no queues finder is configured
		if queues_finder:
			result = {}
			for entry in queues_finder.discover():
				result[entry.pop('name')] = entry
			return result
	self._broker_queue = config.get_plugin('queue broker', 'UserBroker', cls=Broker,
		bind_kwargs={'inherit': True, 'tags': [self]},
		pargs=('queue', 'queues', _get_queues_list))

	self._scratch_path = config.get_list('scratch path', ['TMPDIR', '/tmp'], on_change=True)
	# extra command line options passed to the submit executable
	self._submit_opt_list = shlex.split(config.get('submit options', '', on_change=None))
	self._memory = config.get_int('memory', -1, on_change=None)
def __init__(self, config, name, checkExecutor, cancelExecutor, jdlWriter=None):
	"""Initialize a grid backend - submission requires a VOMS proxy access token."""
	config.set('access token', 'VomsProxy')
	BasicWMS.__init__(self, config, name,
		checkExecutor=checkExecutor, cancelExecutor=cancelExecutor)
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls=Broker,
		tags=[self], pargs=('sites', 'sites', self.getSites))
	# virtual organisation - defaults to the group of the proxy token
	self.vo = config.get('vo', self._token.getGroup())
	self._submitParams = {}
	self._ce = config.get('ce', '', onChange=None)
	self._configVO = config.getPath('config', '', onChange=None)
	# warn when the sandbox exceeds this size - presumably MB; TODO confirm unit
	self._warnSBSize = config.getInt('warn sb size', 5, onChange=None)
	self._jobPath = config.getWorkPath('jobs')
	self._jdl_writer = jdlWriter or JDLWriter()
def __init__(self, config, name, submit_exec, output_exec, check_executor,
		cancel_executor, jdl_writer=None):
	"""Initialize a grid backend - submission requires a VOMS proxy access token."""
	config.set('access token', 'VomsProxy')
	BasicWMS.__init__(self, config, name,
		check_executor=check_executor, cancel_executor=cancel_executor)
	self._broker_site = config.get_plugin('site broker', 'UserBroker', cls=Broker,
		bind_kwargs={'tags': [self]}, pargs=('sites', 'sites', self._get_site_list))
	# virtual organisation - defaults to the group of the proxy token
	self._vo = config.get('vo', self._token.get_group())
	self._submit_exec = submit_exec
	self._submit_args_dict = {}
	self._output_exec = output_exec
	self._ce = config.get('ce', '', on_change=None)
	self._config_fn = config.get_path('config', '', on_change=None)
	# warn when the sandbox exceeds this size - presumably MB; TODO confirm unit
	self._sb_warn_size = config.get_int('warn sb size', 5, on_change=None)
	self._job_dn = config.get_work_path('jobs')
	self._jdl_writer = jdl_writer or JDLWriter()
def __init__(self, config, name, submitExec, checkExecutor, cancelExecutor,
		nodesFinder = None, queuesFinder = None):
	"""Initialize a local batch system backend with the given executors and finders."""
	config.set('broker', 'RandomBroker')
	config.setInt('wait idle', 20)
	config.setInt('wait work', 5)
	self.submitExec = submitExec
	self._sandbox_helper = SandboxHelper(config)
	# cancelling jobs also purges their local sandboxes
	BasicWMS.__init__(self, config, name, checkExecutor = checkExecutor,
		cancelExecutor = CancelAndPurgeJobs(config, cancelExecutor,
			LocalPurgeJobs(config, self._sandbox_helper)))

	def getNodes():
		# BUGFIX: use the captured nodesFinder argument - the original accessed
		# self._nodes_finder, which is never assigned, so any configured nodes
		# finder raised AttributeError on discovery (cmp. the nodes_finder variant)
		if nodesFinder:
			return lmap(lambda x: x['name'], nodesFinder.discover())
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
		inherit = True, tags = [self], pargs = ('sites', 'sites', getNodes))

	def getQueues():
		# map queue name -> queue properties; None when no queues finder is configured
		if queuesFinder:
			result = {}
			for entry in queuesFinder.discover():
				result[entry.pop('name')] = entry
			return result
	self.brokerQueue = config.getPlugin('queue broker', 'UserBroker', cls = Broker,
		inherit = True, tags = [self], pargs = ('queue', 'queues', getQueues))

	self.scratchPath = config.getList('scratch path', ['TMPDIR', '/tmp'], onChange = True)
	self.submitOpts = config.get('submit options', '', onChange = None)
	self.memory = config.getInt('memory', -1, onChange = None)
def __init__(self, config, name):
	"""Initialize the Condor backend: executors, JDL writer and pool/broker config."""
	# Helper that manages per-job sandbox directories
	self._sandbox_helper = SandboxHelper(config)
	self._error_log_fn = config.get_work_path('error.tar')
	# cancel also purges the local sandbox of the affected jobs
	cancel_executor = CancelAndPurgeJobs(config, CondorCancelJobs(config),
		LocalPurgeJobs(config, self._sandbox_helper))
	BasicWMS.__init__(self, config, name,
		check_executor=CheckJobsMissingState(config, CondorCheckJobs(config)),
		cancel_executor=cancel_executor)
	# persistent random task id - tags jobs belonging to this task
	self._task_id = config.get('task id', md5_hex(str(time.time())), persistent=True)  # FIXME
	# finalize config state by reading values or setting to defaults
	# load keys for condor pool ClassAds
	self._jdl_writer = CondorJDLWriter(config)
	self._universe = config.get('universe', 'vanilla', on_change=None)
	self._pool_req_dict = config.get_dict('poolArgs req', {})[0]
	self._pool_work_dn = None
	self._proc_factory = None
	# filled in later by _init_pool_interface
	(self._submit_exec, self._transfer_exec) = (None, None)
	# prepare interfaces for local/remote/ssh pool access
	self._remote_type = config.get_enum('remote Type', PoolType, PoolType.LOCAL)
	self._init_pool_interface(config)
	# Sandbox base path where individual job data is stored, staged and returned to
	self._sandbox_dn = config.get_path('sandbox path',
		config.get_work_path('sandbox'), must_exist=False)
	# broker for selecting sites - FIXME: this looks wrong... pool != site
	self._pool_host_list = config.get_list(['poolhostlist', 'pool host list'], [])
	self._broker_site = config.get_plugin('site broker', 'UserBroker', cls=Broker,
		bind_kwargs={'tags': [self]},
		pargs=('sites', 'sites', lambda: self._pool_host_list))
	# only 'hard' and 'ignore' wall time handling are supported by this backend
	self._wall_time_mode = config.get_enum('wall time mode', WallTimeMode, WallTimeMode.ignore,
		subset=[WallTimeMode.hard, WallTimeMode.ignore])
def __init__(self, config, name, submit_exec, check_executor, cancel_executor,
		nodes_finder=None, queues_finder=None):
	"""Initialize a local batch system backend with the given executors and finders."""
	config.set('broker', 'RandomBroker')
	config.set_int('wait idle', 20)
	config.set_int('wait work', 5)
	self._submit_exec = submit_exec
	self._sandbox_helper = SandboxHelper(config)
	# cancelling jobs also purges their local sandboxes
	BasicWMS.__init__(self, config, name, check_executor=check_executor,
		cancel_executor=CancelAndPurgeJobs(
			config, cancel_executor, LocalPurgeJobs(config, self._sandbox_helper)))

	def _get_nodes_list():
		# returns None when no nodes finder is configured
		if nodes_finder:
			return lmap(lambda x: x['name'], nodes_finder.discover())
	self._broker_site = config.get_plugin('site broker', 'UserBroker', cls=Broker,
		bind_kwargs={'inherit': True, 'tags': [self]},
		pargs=('sites', 'sites', _get_nodes_list))

	def _get_queues_list():
		# map queue name -> queue properties; None when no queues finder is configured
		if queues_finder:
			result = {}
			for entry in queues_finder.discover():
				result[entry.pop('name')] = entry
			return result
	self._broker_queue = config.get_plugin('queue broker', 'UserBroker', cls=Broker,
		bind_kwargs={'inherit': True, 'tags': [self]},
		pargs=('queue', 'queues', _get_queues_list))

	self._scratch_path = config.get_list('scratch path', ['TMPDIR', '/tmp'], on_change=True)
	# extra command line options passed to the submit executable
	self._submit_opt_list = shlex.split(
		config.get('submit options', '', on_change=None))
	self._memory = config.get_int('memory', -1, on_change=None)
def __init__(self, config, name):
	"""Initialize the Condor/GlideInWMS backend (legacy API)."""
	self._sandbox_helper = SandboxHelper(config)
	BasicWMS.__init__(self, config, name,
		checkExecutor = CheckJobsMissingState(config, Condor_CheckJobs(config)),
		cancelExecutor = CancelAndPurgeJobs(config, Condor_CancelJobs(config),
			LocalPurgeJobs(config, self._sandbox_helper)))
	# special debug out/messages/annotations - may have noticeable effect on storage and performance!
	debugLogFN = config.get('debugLog', '')
	self.debug = False
	if debugLogFN:
		self.debug = open(debugLogFN, 'a')
	######
	# md5 requires bytes on Python 3 but str on Python 2 - try both
	try:
		random_task_id = md5(str(time.time())).hexdigest()
	except TypeError:
		random_task_id = md5(str(time.time()).encode()).hexdigest()
	self.taskID = config.get('task id', random_task_id, persistent = True)  # FIXME!
	self.debugOut(""" ############################# Initialized Condor/GlideInWMS ############################# Config: %s taskID: %s Name: %s ############################# """ % (config.getConfigName(), self.taskID, name))
	# finalize config state by reading values or setting to defaults
	self.settings={
		'jdl': {
			'Universe' : config.get('Universe', 'vanilla'),
			'NotifyEmail' : config.get('NotifyEmail', ''),
			'ClassAdData' : config.getList('ClassAdData',[]),
			'JDLData' : config.getList('JDLData',[])
		},
		'pool' : {
			'hosts' : config.getList('PoolHostList',[])
		}
	}
	# prepare interfaces for local/remote/ssh pool access
	self._initPoolInterfaces(config)
	# load keys for condor pool ClassAds
	self.poolReqs = config.getDict('poolArgs req', {})[0]
	self.poolQuery = config.getDict('poolArgs query', {})[0]
	# Sandbox base path where individual job data is stored, staged and returned to
	self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist = False)
	# history query is faster with split files - check if and how this is used
	# default condor_history command works WITHOUT explicitly specified file
	self.historyFile = None
	if self.remoteType == PoolType.LOCAL and getoutput( self.configValExec + ' ENABLE_HISTORY_ROTATION').lower() == 'true':
		self.historyFile = getoutput( self.configValExec + ' HISTORY')
		if not os.path.isfile(self.historyFile):
			self.historyFile = None
	# broker for selecting Sites
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
		tags = [self], pargs = ('sites', 'sites', self.getSites))
	self.debugFlush()
def __init__(self, config, wmsName):
	"""Initialize the GlideIn backend: logger, pool interfaces and JDL settings."""
	self._initLogger()
	BasicWMS.__init__(self, config, wmsName)
	# setup the connection to pools and their interfaces
	self._sandboxDir = config.getPath('sandbox path',
		config.getWorkPath('sandbox.%s' % wmsName), mustExist = False)
	self._initPoolInterfaces(config)
	# static JDL content merged into every job description
	self._jobSettings = {
		"Universe" : config.get("universe", "vanilla"),
		"ClassAd" : config.getList("append info", []),
		"JDL" : config.getList("append opts", []),
	}
def __init__(self, config, name):
	"""Initialize the Condor/GlideInWMS backend (legacy API)."""
	self._sandbox_helper = SandboxHelper(config)
	BasicWMS.__init__(self, config, name,
		checkExecutor = CheckJobsMissingState(config, Condor_CheckJobs(config)),
		cancelExecutor = CancelAndPurgeJobs(config, Condor_CancelJobs(config),
			LocalPurgeJobs(config, self._sandbox_helper)))
	# special debug out/messages/annotations - may have noticeable effect on storage and performance!
	debugLogFN = config.get('debugLog', '')
	self.debug = False
	if debugLogFN:
		self.debug = open(debugLogFN, 'a')
	######
	# BUGFIX: md5() requires bytes on Python 3 and raised TypeError here -
	# fall back to encoding the timestamp (same approach as the sibling variant)
	try:
		random_task_id = md5(str(time.time())).hexdigest()
	except TypeError:
		random_task_id = md5(str(time.time()).encode()).hexdigest()
	self.taskID = config.get('task id', random_task_id, persistent = True)  # FIXME!
	self.debugOut(""" ############################# Initialized Condor/GlideInWMS ############################# Config: %s taskID: %s Name: %s ############################# """ % (config.getConfigName(), self.taskID, name))
	# finalize config state by reading values or setting to defaults
	self.settings={
		'jdl': {
			'Universe' : config.get('Universe', 'vanilla'),
			'NotifyEmail' : config.get('NotifyEmail', ''),
			'ClassAdData' : config.getList('ClassAdData',[]),
			'JDLData' : config.getList('JDLData',[])
		},
		'pool' : {
			'hosts' : config.getList('PoolHostList',[])
		}
	}
	# prepare interfaces for local/remote/ssh pool access
	self._initPoolInterfaces(config)
	# load keys for condor pool ClassAds
	self.poolReqs = config.getDict('poolArgs req', {})[0]
	self.poolQuery = config.getDict('poolArgs query', {})[0]
	# Sandbox base path where individual job data is stored, staged and returned to
	self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist = False)
	# history query is faster with split files - check if and how this is used
	# default condor_history command works WITHOUT explicitly specified file
	self.historyFile = None
	if self.remoteType == PoolType.LOCAL and getoutput( self.configValExec + ' ENABLE_HISTORY_ROTATION').lower() == 'true':
		self.historyFile = getoutput( self.configValExec + ' HISTORY')
		if not os.path.isfile(self.historyFile):
			self.historyFile = None
	# broker for selecting Sites
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
		tags = [self], pargs = ('sites', 'sites', self.getSites))
	self.debugFlush()
def __init__(self, config, wmsName):
	"""Initialize the Condor/GlideInWMS backend (Python 2 era variant)."""
	utils.vprint('Using batch system: Condor/GlideInWMS', -1)
	BasicWMS.__init__(self, config, wmsName)
	# special debug out/messages/annotations - may have noticeable effect on storage and performance!
	if config.get("debugLog", ""):
		self.debug=open(config.get("debugLog", ""),'a')
	else:
		self.debug=False
	######
	# persistent random task id - tags jobs belonging to this task
	self.taskID = config.get('task id', md5(str(time.time())).hexdigest(), persistent = True)  # FIXME!
	self.debugOut(""" ############################# Initialized Condor/GlideInWMS ############################# Config: %s taskID: %s Name: %s ############################# """%(config.getConfigName(),self.taskID,wmsName))
	# finalize config state by reading values or setting to defaults
	self.settings={
		"jdl": {
			"Universe" : config.get("Universe", "vanilla"),
			"NotifyEmail" : config.get("NotifyEmail", ""),
			"ClassAdData" : config.getList("ClassAdData",[]),
			"JDLData" : config.getList("JDLData",[])
		},
		"pool" : {
			"hosts" : config.getList("PoolHostList",[])
		}
	}
	# prepare interfaces for local/remote/ssh pool access
	self._initPoolInterfaces(config)
	# load keys for condor pool ClassAds
	self.poolReqs = config.getDict('poolArgs req', {})[0]
	self.poolQuery = config.getDict('poolArgs query', {})[0]
	self._formatStatusReturnQuery(config)
	# Sandbox base path where individual job data is stored, staged and returned to
	self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist = False)
	# history query is faster with split files - check if and how this is used
	# default condor_history command works WITHOUT explicitly specified file
	self.historyFile = None
	if self.remoteType == poolType.LOCAL and commands.getoutput( self.configValExec + " ENABLE_HISTORY_ROTATION").lower() == "true":
		self.historyFile = commands.getoutput( self.configValExec + " HISTORY")
		if not os.path.isfile(self.historyFile):
			self.historyFile = None
	# broker for selecting Sites
	self.brokerSite = config.getPlugin('site broker', 'UserBroker',
		cls = Broker, tags = [self]).getInstance('sites', 'sites', self.getSites)
	self.debugFlush()
def __init__(self, config, name):
	"""Initialize a grid backend - submission requires a VOMS proxy access token."""
	config.set('access token', 'VomsProxy')
	BasicWMS.__init__(self, config, name)
	self.brokerSite = config.getPlugin('site broker', 'UserBroker',
		cls = Broker, tags = [self]).getInstance('sites', 'sites', self.getSites)
	# virtual organisation - defaults to the group of the proxy token
	self.vo = config.get('vo', self._token.getGroup())
	self._submitParams = {}
	self._ce = config.get('ce', '', onChange = None)
	self._configVO = config.getPath('config', '', onChange = None)
	# warn when the sandbox exceeds this size - presumably bytes here (5 MiB); TODO confirm
	self._warnSBSize = config.getInt('warn sb size', 5 * 1024 * 1024)
	self._jobPath = config.getWorkPath('jobs')
def __init__(self, config, name, checkExecutor, cancelExecutor, jdlWriter = None):
	"""Initialize a grid backend - submission requires a VOMS proxy access token."""
	config.set('access token', 'VomsProxy')
	BasicWMS.__init__(self, config, name,
		checkExecutor = checkExecutor, cancelExecutor = cancelExecutor)
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
		tags = [self], pargs = ('sites', 'sites', self.getSites))
	# virtual organisation - defaults to the group of the proxy token
	self.vo = config.get('vo', self._token.getGroup())
	self._submitParams = {}
	self._ce = config.get('ce', '', onChange = None)
	self._configVO = config.getPath('config', '', onChange = None)
	# warn when the sandbox exceeds this size - presumably MB; TODO confirm unit
	self._warnSBSize = config.getInt('warn sb size', 5, onChange = None)
	self._jobPath = config.getWorkPath('jobs')
	self._jdl_writer = jdlWriter or JDLWriter()
def _getSandboxFiles(self, module, monitor, smList):
	"""Return the sandbox file list, extended by the proxy auth files.

	The first proxy is named '_proxy.dat', further ones '_proxy.dat.<idx>'.
	"""
	files = BasicWMS._getSandboxFiles(self, module, monitor, smList)
	for idx, authFile in enumerate(self._token.getAuthFiles()):
		# FIX: close the proxy file after reading - open(...).read() leaked the handle
		with open(authFile, 'r') as fp:
			auth_content = fp.read()
		files.append(VirtualFile(('_proxy.dat.%d' % idx).replace('.0', ''), auth_content))
	return files
def _get_sandbox_file_list(self, task, sm_list):
	"""Return the sandbox file list, extended by the proxy auth files.

	The first proxy is named '_proxy.dat', further ones '_proxy.dat.<idx>'.
	"""
	files = BasicWMS._get_sandbox_file_list(self, task, sm_list)
	for idx, auth_fn in enumerate(self._token.get_auth_fn_list()):
		# FIX: close the proxy file after reading - open(...).read() leaked the handle
		with open(auth_fn, 'r') as fp:
			auth_content = fp.read()
		files.append(VirtualFile(('_proxy.dat.%d' % idx).replace('.0', ''), auth_content))
	return files
def __init__(self, config, name, submitExec, statusExec, cancelExec):
	"""Initialize a local batch backend (legacy API) with submit/status/cancel commands."""
	config.set('broker', 'RandomBroker')
	config.setInt('wait idle', 20)
	config.setInt('wait work', 5)
	(self.submitExec, self.statusExec, self.cancelExec) = (submitExec, statusExec, cancelExec)
	BasicWMS.__init__(self, config, name)
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
		inherit = True, tags = [self], pargs = ('sites', 'sites', self.getNodes))
	self.brokerQueue = config.getPlugin('queue broker', 'UserBroker', cls = Broker,
		inherit = True, tags = [self], pargs = ('queue', 'queues', self.getQueues))
	self.sandCache = []
	# Sandbox base path where individual job data is stored, staged and returned to
	self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist = False)
	self.scratchPath = config.getList('scratch path', ['TMPDIR', '/tmp'], onChange = True)
	self.submitOpts = config.get('submit options', '', onChange = None)
	self.memory = config.getInt('memory', -1, onChange = None)
def __init__(self, config, name):
	"""Initialize a grid backend - submission requires a VOMS proxy access token."""
	config.set('access token', 'VomsProxy')
	BasicWMS.__init__(self, config, name)
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls=Broker,
		tags=[self], pargs=('sites', 'sites', self.getSites))
	# virtual organisation - defaults to the group of the proxy token
	self.vo = config.get('vo', self._token.getGroup())
	self._submitParams = {}
	self._ce = config.get('ce', '', onChange=None)
	self._configVO = config.getPath('config', '', onChange=None)
	# warn when the sandbox exceeds this size - presumably bytes here (5 MiB); TODO confirm
	self._warnSBSize = config.getInt('warn sb size', 5 * 1024 * 1024)
	self._jobPath = config.getWorkPath('jobs')
def __init__(self, config, name, submit_exec, output_exec, check_executor,
		cancel_executor, jdl_writer=None):
	"""Initialize a grid backend - submission requires a VOMS proxy access token."""
	config.set('access token', 'VomsProxy')
	BasicWMS.__init__(self, config, name,
		check_executor=check_executor, cancel_executor=cancel_executor)
	self._broker_site = config.get_plugin('site broker', 'UserBroker', cls=Broker,
		bind_kwargs={'tags': [self]}, pargs=('sites', 'sites', self._get_site_list))
	# virtual organisation - defaults to the group of the proxy token
	self._vo = config.get('vo', self._token.get_group())
	self._submit_exec = submit_exec
	self._submit_args_dict = {}
	self._output_exec = output_exec
	self._ce = config.get('ce', '', on_change=None)
	self._config_fn = config.get_path('config', '', on_change=None)
	# warn when the sandbox exceeds this size - presumably MB; TODO confirm unit
	self._sb_warn_size = config.get_int('warn sb size', 5, on_change=None)
	self._job_dn = config.get_work_path('jobs')
	self._jdl_writer = jdl_writer or JDLWriter()
def __init__(self, config, name, submitExec, statusExec, cancelExec):
	"""Initialize a local batch backend and ensure the sandbox base directory exists."""
	config.set('broker', 'RandomBroker')
	config.setInt('wait idle', 20)
	config.setInt('wait work', 5)
	(self.submitExec, self.statusExec, self.cancelExec) = (submitExec, statusExec, cancelExec)
	BasicWMS.__init__(self, config, name)
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
		inherit = True, tags = [self], pargs = ('sites', 'sites', self.getNodes))
	self.brokerQueue = config.getPlugin('queue broker', 'UserBroker', cls = Broker,
		inherit = True, tags = [self], pargs = ('queue', 'queues', self.getQueues))
	self.sandCache = []
	# Sandbox base path where individual job data is stored, staged and returned to
	self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist = False)
	self.scratchPath = config.getList('scratch path', ['TMPDIR', '/tmp'], onChange = True)
	self.submitOpts = config.get('submit options', '', onChange = None)
	self.memory = config.getInt('memory', -1, onChange = None)
	# wrap any mkdir failure (permissions, missing parent, ...) in a backend error
	try:
		if not os.path.exists(self.sandPath):
			os.mkdir(self.sandPath)
	except Exception:
		raise BackendError('Unable to create sandbox base directory "%s"!' % self.sandPath)
def __init__(self, config, name, submitExec, statusExec, cancelExec):
	"""Initialize a local batch backend and ensure the sandbox base directory exists."""
	config.set('broker', 'RandomBroker')
	config.setInt('wait idle', 20)
	config.setInt('wait work', 5)
	(self.submitExec, self.statusExec, self.cancelExec) = (submitExec, statusExec, cancelExec)
	BasicWMS.__init__(self, config, name)
	self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls=Broker,
		inherit=True, tags=[self], pargs=('sites', 'sites', self.getNodes))
	self.brokerQueue = config.getPlugin('queue broker', 'UserBroker', cls=Broker,
		inherit=True, tags=[self], pargs=('queue', 'queues', self.getQueues))
	self.sandCache = []
	# Sandbox base path where individual job data is stored, staged and returned to
	self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist=False)
	self.scratchPath = config.getList('scratch path', ['TMPDIR', '/tmp'], onChange=True)
	self.submitOpts = config.get('submit options', '', onChange=None)
	self.memory = config.getInt('memory', -1, onChange=None)
	# wrap any mkdir failure (permissions, missing parent, ...) in a backend error
	try:
		if not os.path.exists(self.sandPath):
			os.mkdir(self.sandPath)
	except Exception:
		raise BackendError(
			'Unable to create sandbox base directory "%s"!' % self.sandPath)
def _getSandboxFiles(self, module, monitor, smList):
	"""Return the sandbox file list, extended by the proxy auth files.

	The first proxy is named '_proxy.dat', further ones '_proxy.dat.<idx>'.
	"""
	files = BasicWMS._getSandboxFiles(self, module, monitor, smList)
	for idx, authFile in enumerate(self._token.getAuthFiles()):
		# FIX: close the proxy file after reading - open(...).read() leaked the handle
		with open(authFile, 'r') as fp:
			auth_content = fp.read()
		files.append(VirtualFile(('_proxy.dat.%d' % idx).replace('.0', ''), auth_content))
	return files
def _get_sandbox_file_list(self, task, sm_list):
	"""Return the sandbox file list, extended by the proxy auth files.

	The first proxy is named '_proxy.dat', further ones '_proxy.dat.<idx>'.
	"""
	files = BasicWMS._get_sandbox_file_list(self, task, sm_list)
	for idx, auth_fn in enumerate(self._token.get_auth_fn_list()):
		# FIX: close the proxy file after reading - open(...).read() leaked the handle
		with open(auth_fn, 'r') as fp:
			auth_content = fp.read()
		files.append(VirtualFile(('_proxy.dat.%d' % idx).replace('.0', ''), auth_content))
	return files