def __init__(self, config, name):
    """Initialize the Condor backend: sandbox handling, job executors and pool access.

    Reads all backend settings from *config* up front so later calls only
    consume the cached attribute state.
    """
    self._sandbox_helper = SandboxHelper(config)
    self._error_log_fn = config.get_work_path('error.tar')
    # cancelling jobs also purges their local sandboxes
    cancel_with_purge = CancelAndPurgeJobs(config, CondorCancelJobs(config),
        LocalPurgeJobs(config, self._sandbox_helper))
    BasicWMS.__init__(self, config, name,
        check_executor=CheckJobsMissingState(config, CondorCheckJobs(config)),
        cancel_executor=cancel_with_purge)
    self._task_id = config.get('task id', md5_hex(str(time.time())), persistent=True)  # FIXME
    # finalize config state by reading values or setting to defaults
    # load keys for condor pool ClassAds
    self._jdl_writer = CondorJDLWriter(config)
    self._universe = config.get('universe', 'vanilla', on_change=None)
    self._pool_req_dict = config.get_dict('poolArgs req', {})[0]
    self._pool_work_dn = None
    self._proc_factory = None
    (self._submit_exec, self._transfer_exec) = (None, None)
    # prepare interfaces for local/remote/ssh pool access
    self._remote_type = config.get_enum('remote Type', PoolType, PoolType.LOCAL)
    self._init_pool_interface(config)
    # Sandbox base path where individual job data is stored, staged and returned to
    self._sandbox_dn = config.get_path('sandbox path',
        config.get_work_path('sandbox'), must_exist=False)
    # broker for selecting sites - FIXME: this looks wrong... pool != site
    self._pool_host_list = config.get_list(['poolhostlist', 'pool host list'], [])
    self._broker_site = config.get_plugin('site broker', 'UserBroker',
        cls=Broker, bind_kwargs={'tags': [self]},
        pargs=('sites', 'sites', lambda: self._pool_host_list))
    self._wall_time_mode = config.get_enum('wall time mode', WallTimeMode,
        WallTimeMode.ignore, subset=[WallTimeMode.hard, WallTimeMode.ignore])
    self._blacklist_nodes = config.get_list(['blacklist nodes'], [], on_change=None)
    self._user_requirements = config.get('user requirements', '', on_change=None)
def __init__(self, config, name):
    """Local 'host' backend - runs jobs directly on the submission host via gc-host.sh."""
    check_wrapper = CheckJobsMissingState(config, Host_CheckJobs(config))
    LocalWMS.__init__(self, config, name,
        submitExec=utils.pathShare('gc-host.sh'),
        checkExecutor=check_wrapper,
        cancelExecutor=Host_CancelJobs(config))
def __init__(self, config, name):
    """Local 'host' backend - runs jobs directly on the submission host via gc-host.sh."""
    check_wrapper = CheckJobsMissingState(config, HostCheckJobs(config))
    LocalWMS.__init__(self, config, name,
        submit_exec=get_path_share('gc-host.sh'),
        check_executor=check_wrapper,
        cancel_executor=HostCancelJobs(config))
def __init__(self, config, name):
    """LSF backend - submit with 'bsub', check/cancel via the LSF executor plugins."""
    check_wrapper = CheckJobsMissingState(config, LSF_CheckJobs(config))
    LocalWMS.__init__(self, config, name,
        submitExec=utils.resolveInstallPath('bsub'),
        cancelExecutor=LSF_CancelJobs(config),
        checkExecutor=check_wrapper)
def __init__(self, config, name):
    """SLURM backend - submit with 'sbatch', cancel with 'scancel'."""
    # 'scancel' reports unknown jobs with this marker string
    cancel_wrapper = CancelJobsWithProcessBlind(config, 'scancel',
        unknown_id='not in queue !')
    LocalWMS.__init__(self, config, name,
        submit_exec=resolve_install_path('sbatch'),
        check_executor=CheckJobsMissingState(config, SLURMCheckJobs(config)),
        cancel_executor=cancel_wrapper)
def __init__(self, config, name):
    """Legacy Condor/GlideInWMS backend initialization.

    Sets up sandbox handling and executors, an optional debug log,
    pool interfaces and the site broker, and probes the local condor
    history configuration when running against a local pool.
    """
    self._sandbox_helper = SandboxHelper(config)
    BasicWMS.__init__(self, config, name,
        checkExecutor = CheckJobsMissingState(config, Condor_CheckJobs(config)),
        cancelExecutor = CancelAndPurgeJobs(config, Condor_CancelJobs(config),
            LocalPurgeJobs(config, self._sandbox_helper)))
    # special debug out/messages/annotations - may have noticeable effect on storage and performance!
    debug_log_fn = config.get('debugLog', '')
    self.debug = False
    if debug_log_fn:
        # NOTE(review): file handle is kept open for the object's lifetime
        self.debug = open(debug_log_fn, 'a')
    ######
    # str needs encoding before hashing on py3 - py2 takes it as-is
    try:
        random_task_id = md5(str(time.time())).hexdigest()
    except TypeError:
        random_task_id = md5(str(time.time()).encode()).hexdigest()
    self.taskID = config.get('task id', random_task_id, persistent = True)  # FIXME!
    self.debugOut("""
#############################
Initialized Condor/GlideInWMS
#############################
Config: %s
taskID: %s
Name: %s
#############################
""" % (config.getConfigName(), self.taskID, name))
    # finalize config state by reading values or setting to defaults
    self.settings = {
        'jdl': {
            'Universe': config.get('Universe', 'vanilla'),
            'NotifyEmail': config.get('NotifyEmail', ''),
            'ClassAdData': config.getList('ClassAdData', []),
            'JDLData': config.getList('JDLData', []),
        },
        'pool': {
            'hosts': config.getList('PoolHostList', []),
        },
    }
    # prepare interfaces for local/remote/ssh pool access
    self._initPoolInterfaces(config)
    # load keys for condor pool ClassAds
    self.poolReqs = config.getDict('poolArgs req', {})[0]
    self.poolQuery = config.getDict('poolArgs query', {})[0]
    # Sandbox base path where individual job data is stored, staged and returned to
    self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist = False)
    # history query is faster with split files - check if and how this is used
    # default condor_history command works WITHOUT explicitly specified file
    self.historyFile = None
    rotation_enabled = getoutput(self.configValExec + ' ENABLE_HISTORY_ROTATION').lower()
    if (self.remoteType == PoolType.LOCAL) and (rotation_enabled == 'true'):
        self.historyFile = getoutput(self.configValExec + ' HISTORY')
        if not os.path.isfile(self.historyFile):
            self.historyFile = None
    # broker for selecting Sites
    self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
        tags = [self], pargs = ('sites', 'sites', self.getSites))
    self.debugFlush()
def __init__(self, config, name):
    """PBS backend - qdel-based cancellation plus PBS node/queue discovery."""
    # qdel needs fully qualified job ids; the lambda defers _fqid lookup until use
    qdel_executor = CancelJobsWithProcessBlind(config, 'qdel',
        fmt=lambda wmsIDs: lmap(self._fqid, wmsIDs),
        unknownID='Unknown Job Id')
    PBSGECommon.__init__(self, config, name,
        cancelExecutor=qdel_executor,
        checkExecutor=CheckJobsMissingState(config, PBS_CheckJobs(config, self._fqid)))
    self._nodes_finder = PBS_Discover_Nodes(config)
    self._queues_finder = PBS_Discover_Queues(config)
    self._server = config.get('server', '', onChange=None)
def __init__(self, config, name):
    """PBS backend - qdel-based cancellation plus PBS node/queue discovery."""
    # qdel needs fully qualified job ids; the lambda defers _fqid lookup until use
    qdel_executor = CancelJobsWithProcessBlind(config, 'qdel',
        fmt=lambda wms_id_list: lmap(self._fqid, wms_id_list),
        unknown_id='Unknown Job Id')
    PBSGECommon.__init__(self, config, name,
        cancel_executor=qdel_executor,
        check_executor=CheckJobsMissingState(config, PBSCheckJobs(config, self._fqid)),
        nodes_finder=PBSDiscoverNodes(config),
        queues_finder=PBSDiscoverQueues(config))
    self._server = config.get('server', '', on_change=None)
def __init__(self, config, name):
    """Grid Engine backend - batched qdel cancellation plus GE node/queue discovery."""
    # GE qdel accepts a single comma-separated id list instead of one id per call
    qdel_executor = CancelJobsWithProcessBlind(config, 'qdel',
        fmt=lambda wmsIDs: [','.join(wmsIDs)],
        unknownID=['Unknown Job Id'])
    PBSGECommon.__init__(self, config, name,
        cancelExecutor=qdel_executor,
        checkExecutor=CheckJobsMissingState(config, GridEngine_CheckJobs(config)),
        nodesFinder=GridEngine_Discover_Nodes(config),
        queuesFinder=GridEngine_Discover_Queues(config))
    self._project = config.get('project name', '', onChange=None)
    self._configExec = utils.resolveInstallPath('qconf')
def __init__(self, config, name):
    """Grid Engine backend - batched qdel cancellation plus GE node/queue discovery."""
    # GE qdel accepts a single comma-separated id list instead of one id per call
    qdel_executor = CancelJobsWithProcessBlind(config, 'qdel',
        fmt=lambda wms_id_list: [','.join(wms_id_list)],
        unknown_id='Unknown Job Id')
    PBSGECommon.__init__(self, config, name,
        cancel_executor=qdel_executor,
        check_executor=CheckJobsMissingState(config, GridEngineCheckJobs(config)),
        nodes_finder=GridEngineDiscoverNodes(config),
        queues_finder=GridEngineDiscoverQueues(config))
    self._project = config.get('project name', '', on_change=None)
    self._config_exec = resolve_install_path('qconf')
def __init__(self, config, name):
    """JMS backend - submit with 'job_submit', cancel with 'job_cancel'."""
    # 'job_cancel' reports unknown jobs with this marker string
    cancel_wrapper = CancelJobsWithProcessBlind(config, 'job_cancel',
        unknownID = 'not in queue !')
    LocalWMS.__init__(self, config, name,
        submitExec = utils.resolveInstallPath('job_submit'),
        checkExecutor = CheckJobsMissingState(config, JMS_CheckJobs(config)),
        cancelExecutor = cancel_wrapper)