def __init__(self, config, name):
	"""Host backend: run jobs via the bundled gc-host.sh script, monitor with ps, cancel with kill."""
	submit_cmd = utils.pathShare('gc-host.sh')
	status_cmd = utils.resolveInstallPath('ps')
	cancel_cmd = utils.resolveInstallPath('kill')
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, statusExec=status_cmd, cancelExec=cancel_cmd)
def __init__(self, config):
	"""Restore cached WMS discovery state and locate the grid discovery tools."""
	self.statePath = config.getWorkPath('glitewms.info')
	# previously persisted discovery results: usable WMS list, all known WMS,
	# ping information and the current position in the WMS rotation
	(self.wms_ok, self.wms_all, self.pingDict, self.pos) = self.loadState()
	self.wms_timeout = {}
	self._full = config.getBool('wms discover full', True, onChange=None)
	# external command line tools performing the actual discovery
	self._exeLCGInfoSites = utils.resolveInstallPath('lcg-infosites')
	self._exeGliteWMSJobListMatch = utils.resolveInstallPath('glite-wms-job-list-match')
def _initInterfaces(self, **kwargs):
	"""Create CommandContainer wrappers for remote execution, copy and delete via ssh/scp.

	Each container receives the executable path, a lambda building the argument
	list and lambdas producing log-friendly descriptions.
	"""
	def makeArgList(*args):
		# Flatten mixed scalar/iterable arguments into one flat list, dropping
		# empty entries; strings count as scalars, not iterables of characters.
		argList = []
		for arg in args:
			if isinstance(arg, str) or not hasattr(arg, '__iter__'):
				argList.append(arg)
			else:
				argList.extend(arg)
		return [arg for arg in argList if arg]
	portArgs = lambda key: self._port and "-%s%s" % (key, self._port) or ""
	self._exeWrapper = CommandContainer(
		resolveInstallPath("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), portArgs('p'), self._host,
			" ".join((kwargs["command"], kwargs.get("args", '')))),
		lambda **kwargs: "'%(command)s' [via ssh %(URI)s]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
		lambda **kwargs: kwargs.get('args') and "Arguments: '%s'" % kwargs.get('args') or '')
	self._copy = CommandContainer(
		resolveInstallPath("scp"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), "-r",
			portArgs('P'), kwargs["source"],
			portArgs('P'), kwargs["destination"]),
		# fixed: the URI placeholder was previously never interpolated
		lambda **kwargs: "'scp' [%(URI)s]" % {"URI": self.URI},
		# fixed: '%(source)' / '%(destination)' lacked the 's' conversion char,
		# which raised ValueError whenever this description was rendered
		lambda **kwargs: "Transfer: '%(source)s' -> '%(destination)s'" % kwargs)
	self._delete = CommandContainer(
		resolveInstallPath("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), portArgs('p'), self._host,
			"rm -rf " + kwargs["target"]),
		# fixed: formatting against kwargs raised KeyError ('URI' not in kwargs)
		lambda **kwargs: "'rm' [via ssh %(URI)s]" % {"URI": self.URI},
		lambda **kwargs: "Target: '%(target)s'" % kwargs)
	self._socketWrapper = CommandContainer(
		resolveInstallPath("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getCurrentSocketArgs(), portArgs('p'), self._host,
			" ".join((kwargs["command"], kwargs.get("args", '')))),
		lambda **kwargs: "'%(command)s' [via ssh %(URI)s (master)]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
		lambda **kwargs: kwargs.get('args') and "Arguments: '%s'" % kwargs.get('args') or '')
def __init__(self, config, name):
	"""CREAM backend: chunked cancel-and-purge, CREAM check executor and CLI submit/output tools."""
	cancelAndPurge = CancelAndPurgeJobs(config, CREAM_CancelJobs(config), CREAM_PurgeJobs(config))
	GridWMS.__init__(self, config, name,
		checkExecutor=CREAM_CheckJobs(config),
		cancelExecutor=ChunkedExecutor(config, 'cancel', cancelAndPurge))
	self._nJobsPerChunk = config.getInt('job chunk size', 10, onChange=None)
	self._submitExec = utils.resolveInstallPath('glite-ce-job-submit')
	self._outputExec = utils.resolveInstallPath('glite-ce-job-output')
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
	self._outputRegex = r'.*For JobID \[(?P<rawId>\S+)\] output will be stored in the dir (?P<outputDir>.*)$'
	# proxy delegation is disabled - request automatic delegation instead
	self._useDelegate = False
	if self._useDelegate is False:
		self._submitParams.update({'-a': ' '})
def __init__(self, config, wmsName=None):
	"""SLURM backend: submit with sbatch, query with sacct, cancel with scancel."""
	submit_cmd = utils.resolveInstallPath('sbatch')
	status_cmd = utils.resolveInstallPath('sacct')
	cancel_cmd = utils.resolveInstallPath('scancel')
	LocalWMS.__init__(self, config, wmsName,
		submitExec=submit_cmd, statusExec=status_cmd, cancelExec=cancel_cmd)
def __init__(self, config, name):
	"""JMS backend: submit with job_submit, query with job_queue, cancel with job_cancel."""
	submit_cmd = utils.resolveInstallPath('job_submit')
	status_cmd = utils.resolveInstallPath('job_queue')
	cancel_cmd = utils.resolveInstallPath('job_cancel')
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, statusExec=status_cmd, cancelExec=cancel_cmd)
def __new__(cls, config, name):
	"""Instantiate the configured local backend or auto-detect one by probing for its tools."""
	def createWMS(wms):
		# resolve the backend class and build it with a class-tagged config view
		try:
			wmsCls = WMS.getClass(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		wms_config = config.changeView(viewClass='TaggedConfigView', setClasses=[wmsCls])
		return WMS.createInstance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:  # an explicitly configured backend takes precedence over probing
		return createWMS(wms)
	ec = ExceptionCollector()
	probes = [('sacct', 'SLURM'), ('sgepasswd', 'OGE'), ('pbs-config', 'PBS'),
		('qsub', 'OGE'), ('bsub', 'LSF'), ('job_slurm', 'JMS')]
	for cmd, wms in probes:
		try:
			utils.resolveInstallPath(cmd)
		except Exception:
			ec.collect()
			continue
		return createWMS(wms)
	ec.raise_any(BackendError('No valid local backend found!'))  # at this point all backends have failed!
def _initInterfaces(self, **kwargs):
	# Build CommandContainer wrappers for running commands, copying files and
	# deleting paths on the remote host through gsissh/gsiscp.
	# Each container gets: the executable path, a lambda rendering the full
	# argument string, and a lambda rendering a log-friendly description.
	self._exeWrapper = CommandContainer(
		resolveInstallPath("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(host)s %(payload)s" % {
			"port": (self._port and "-p" + self._port or ""),
			"sshargs": self._getDefaultArgs(),
			"socketArgs": self._getValidSocketArgs(),
			"host": self._host,
			# payload = command plus optional arguments, wrapped for transport
			"payload": self._wrapPayload(kwargs["command"] + " " + kwargs.get("args", ''))
		},
		lambda **kwargs: "%(command)s via adapter gsissh [URI %(URI)s]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
	)
	self._copy = CommandContainer(
		resolveInstallPath("gsiscp"),
		# recursive copy; port flag (-P) is repeated before source and destination
		lambda **kwargs: "%(sshargs)s %(socketArgs)s -r %(port)s %(source)s %(port)s %(destination)s" % {
			"port": (self._port and "-P" + self._port or ""),
			"sshargs": self._getDefaultArgs(),
			"socketArgs": self._getValidSocketArgs(),
			"source": kwargs["source"],
			"destination": kwargs["destination"],
		},
		lambda **kwargs: "gsiscp")
	self._delete = CommandContainer(
		resolveInstallPath("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(payload)s" % {
			"port": (self._port and "-p" + self._port or ""),
			"sshargs": self._getDefaultArgs(),
			"socketArgs": self._getValidSocketArgs(),
			"payload": self._wrapPayload("rm -rf " + kwargs["target"])
		},
		lambda **kwargs: "'rm' via gsissh")
	self._socketWrapper = CommandContainer(
		resolveInstallPath("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(host)s %(payload)s" % {
			"port": (self._port and "-p" + self._port or ""),
			"sshargs": self._getDefaultArgs(),
			# NOTE: uses the current (master) socket arguments, unlike _exeWrapper
			"socketArgs": self._getCurrentSocketArgs(),
			"host": self._host,
			"payload": self._wrapPayload(kwargs["command"] + " " + kwargs.get("args", ''))
		},
		lambda **kwargs: "%(command)s via adapter gsissh (master) [URI %(URI)s]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
	)
def _initPoolInterfaces(self, config):
	"""Set up the process handler and condor tool commands for the configured pool type.

	LOCAL/SPOOL use locally installed condor tools (SPOOL adds remote schedd/pool
	flags); SSH/GSISSH run the tools on the remote host and verify connectivity.
	Raises BackendError when remote tools or the remote work directory are unusable.
	"""
	# check submission type
	self.remoteType = config.getEnum('remote Type', PoolType, PoolType.LOCAL)
	self.debugOut("Selected pool type: %s" % PoolType.enum2str(self.remoteType))
	# get remote destination features
	(user, sched, collector) = self._getDestination(config)
	nice_user = user or "<local default>"
	nice_sched = sched or "<local default>"
	nice_collector = collector or "<local default>"
	self.debugOut("Destination:\n")
	self.debugOut("\tuser:%s @ sched:%s via collector:%s" % (nice_user, nice_sched, nice_collector))
	# prepare commands appropriate for pool type
	if self.remoteType == PoolType.LOCAL or self.remoteType == PoolType.SPOOL:
		self.user = user
		# fixed: redundant chained assignment (self.Pool=self.Pool=...)
		self.Pool = ProcessHandler.createInstance("LocalProcessHandler")
		# local and remote use condor tools installed locally - get them
		self.submitExec = utils.resolveInstallPath('condor_submit')
		self.historyExec = utils.resolveInstallPath('condor_history')  # completed/failed jobs are stored outside the queue
		self.cancelExec = utils.resolveInstallPath('condor_rm')
		self.transferExec = utils.resolveInstallPath('condor_transfer_data')  # submission might spool to another schedd and need to fetch output
		self.configValExec = utils.resolveInstallPath('condor_config_val')  # service is better when being able to adjust to pool settings
		if self.remoteType == PoolType.SPOOL:
			# remote requires adding instructions for accessing remote pool
			self.submitExec += " %s %s" % (utils.QM(sched, "-remote %s" % sched, ""), utils.QM(collector, "-pool %s" % collector, ""))
			self.historyExec = "false"  # disabled for this type
			self.cancelExec += " %s %s" % (utils.QM(sched, "-name %s" % sched, ""), utils.QM(collector, "-pool %s" % collector, ""))
			self.transferExec += " %s %s" % (utils.QM(sched, "-name %s" % sched, ""), utils.QM(collector, "-pool %s" % collector, ""))
	else:
		# ssh type instructions are passed to the remote host via regular ssh/gsissh
		host = "%s%s" % (utils.QM(user, "%s@" % user, ""), sched)
		if self.remoteType == PoolType.SSH:
			self.Pool = ProcessHandler.createInstance("SSHProcessHandler",
				remoteHost=host, sshLink=config.getWorkPath(".ssh", self._name + host))
		else:
			self.Pool = ProcessHandler.createInstance("GSISSHProcessHandler",
				remoteHost=host, sshLink=config.getWorkPath(".gsissh", self._name + host))
		# ssh type instructions rely on commands being available on remote pool
		self.submitExec = 'condor_submit'
		self.historyExec = 'condor_history'
		self.cancelExec = 'condor_rm'
		self.transferExec = "false"  # disabled for this type
		self.configValExec = 'condor_config_val'
		# test availability of commands
		testProcess = self.Pool.LoggedExecute("condor_version")
		self.debugOut("*** Testing remote connectivity:\n%s" % testProcess.cmd)
		if testProcess.wait() != 0:
			testProcess.logError(self.errorLog)
			raise BackendError("Failed to access remote Condor tools! The pool you are submitting to is very likely not configured properly.")
	# get initial workdir on remote pool
	remote_workdir = config.get("remote workdir", '')
	if remote_workdir:
		uName = self.Pool.LoggedExecute("whoami").getOutput().strip()
		self.poolWorkDir = os.path.join(remote_workdir, uName)
		pwdProcess = self.Pool.LoggedExecute("mkdir -p %s" % self.poolWorkDir)
	else:
		pwdProcess = self.Pool.LoggedExecute("pwd")
		self.poolWorkDir = pwdProcess.getOutput().strip()
	if pwdProcess.wait() != 0:
		self._log.critical("Code: %d\nOutput Message: %s\nError Message: %s",
			pwdProcess.wait(), pwdProcess.getOutput(), pwdProcess.getError())
		raise BackendError("Failed to determine, create or verify base work directory on remote host")
def __init__(self, config, name):
	"""EDG backend: resolve the edg-job-* command line tools and set submit options."""
	GridWMS.__init__(self, config, name)
	for attr, exe in [('_submitExec', 'edg-job-submit'), ('_statusExec', 'edg-job-status'),
			('_outputExec', 'edg-job-get-output'), ('_cancelExec', 'edg-job-cancel')]:
		setattr(self, attr, utils.resolveInstallPath(exe))
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
def __new__(cls, config, name):
	"""Auto-detect a local batch system by probing for characteristic commands; default to PBS."""
	probes = [('sgepasswd', 'OGE'), ('pbs-config', 'PBS'), ('qsub', 'OGE'),
		('bsub', 'LSF'), ('job_slurm', 'SLURM')]
	for cmd, wms in probes:
		try:
			utils.resolveInstallPath(cmd)
			return WMS.getInstance(wms, config, name)
		except Exception:
			pass  # probe command missing - try the next backend
	return WMS.getInstance('PBS', config, name)
def __init__(self, config, name):
	"""Kerberos access token: locate kinit/klist and back up the current tickets."""
	RefreshableAccessToken.__init__(self, config, name)
	self._kinitExec = utils.resolveInstallPath('kinit')
	self._klistExec = utils.resolveInstallPath('klist')
	self._cache = None
	# work-path copies of the ticket caches, keyed by environment variable name
	self._authFiles = dict((var, config.getWorkPath('proxy.%s' % var))
		for var in ['KRB5CCNAME', 'KRBTKFILE'])
	self._backupTickets(config)
	self._tickets = config.getList('tickets', [], onChange=None)
def __init__(self, config, name):
	"""Deprecated glite backend: resolve the glite-job-* tools and set submit options."""
	utils.deprecated("Please use the GliteWMS backend for grid jobs!")
	GridWMS.__init__(self, config, name)
	for attr, exe in [('_submitExec', 'glite-job-submit'), ('_statusExec', 'glite-job-status'),
			('_outputExec', 'glite-job-output'), ('_cancelExec', 'glite-job-cancel')]:
		setattr(self, attr, utils.resolveInstallPath(exe))
	self._submitParams.update({"-r": self._ce, "--config-vo": self._configVO})
def __init__(self, config, name):
	"""EDG backend: grid check/cancel executors, EDG JDL writer and edg-job-* tools."""
	check = Grid_CheckJobs(config, 'edg-job-status')
	cancel = Grid_CancelJobs(config, 'edg-job-cancel')
	GridWMS.__init__(self, config, name, checkExecutor=check,
		cancelExecutor=cancel, jdlWriter=EDGJDL())
	self._submitExec = utils.resolveInstallPath('edg-job-submit')
	self._outputExec = utils.resolveInstallPath('edg-job-get-output')
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
def __init__(self, config, name):
	"""Deprecated glite backend: grid check/cancel executors plus glite-job-* tools."""
	utils.deprecated('Please use the GliteWMS backend for grid jobs!')
	check = Grid_CheckJobs(config, 'glite-job-status')
	cancel = Grid_CancelJobs(config, 'glite-job-cancel')
	GridWMS.__init__(self, config, name, checkExecutor=check, cancelExecutor=cancel)
	self._submitExec = utils.resolveInstallPath('glite-job-submit')
	self._outputExec = utils.resolveInstallPath('glite-job-output')
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
def __init__(self, config, name):
	"""PBS/GE-style backend: qsub/qstat/qdel plus shell, account and output-delay options."""
	submit_cmd = utils.resolveInstallPath('qsub')
	status_cmd = utils.resolveInstallPath('qstat')
	cancel_cmd = utils.resolveInstallPath('qdel')
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, statusExec=status_cmd, cancelExec=cancel_cmd)
	self._shell = config.get('shell', '', onChange=None)
	self._account = config.get('account', '', onChange=None)
	self._delay = config.getBool('delay output', False, onChange=None)
	# map software requirements to batch system specific options
	self._softwareMap = config.getDict('software requirement map', {}, onChange=None)
def __init__(self, config, name):
	"""Deprecated glite backend: resolve glite-job-* tools and set submit options."""
	utils.deprecated('Please use the GliteWMS backend for grid jobs!')
	GridWMS.__init__(self, config, name)
	for attr, exe in [('_submitExec', 'glite-job-submit'), ('_statusExec', 'glite-job-status'),
			('_outputExec', 'glite-job-output'), ('_cancelExec', 'glite-job-cancel')]:
		setattr(self, attr, utils.resolveInstallPath(exe))
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
def __init__(self, config, name):
	"""Kerberos access token: locate kinit/klist and back up the current tickets."""
	RefreshableAccessToken.__init__(self, config, name)
	self._kinitExec = utils.resolveInstallPath('kinit')
	self._klistExec = utils.resolveInstallPath('klist')
	self._cache = None
	# work-path copies of the ticket caches, keyed by environment variable name
	self._authFiles = dict((var, config.getWorkPath('proxy.%s' % var))
		for var in ['KRB5CCNAME', 'KRBTKFILE'])
	self._backupTickets(config)
	self._tickets = config.getList('tickets', [], onChange=None)
def __init__(self, config, name):
	"""Kerberos/AFS proxy: locate kinit/klist/aklog and back up the current tickets."""
	RefreshableProxy.__init__(self, config, name)
	self._kinitExec = utils.resolveInstallPath('kinit')
	self._klistExec = utils.resolveInstallPath('klist')
	self._aklogExec = utils.resolveInstallPath('aklog')
	self._cache = None
	# work-path copies of the ticket caches, keyed by environment variable name
	self._proxyPaths = dict((var, config.getWorkPath('proxy.%s' % var))
		for var in ['KRB5CCNAME', 'KRBTKFILE'])
	self._backupTickets()
	self._tickets = config.getList('tickets', [], onChange=None)
def __init__(self, config, name):
	"""EDG backend: resolve the edg-job-* command line tools and set submit options."""
	GridWMS.__init__(self, config, name)
	for attr, exe in [('_submitExec', 'edg-job-submit'), ('_statusExec', 'edg-job-status'),
			('_outputExec', 'edg-job-get-output'), ('_cancelExec', 'edg-job-cancel')]:
		setattr(self, attr, utils.resolveInstallPath(exe))
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
def _initInterfaces(self, **kwargs):
	"""Set up local copy/delete command wrappers, preferring rsync with cp as fallback."""
	try:
		copypath = resolveInstallPath("rsync")
		copynice = lambda **kwargs: "copy via rsync"
	except InstallationError:
		# rsync not installed - fall back to plain cp
		copypath = resolveInstallPath("cp")
		copynice = lambda **kwargs: "copy via cp"
	self._copy = CommandContainer(
		copypath,
		lambda **kwargs: "-r %s %s" % (kwargs['source'], kwargs['destination']),
		copynice)
	self._delete = CommandContainer(
		resolveInstallPath("rm"),
		# fixed: was declared with *kwargs (positional tuple), so kwargs['target']
		# raised TypeError whenever the delete command was rendered
		lambda **kwargs: "-r " + kwargs['target'],
		lambda **kwargs: "rm")
def __init__(self, config, name):
	"""PBS/GE-style backend: qsub/qstat/qdel plus shell, account, delay and software lookup."""
	submit_cmd = utils.resolveInstallPath('qsub')
	status_cmd = utils.resolveInstallPath('qstat')
	cancel_cmd = utils.resolveInstallPath('qdel')
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, statusExec=status_cmd, cancelExec=cancel_cmd)
	self._shell = config.get('shell', '', onChange=None)
	self._account = config.get('account', '', onChange=None)
	self._delay = config.getBool('delay output', False, onChange=None)
	# lookup translating software requirements into batch system options
	self._softwareReqs = config.getLookup('software requirement map', {}, single=False, onChange=None)
def _initInterfaces(self, **kwargs):
	# Build CommandContainer wrappers for running commands, copying files and
	# deleting paths on the remote host through gsissh/gsiscp.
	# Each container gets: the executable path, a lambda rendering the full
	# argument string, and a lambda rendering a log-friendly description.
	self._exeWrapper = CommandContainer(
		resolveInstallPath("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(host)s %(payload)s" % {
			"port" : (self._port and "-p"+self._port or ""),
			"sshargs" : self._getDefaultArgs(),
			"socketArgs" : self._getValidSocketArgs(),
			"host" : self._host,
			# payload = command plus optional arguments, wrapped for transport
			"payload" : self._wrapPayload(kwargs["command"] + " " + kwargs.get("args",''))
		},
		lambda **kwargs: "%(command)s via adapter gsissh [URI %(URI)s]" % {
			"command" : kwargs.get("command","<undefined command>"),
			"URI" : self.URI,
		},
	)
	self._copy = CommandContainer(
		resolveInstallPath("gsiscp"),
		# recursive copy; port flag (-P) is repeated before source and destination
		lambda **kwargs: "%(sshargs)s %(socketArgs)s -r %(port)s %(source)s %(port)s %(destination)s" % {
			"port" : (self._port and "-P"+self._port or ""),
			"sshargs" : self._getDefaultArgs(),
			"socketArgs" : self._getValidSocketArgs(),
			"source" : kwargs["source"],
			"destination": kwargs["destination"],
		},
		lambda **kwargs: "gsiscp"
	)
	self._delete = CommandContainer(
		resolveInstallPath("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(payload)s" % {
			"port" : (self._port and "-p"+self._port or ""),
			"sshargs" : self._getDefaultArgs(),
			"socketArgs" : self._getValidSocketArgs(),
			"payload" : self._wrapPayload( "rm -rf " + kwargs["target"] )
		},
		lambda **kwargs: "'rm' via gsissh"
	)
	self._socketWrapper = CommandContainer(
		resolveInstallPath("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(host)s %(payload)s" % {
			"port" : (self._port and "-p"+self._port or ""),
			"sshargs" : self._getDefaultArgs(),
			# NOTE: uses the current (master) socket arguments, unlike _exeWrapper
			"socketArgs" : self._getCurrentSocketArgs(),
			"host" : self._host,
			"payload" : self._wrapPayload(kwargs["command"] + " " + kwargs.get("args",''))
		},
		lambda **kwargs: "%(command)s via adapter gsissh (master) [URI %(URI)s]" % {
			"command" : kwargs.get("command","<undefined command>"),
			"URI" : self.URI,
		},
	)
def __new__(cls, config, name):
	"""Probe for local batch systems and instantiate the first one whose tools are found."""
	ec = ExceptionCollector()
	probes = [('sgepasswd', 'OGE'), ('pbs-config', 'PBS'), ('qsub', 'OGE'),
		('bsub', 'LSF'), ('job_slurm', 'SLURM')]
	for cmd, wms in probes:
		try:
			utils.resolveInstallPath(cmd)
		except Exception:
			ec.collect()
			continue
		try:
			wmsCls = WMS.getClass(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		config_wms = config.changeView(viewClass='TaggedConfigView', setClasses=[wmsCls])
		return WMS.createInstance(wms, config_wms, name)
	ec.raise_any(BackendError('No valid local backend found!'))  # at this point all backends have failed!
def _initInterfaces(self, **kwargs):
	"""Set up local copy/delete command wrappers, preferring rsync with cp as fallback."""
	try:
		copypath = resolveInstallPath("rsync")
		copynice = lambda **kwargs: "copy via rsync"
	except InstallationError:
		# rsync not installed - fall back to plain cp
		copypath = resolveInstallPath("cp")
		copynice = lambda **kwargs: "copy via cp"
	self._copy = CommandContainer(
		copypath,
		lambda **kwargs: "-r %s %s" % (kwargs['source'], kwargs['destination']),
		copynice)
	self._delete = CommandContainer(
		resolveInstallPath("rm"),
		# fixed: was declared with *kwargs (positional tuple), so kwargs['target']
		# raised TypeError whenever the delete command was rendered
		lambda **kwargs: "-r " + kwargs['target'],
		lambda **kwargs: "rm")
def __init__(self, config, name):
	"""glite-WMS backend: resolve CLI tools, configure delegation and WMS/site discovery."""
	GridWMS.__init__(self, config, name)
	for attr, exe in [('_delegateExec', 'glite-wms-job-delegate-proxy'),
			('_submitExec', 'glite-wms-job-submit'), ('_statusExec', 'glite-wms-job-status'),
			('_outputExec', 'glite-wms-job-output'), ('_cancelExec', 'glite-wms-job-cancel')]:
		setattr(self, attr, utils.resolveInstallPath(exe))
	self._submitParams.update({'-r': self._ce, '--config': self._configVO})
	self._useDelegate = config.getBool('try delegate', True, onChange=None)
	self._forceDelegate = config.getBool('force delegate', False, onChange=None)
	# optionally discover usable WMS endpoints lazily at submission time
	self._discovery_module = None
	if config.getBool('discover wms', True, onChange=None):
		self._discovery_module = DiscoverWMS_Lazy(config)
	self._discover_sites = config.getBool('discover sites', False, onChange=None)
def __init__(self, config, name):
	"""OGE/SGE backend: adds user, project name and the qconf configuration tool."""
	PBSGECommon.__init__(self, config, name)
	default_user = os.environ.get('LOGNAME', '')
	self._user = config.get('user', default_user, onChange=None)
	self._project = config.get('project name', '', onChange=None)
	self._configExec = utils.resolveInstallPath('qconf')
def __init__(self, config, name):
	"""EDG backend: grid check/cancel executors, EDG JDL writer and edg-job-* tools."""
	check = Grid_CheckJobs(config, 'edg-job-status')
	cancel = Grid_CancelJobs(config, 'edg-job-cancel')
	GridWMS.__init__(self, config, name, checkExecutor=check,
		cancelExecutor=cancel, jdlWriter=EDGJDL())
	self._submitExec = utils.resolveInstallPath('edg-job-submit')
	self._outputExec = utils.resolveInstallPath('edg-job-get-output')
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
def __init__(self, config, name):
	"""CREAM backend: chunked cancel-and-purge, CREAM check executor and CLI submit/output tools."""
	cancelAndPurge = CancelAndPurgeJobs(config, CREAM_CancelJobs(config), CREAM_PurgeJobs(config))
	GridWMS.__init__(self, config, name,
		checkExecutor=CREAM_CheckJobs(config),
		cancelExecutor=ChunkedExecutor(config, 'cancel', cancelAndPurge))
	self._nJobsPerChunk = config.getInt('job chunk size', 10, onChange=None)
	self._submitExec = utils.resolveInstallPath('glite-ce-job-submit')
	self._outputExec = utils.resolveInstallPath('glite-ce-job-output')
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
	self._outputRegex = r'.*For JobID \[(?P<rawId>\S+)\] output will be stored in the dir (?P<outputDir>.*)$'
	# proxy delegation is disabled - request automatic delegation instead
	self._useDelegate = False
	if self._useDelegate is False:
		self._submitParams.update({'-a': ' '})
def __init__(self, config, name):
	"""LSF backend: bsub submission with LSF-specific cancel and check executors."""
	submit_cmd = utils.resolveInstallPath('bsub')
	cancel = LSF_CancelJobs(config)
	check = CheckJobsMissingState(config, LSF_CheckJobs(config))
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, cancelExecutor=cancel, checkExecutor=check)
def __init__(self, config, name):
	"""VOMS proxy token: locate voms-proxy-info and read proxy related settings."""
	TimedAccessToken.__init__(self, config, name)
	self._infoExec = utils.resolveInstallPath('voms-proxy-info')
	self._proxyPath = config.get('proxy path', '')
	self._ignoreWarning = config.getBool('ignore warnings', False, onChange=None)
	self._cache = None
def __init__(self, config, name, checkExecutor, cancelExecutor, nodesFinder, queuesFinder):
	"""Shared PBS/GE base: qsub submission plus shell/account/delay/software-lookup options."""
	LocalWMS.__init__(self, config, name,
		submitExec=utils.resolveInstallPath('qsub'),
		checkExecutor=checkExecutor, cancelExecutor=cancelExecutor,
		nodesFinder=nodesFinder, queuesFinder=queuesFinder)
	self._shell = config.get('shell', '', onChange=None)
	self._account = config.get('account', '', onChange=None)
	self._delay = config.getBool('delay output', False, onChange=None)
	# lookup translating software requirements into batch system options
	self._softwareReqs = config.getLookup('software requirement map', {}, single=False, onChange=None)
def __init__(self, config, name):
	"""GridEngine backend: comma-joined qdel cancel, GE-specific check/discovery and qconf."""
	cancel = CancelJobsWithProcessBlind(config, 'qdel',
		fmt=lambda wmsIDs: [str.join(',', wmsIDs)],  # qdel accepts a comma separated id list
		unknownID=['Unknown Job Id'])
	PBSGECommon.__init__(self, config, name, cancelExecutor=cancel,
		checkExecutor=CheckJobsMissingState(config, GridEngine_CheckJobs(config)),
		nodesFinder=GridEngine_Discover_Nodes(config),
		queuesFinder=GridEngine_Discover_Queues(config))
	self._project = config.get('project name', '', onChange=None)
	self._configExec = utils.resolveInstallPath('qconf')
def __init__(self, config, name):
	"""CREAM backend (legacy): resolve CREAM CLI tools and prepare status/output regexes."""
	GridWMS.__init__(self, config, name)
	self._nJobsPerChunk = config.getInt('job chunk size', 10, onChange=None)
	for attr, exe in [('_submitExec', 'glite-ce-job-submit'), ('_statusExec', 'glite-ce-job-status'),
			('_outputExec', 'glite-ce-job-output'), ('_cancelExec', 'glite-ce-job-cancel'),
			('_purgeExec', 'glite-ce-job-purge')]:
		setattr(self, attr, utils.resolveInstallPath(exe))
	self._submitParams.update({'-r': self._ce, '--config-vo': self._configVO})
	# level-0 status output is either a plain status line or a CREAM fault report
	lvl0_status_ok = r'.*JobID=\[(?P<rawId>\S+)\]\s+Status\s+=\s+\[(?P<status>\S+)\].*'
	lvl0_status_err = r'.*JobID=\[(?P<rawId>\S+)\]\s+For this job CREAM has returned a fault: MethodName=\[(?P<methodName>.*)\] '
	lvl0_status_err += r'Timestamp=\[(?P<timestamp>.*)\] ErrorCode=\[(?P<errorCode>.*)\] '
	lvl0_status_err += r'Description=\[(?P<description>.*)\] FaultCause=\[(?P<faultCause>.*)\].*'
	self._statusRegexLevel0 = [lvl0_status_ok, lvl0_status_err]
	self._outputRegex = r'.*For JobID \[(?P<rawId>\S+)\] output will be stored in the dir (?P<outputDir>.*)$'
	# proxy delegation is disabled - request automatic delegation instead
	self._useDelegate = False
	if self._useDelegate is False:
		self._submitParams.update({'-a': ' '})
def __init__(self, config, name):
	"""GridEngine backend: comma-joined qdel cancel, GE-specific check/discovery and qconf."""
	cancel = CancelJobsWithProcessBlind(config, 'qdel',
		fmt=lambda wmsIDs: [str.join(',', wmsIDs)],  # qdel accepts a comma separated id list
		unknownID=['Unknown Job Id'])
	PBSGECommon.__init__(self, config, name, cancelExecutor=cancel,
		checkExecutor=CheckJobsMissingState(config, GridEngine_CheckJobs(config)),
		nodesFinder=GridEngine_Discover_Nodes(config),
		queuesFinder=GridEngine_Discover_Queues(config))
	self._project = config.get('project name', '', onChange=None)
	self._configExec = utils.resolveInstallPath('qconf')
def __init__(self, config, cmd, args):
	"""Stdin-based process creator: resolve cmd to its install path and keep its arguments."""
	ProcessCreatorViaStdin.__init__(self, config)
	self._cmd = utils.resolveInstallPath(cmd)
	self._args = args
def __init__(self, config, name):
	"""LSF backend: bsub submission with LSF-specific cancel and check executors."""
	executors = {
		'submitExec': utils.resolveInstallPath('bsub'),
		'cancelExecutor': LSF_CancelJobs(config),
		'checkExecutor': CheckJobsMissingState(config, LSF_CheckJobs(config)),
	}
	LocalWMS.__init__(self, config, name, **executors)
def __init__(self, config, name):
	"""JMS backend: job_submit submission, missing-state-aware check, blind job_cancel."""
	submit_cmd = utils.resolveInstallPath('job_submit')
	check = CheckJobsMissingState(config, JMS_CheckJobs(config))
	cancel = CancelJobsWithProcessBlind(config, 'job_cancel', unknownID='not in queue !')
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, checkExecutor=check, cancelExecutor=cancel)
def __init__(self, config, name):
	"""PBS backend: pbsnodes discovery plus optional server suffix for job ids."""
	PBSGECommon.__init__(self, config, name)
	self._nodesExec = utils.resolveInstallPath('pbsnodes')
	self._server = config.get('server', '', onChange=None)
	# fully qualify wms ids with ".<server>" when a server is configured
	self._fqid = lambda wmsId: utils.QM(self._server, '%s.%s' % (wmsId, self._server), wmsId)
def __initcommands(self, **kwargs):
	# Resolve the ssh command used for remote execution and the scp command
	# (always recursive) used for file transfers.
	self.cmd = resolveInstallPath("ssh")
	self.cpy = resolveInstallPath("scp") + " -r"
def __init__(self, config, name):
	"""LSF backend (legacy): submit with bsub, query with bjobs, cancel with bkill."""
	submit_cmd = utils.resolveInstallPath('bsub')
	status_cmd = utils.resolveInstallPath('bjobs')
	cancel_cmd = utils.resolveInstallPath('bkill')
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, statusExec=status_cmd, cancelExec=cancel_cmd)
def __init__(self, config):
	# Backend discovery via the GridEngine configuration tool (qconf).
	BackendDiscovery.__init__(self, config)
	self._configExec = utils.resolveInstallPath('qconf')
def __init__(self, config):
	"""qstat-based process creator, restricted to the configured (or current) user."""
	ProcessCreatorViaArguments.__init__(self, config)
	self._cmd = utils.resolveInstallPath('qstat')
	default_user = os.environ.get('LOGNAME', '')
	self._user = config.get('user', default_user, onChange=None)
def __init__(self, config, name):
	"""VOMS proxy (legacy): locate voms-proxy-info and read warning settings."""
	TimedProxy.__init__(self, config, name)
	self._infoExec = utils.resolveInstallPath('voms-proxy-info')
	self._ignoreWarning = config.getBool('ignore warnings', False, onChange=None)
	self._cache = None
def _initPoolInterfaces(self, config):
	# Set up the process handler and condor tool commands for the configured
	# pool type: LOCAL/SPOOL use locally installed condor tools (SPOOL adds
	# remote schedd/pool flags); SSH/GSISSH run the tools on the remote host.
	# check submission type
	self.remoteType = config.get( self._getSections("backend"), "remote Type", "").lower()
	if self.remoteType in ["ssh"]:
		self.remoteType = poolType.SSH
	elif self.remoteType in ["gsissh","gssh"]:
		self.remoteType = poolType.GSISSH
	elif self.remoteType in ["spool","condor","remote"]:
		self.remoteType = poolType.SPOOL
	else:
		self.remoteType = poolType.LOCAL
	self.debugOut("Selected pool type: %s" % poolType.enumTypes[self.remoteType])
	# get remote destination features
	user,sched,collector = self._getDestination(config)
	self.debugOut("Destination:\n user:%s @ sched:%s via collector:%s" % (
		QM(user,user,"<local default>"), QM(sched,sched,"<local default>"),QM(collector,collector,"<local default>")))
	# prepare commands appropriate for pool type
	if self.remoteType == poolType.LOCAL or self.remoteType == poolType.SPOOL:
		self.user=user
		# NOTE(review): chained duplicate assignment - harmless but likely a typo
		self.Pool=self.Pool=ProcessHandler.open("LocalProcessHandler")
		# local and remote use condor tools installed locally - get them
		self.submitExec = utils.resolveInstallPath('condor_submit')
		self.statusExec = utils.resolveInstallPath('condor_q')
		self.historyExec = utils.resolveInstallPath('condor_history')  # completed/failed jobs are stored outside the queue
		self.cancelExec = utils.resolveInstallPath('condor_rm')
		self.transferExec = utils.resolveInstallPath('condor_transfer_data')  # submission might spool to another schedd and need to fetch output
		self.configValExec = utils.resolveInstallPath('condor_config_val')  # service is better when being able to adjust to pool settings
		if self.remoteType == poolType.SPOOL:
			# remote requires adding instructions for accessing remote pool
			self.submitExec+= " %s %s" % (QM(sched,"-remote %s"%sched,""),QM(collector, "-pool %s"%collector, ""))
			self.statusExec+= " %s %s" % (QM(sched,"-name %s"%sched,""),QM(collector, "-pool %s"%collector, ""))
			self.historyExec = "false"  # disabled for this type
			self.cancelExec+= " %s %s" % (QM(sched,"-name %s"%sched,""),QM(collector, "-pool %s"%collector, ""))
			self.transferExec+= " %s %s" % (QM(sched,"-name %s"%sched,""),QM(collector, "-pool %s"%collector, ""))
	else:
		# ssh type instructions are passed to the remote host via regular ssh/gsissh
		host="%s%s"%(QM(user,"%s@" % user,""), sched)
		if self.remoteType == poolType.SSH:
			self.Pool=ProcessHandler.open("SSHProcessHandler",remoteHost=host , sshLink=config.getWorkPath(".ssh", self.wmsName+host ) )
		else:
			self.Pool=ProcessHandler.open("GSISSHProcessHandler",remoteHost=host , sshLink=config.getWorkPath(".gsissh", self.wmsName+host ) )
		# ssh type instructions rely on commands being available on remote pool
		self.submitExec = 'condor_submit'
		self.statusExec = 'condor_q'
		self.historyExec = 'condor_history'
		self.cancelExec = 'condor_rm'
		self.transferExec = "false"  # disabled for this type
		self.configValExec = 'condor_config_val'
		# test availability of commands
		testProcess=self.Pool.LoggedProcess("condor_version")
		self.debugOut("*** Testing remote connectivity:\n%s"%testProcess.cmd)
		if testProcess.wait()!=0:
			testProcess.logError(self.errorLog)
			raise RuntimeError("Failed to access remote Condor tools! The pool you are submitting to is very likely not configured properly.")
	# get initial workdir on remote pool
	if config.get( self._getSections("backend"), "remote workdir", ''):
		# create a per-user directory below the configured remote workdir
		uName=self.Pool.LoggedProcess("whoami").getOutput().strip()
		self.poolWorkDir=os.path.join(config.get( self._getSections("backend"), "remote workdir", ''), uName)
		pwdProcess=self.Pool.LoggedProcess("mkdir -p %s" % self.poolWorkDir )
	else:
		# fall back to the login directory of the (remote) account
		pwdProcess=self.Pool.LoggedProcess("pwd")
		self.poolWorkDir=pwdProcess.getOutput().strip()
	if pwdProcess.wait()!=0:
		raise RuntimeError("Failed to determine, create or verify base work directory on remote host with code %s!\nThere might be a problem with your credentials or authorisation.\nOutput Message: %s\nError Message: %s" % (pwdProcess.wait(),pwdProcess.getOutput(),pwdProcess.getError()) )
def _initInterfaces(self, **kwargs):
	"""Create CommandContainer wrappers for remote execution, copy and delete via ssh/scp.

	Each container receives the executable path, a lambda building the argument
	list and lambdas producing log-friendly descriptions.
	"""
	def makeArgList(*args):
		# Flatten mixed scalar/iterable arguments into one flat list, dropping
		# empty entries; strings count as scalars, not iterables of characters.
		argList = []
		for arg in args:
			if isinstance(arg, str) or not hasattr(arg, '__iter__'):
				argList.append(arg)
			else:
				argList.extend(arg)
		return [arg for arg in argList if arg]
	portArgs = lambda key: self._port and "-%s%s" % (key, self._port) or ""
	self._exeWrapper = CommandContainer(
		resolveInstallPath("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), portArgs('p'), self._host,
			" ".join((kwargs["command"], kwargs.get("args", '')))),
		lambda **kwargs: "'%(command)s' [via ssh %(URI)s]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
		lambda **kwargs: kwargs.get('args') and "Arguments: '%s'" % kwargs.get('args') or '')
	self._copy = CommandContainer(
		resolveInstallPath("scp"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), "-r",
			portArgs('P'), kwargs["source"],
			portArgs('P'), kwargs["destination"]),
		# fixed: the URI placeholder was previously never interpolated
		lambda **kwargs: "'scp' [%(URI)s]" % {"URI": self.URI},
		# fixed: '%(source)' / '%(destination)' lacked the 's' conversion char,
		# which raised ValueError whenever this description was rendered
		lambda **kwargs: "Transfer: '%(source)s' -> '%(destination)s'" % kwargs)
	self._delete = CommandContainer(
		resolveInstallPath("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), portArgs('p'), self._host,
			"rm -rf " + kwargs["target"]),
		# fixed: formatting against kwargs raised KeyError ('URI' not in kwargs)
		lambda **kwargs: "'rm' [via ssh %(URI)s]" % {"URI": self.URI},
		lambda **kwargs: "Target: '%(target)s'" % kwargs)
	self._socketWrapper = CommandContainer(
		resolveInstallPath("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getCurrentSocketArgs(), portArgs('p'), self._host,
			" ".join((kwargs["command"], kwargs.get("args", '')))),
		lambda **kwargs: "'%(command)s' [via ssh %(URI)s (master)]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
		lambda **kwargs: kwargs.get('args') and "Arguments: '%s'" % kwargs.get('args') or '')
def __init__(self, config, name):
	"""PBS backend: pbsnodes discovery plus optional server suffix for job ids."""
	PBSGECommon.__init__(self, config, name)
	self._nodesExec = utils.resolveInstallPath('pbsnodes')
	self._server = config.get('server', '', onChange=None)
	# fully qualify wms ids with ".<server>" when a server is configured
	self._fqid = lambda wmsId: utils.QM(self._server, '%s.%s' % (wmsId, self._server), wmsId)
def __init__(self, config, name):
	"""Host backend: run jobs via the bundled gc-host.sh script, monitor with ps, cancel with kill."""
	submit_cmd = utils.pathShare('gc-host.sh')
	status_cmd = utils.resolveInstallPath('ps')
	cancel_cmd = utils.resolveInstallPath('kill')
	LocalWMS.__init__(self, config, name,
		submitExec=submit_cmd, statusExec=status_cmd, cancelExec=cancel_cmd)
def __init__(self, config, wmsName=None):
	"""OGE backend (legacy): user and project settings plus the qconf configuration tool."""
	PBSGECommon.__init__(self, config, wmsName)
	default_user = os.environ.get('LOGNAME', '')
	self.user = config.get('user', default_user, onChange=None)
	self.project = config.get('project name', '', onChange=None)
	self.configExec = utils.resolveInstallPath('qconf')
def __initcommands(self, **kwargs):
	# Fail fast if the gsissh/gsiscp tools are not installed; the resolved
	# paths are discarded - only availability is verified here.
	resolveInstallPath('gsissh')
	resolveInstallPath('gsiscp')