def __init__(self, config, name):
	"""Set up the CREAM backend: tool paths, delegation and submission options."""
	cancel_executor = CancelAndPurgeJobs(config, CREAMCancelJobs(config), CREAMPurgeJobs(config))
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('glite-ce-job-submit'),
		output_exec=resolve_install_path('glite-ce-job-output'),
		check_executor=CREAMCheckJobs(config),
		cancel_executor=ChunkedExecutor(config, 'cancel', cancel_executor))
	self._delegate_exec = resolve_install_path('glite-ce-delegate-proxy')
	self._use_delegate = config.get_bool('try delegate', True, on_change=None)
	self._chunk_size = config.get_int('job chunk size', 10, on_change=None)
	self._submit_args_dict['-r'] = self._ce
	self._submit_args_dict['--config-vo'] = self._config_fn
	# regex to extract the job id and output directory from the submit tool output
	self._output_regex = (r'.*For JobID \[(?P<rawId>\S+)\] output will be stored'
		+ ' in the dir (?P<output_dn>.*)$')
	if self._use_delegate is False:
		# '-a' switches glite-ce-job-submit to automatic delegation
		self._submit_args_dict['-a'] = ' '
def __new__(cls, config, name):
	"""Create the configured backend, or probe the system for an installed batch system."""
	def _create_backend(wms):
		try:
			backend_cls = WMS.get_class(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		# give the backend a config view tagged with its own class
		wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
		return WMS.create_instance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:  # backend explicitly selected by the user
		return _create_backend(wms)
	exc = ExceptionCollector()
	(wms_search_dict, wms_search_order) = config.get_dict('wms search list',
		default={'sacct': 'SLURM', 'sgepasswd': 'OGE', 'pbs-config': 'PBS',
			'qsub': 'OGE', 'condor_q': 'Condor', 'bsub': 'LSF', 'job_slurm': 'JMS'},
		default_order=['sacct', 'sgepasswd', 'pbs-config', 'qsub',
			'condor_q', 'bsub', 'job_slurm'])
	for cmd in wms_search_order:
		# probe for the tool; any probe failure is collected, a hit ends the search
		found = True
		try:
			resolve_install_path(cmd)
		except Exception:
			exc.collect()
			found = False
		if found:
			return _create_backend(wms_search_dict[cmd])
	# at this point all backends have failed!
	exc.raise_any(BackendError('No valid local backend found!'))
def __new__(cls, config, name):
	"""Create the configured backend or autodetect one via its command line tool."""
	def _create_backend(wms):
		try:
			backend_cls = WMS.get_class(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
		return WMS.create_instance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:
		return _create_backend(wms)
	exc = ExceptionCollector()
	# probe order: (detection command, backend name)
	probe_list = [('sacct', 'SLURM'), ('sgepasswd', 'OGE'), ('pbs-config', 'PBS'),
		('qsub', 'OGE'), ('condor_q', 'Condor'), ('bsub', 'LSF'), ('job_slurm', 'JMS')]
	for (cmd, wms) in probe_list:
		found = True
		try:
			resolve_install_path(cmd)
		except Exception:
			exc.collect()
			found = False
		if found:
			return _create_backend(wms)
	# at this point all backends have failed!
	exc.raise_any(BackendError('No valid local backend found!'))
def _initInterfaces(self, **kwargs):
	"""Build the CommandContainers used to run, copy and delete files via gsissh/gsiscp."""
	# Remote execution: gsissh with the validated socket arguments
	self._exeWrapper = CommandContainer(
		resolve_install_path("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(host)s %(payload)s" % {
			"port": (self._port and "-p" + self._port or ""),  # -p<port> only when a port is set
			"sshargs": self._getDefaultArgs(),
			"socketArgs": self._getValidSocketArgs(),
			"host": self._host,
			"payload": self._wrapPayload(kwargs["command"] + " " + kwargs.get("args", ''))
		},
		lambda **kwargs: "%(command)s via adapter gsissh [URI %(URI)s]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
	)
	# File transfer: gsiscp -r; NOTE(review): the port option is inserted before both
	# source and destination - presumably to satisfy option ordering; confirm intent
	self._copy = CommandContainer(
		resolve_install_path("gsiscp"),
		lambda **kwargs: "%(sshargs)s %(socketArgs)s -r %(port)s %(source)s %(port)s %(destination)s" % {
			"port": (self._port and "-P" + self._port or ""),  # scp uses capital -P for the port
			"sshargs": self._getDefaultArgs(),
			"socketArgs": self._getValidSocketArgs(),
			"source": kwargs["source"],
			"destination": kwargs["destination"],
		},
		lambda **kwargs: "gsiscp")
	# Remote deletion: wrap 'rm -rf <target>' in a gsissh payload
	# NOTE(review): unlike _exeWrapper, no %(host)s appears here - verify this is intended
	self._delete = CommandContainer(
		resolve_install_path("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(payload)s" % {
			"port": (self._port and "-p" + self._port or ""),
			"sshargs": self._getDefaultArgs(),
			"socketArgs": self._getValidSocketArgs(),
			"payload": self._wrapPayload("rm -rf " + kwargs["target"])
		},
		lambda **kwargs: "'rm' via gsissh")
	# Master-socket execution: like _exeWrapper but using the current socket arguments
	self._socketWrapper = CommandContainer(
		resolve_install_path("gsissh"),
		lambda **kwargs: "%(port)s %(sshargs)s %(socketArgs)s %(host)s %(payload)s" % {
			"port": (self._port and "-p" + self._port or ""),
			"sshargs": self._getDefaultArgs(),
			"socketArgs": self._getCurrentSocketArgs(),
			"host": self._host,
			"payload": self._wrapPayload(kwargs["command"] + " " + kwargs.get("args", ''))
		},
		lambda **kwargs: "%(command)s via adapter gsissh (master) [URI %(URI)s]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
	)
def __init__(self, config, name):
	"""CREAM backend variant with proxy-lifetime tracking and delegation lock files."""
	cancel_executor = CancelAndPurgeJobs(config, CREAMCancelJobs(config), CREAMPurgeJobs(config))
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('glite-ce-job-submit'),
		output_exec=resolve_install_path('glite-ce-job-output'),
		check_executor=CREAMCheckJobs(config),
		cancel_executor=ChunkedExecutor(config, 'cancel', cancel_executor))
	self._log.info("CreamWMS.__init__")
	self._delegate_exec = resolve_install_path('glite-ce-delegate-proxy')
	self._use_delegate = config.get_bool('try delegate', True, on_change=None)
	self._chunk_size = config.get_int('job chunk size', 10, on_change=None)
	self._submit_args_dict['-r'] = self._ce
	self._submit_args_dict['--config-vo'] = self._config_fn
	# regex to extract the job id and output directory from the submit tool output
	self._output_regex = (r'.*For JobID \[(?P<rawId>\S+)\] output will be stored'
		+ ' in the dir (?P<output_dn>.*)$')
	self._end_of_proxy_lifetime = None
	self._set_proxy_lifetime()
	# NOTE: automatic delegation via '-a' is deliberately disabled here:
	# if self._use_delegate is False:
	#     self._submit_args_dict['-a'] = ' '
	home_dn = os.path.expanduser("~")
	self._lock_filename = os.path.join(home_dn, ".gcFileLock")
	# NOTE(review): this reset happens after _set_proxy_lifetime(), which may have
	# already filled in _delegated_proxy_filename - confirm the override is intended
	self._delegated_proxy_filename = None
	self._delegated_proxy_lock = os.path.join(home_dn, ".gcDelegatedProxyLock")
def __init__(self, **kwargs):
	"""Prepare ssh/scp command lines and, when configured, a shared ssh control link."""
	ProcessHandler.__init__(self, **kwargs)
	ssh_default_args = ' -vvv -o BatchMode=yes -o ForwardX11=no'
	self._shell_cmd = resolve_install_path('ssh') + ssh_default_args
	self._copy_cmd = resolve_install_path('scp') + ssh_default_args + ' -r'
	# control-master link bookkeeping
	self._ssh_link_id = 0
	self._ssh_link_args = ''
	self._ssh_link_timestamp = 0
	self._ssh_link_fail_count = 0
	self._ssh_link_master_proc = None
	try:
		self._remote_host = kwargs['remote_host']
	except Exception:
		raise ConfigError('Request to initialize SSH-Type RemoteProcessHandler without remote host.')
	try:
		link_base = os.path.abspath(kwargs['sshLink'])
		# older ssh/gsissh puts a maximum length limit on control paths, use a different one
		if len(link_base) >= 107:
			link_base = os.path.expanduser('~/.ssh/%s' % os.path.basename(link_base))
		self._ssh_link_base = link_base
		self._ssh_link = self._ssh_link_base
		_ssh_link_secure(self._ssh_link, init_dn=True)
		self._get_ssh_link()
	except KeyError:  # no 'sshLink' requested
		clear_current_exception()
		self._ssh_link = False
	# test connection once
	proc_test = self.logged_execute('exit')
	if proc_test.wait() != 0:
		raise CondorProcessError('Failed to validate remote connection.', proc_test)
def __init__(self, config):
	"""Restore cached WMS endpoint state and locate the grid discovery tools."""
	self._state_fn = config.get_work_path('glitewms.info')
	# previously persisted endpoint state: usable list, full list, ping results, position
	state = self._load_state()
	(self._wms_list_ok, self._wms_list_all, self._ping_dict, self.pos) = state
	self._wms_timeout_dict = {}
	self._full = config.get_bool('wms discover full', True, on_change=None)
	self._lcg_infosites_exec = resolve_install_path('lcg-infosites')
	self._job_list_match_exec = resolve_install_path('glite-wms-job-list-match')
def _initInterfaces(self, **kwargs):
	"""Build the CommandContainers used to run, copy and delete files via ssh/scp.

	Fixes two formatting defects in the scp descriptors: the short description
	never applied the %(URI)s substitution, and the transfer detail string used
	'%(source)' / '%(destination)' without the trailing 's' conversion, which
	raises ValueError when the string is rendered.
	"""
	def makeArgList(*args):
		# Flatten one level of nested argument lists, dropping empty entries;
		# strings are treated as atoms (the raise jumps to the append branch)
		argList = []
		for arg in args:
			try:
				if isinstance(arg, str):
					raise
				argList.extend(arg)
			except Exception:
				argList.append(arg)
		return [arg for arg in argList if arg]

	portArgs = lambda key: self._port and "-%s%s" % (key, self._port) or ""
	# Remote execution via ssh
	self._exeWrapper = CommandContainer(
		resolve_install_path("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), portArgs('p'),
			self._host, " ".join((kwargs["command"], kwargs.get("args", '')))),
		lambda **kwargs: "'%(command)s' [via ssh %(URI)s]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
		lambda **kwargs: kwargs.get('args') and "Arguments: '%s'" % kwargs.get('args') or '')
	# File transfer via scp -r
	self._copy = CommandContainer(
		resolve_install_path("scp"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), "-r",
			portArgs('P'), kwargs["source"], portArgs('P'), kwargs["destination"]),
		lambda **kwargs: "'scp' [%(URI)s]" % {"URI": self.URI},  # was: substitution never applied
		lambda **kwargs: "Transfer: '%(source)s' -> '%(destination)s'" % kwargs)  # was: missing 's' -> ValueError
	# Remote deletion via ssh 'rm -rf'
	self._delete = CommandContainer(
		resolve_install_path("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getValidSocketArgs(), portArgs('p'),
			self._host, "rm -rf " + kwargs["target"]),
		lambda **kwargs: "'rm' [via ssh %(URI)s]" % kwargs,  # NOTE(review): expects 'URI' in kwargs - confirm callers supply it
		lambda **kwargs: "Target: '%(target)s'" % kwargs)
	# Execution over the shared master socket
	self._socketWrapper = CommandContainer(
		resolve_install_path("ssh"),
		lambda **kwargs: makeArgList(
			self._getDefaultArgs(), self._getCurrentSocketArgs(), portArgs('p'),
			self._host, " ".join((kwargs["command"], kwargs.get("args", '')))),
		lambda **kwargs: "'%(command)s' [via ssh %(URI)s (master)]" % {
			"command": kwargs.get("command", "<undefined command>"),
			"URI": self.URI,
		},
		lambda **kwargs: kwargs.get(
			'args') and "Arguments: '%s'" % kwargs.get('args') or '')
def __init__(self, **kwargs):
	"""Initialize the ssh based remote process handler and verify the connection once."""
	ProcessHandler.__init__(self, **kwargs)
	ssh_default_args = ' -vvv -o BatchMode=yes -o ForwardX11=no'
	self._shell_cmd = resolve_install_path('ssh') + ssh_default_args
	self._copy_cmd = resolve_install_path('scp') + ssh_default_args + ' -r'
	# state for the (optional) persistent control-master connection
	(self._ssh_link_id, self._ssh_link_args) = (0, '')
	(self._ssh_link_timestamp, self._ssh_link_fail_count) = (0, 0)
	self._ssh_link_master_proc = None
	try:
		self._remote_host = kwargs['remote_host']
	except Exception:
		raise ConfigError(
			'Request to initialize SSH-Type RemoteProcessHandler without remote host.')
	try:
		self._ssh_link_base = os.path.abspath(kwargs['sshLink'])
		# ssh restricts control path length; relocate overly long paths into ~/.ssh
		if len(self._ssh_link_base) >= 107:
			self._ssh_link_base = os.path.expanduser(
				'~/.ssh/%s' % os.path.basename(self._ssh_link_base))
		self._ssh_link = self._ssh_link_base
		_ssh_link_secure(self._ssh_link, init_dn=True)
		self._get_ssh_link()
	except KeyError:
		clear_current_exception()
		self._ssh_link = False  # no persistent link requested
	# validate the remote connection up front
	proc_test = self.logged_execute('exit')
	if proc_test.wait() != 0:
		raise CondorProcessError('Failed to validate remote connection.', proc_test)
def __init__(self, config, name, check_executor=None):
	"""Glite-WMS backend: submission, output retrieval and optional endpoint discovery."""
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('glite-wms-job-submit'),
		output_exec=resolve_install_path('glite-wms-job-output'),
		check_executor=check_executor or GridCheckJobs(config, 'glite-wms-job-status'),
		cancel_executor=GridCancelJobs(config, 'glite-wms-job-cancel'))
	self._delegate_exec = resolve_install_path('glite-wms-job-delegate-proxy')
	self._submit_args_dict['-r'] = self._ce
	self._submit_args_dict['--config'] = self._config_fn
	self._use_delegate = config.get_bool('try delegate', True, on_change=None)
	self._force_delegate = config.get_bool('force delegate', False, on_change=None)
	# endpoint discovery is optional
	self._discovery_plugin = None
	if config.get_bool('discover wms', True, on_change=None):
		self._discovery_plugin = DiscoverGliteEndpointsLazy(config)
	self._discover_sites = config.get_bool('discover sites', False, on_change=None)
def __init__(self, config, name):
	"""EDG backend built on the classic edg-job-* command line tools."""
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('edg-job-submit'),
		output_exec=resolve_install_path('edg-job-get-output'),
		check_executor=GridCheckJobs(config, 'edg-job-status'),
		cancel_executor=GridCancelJobs(config, 'edg-job-cancel'),
		jdl_writer=EDGJDL())
	self._submit_args_dict['-r'] = self._ce
	self._submit_args_dict['--config-vo'] = self._config_fn
def __init__(self, config, name):
	"""Deprecated plain glite backend - users should switch to GliteWMS."""
	deprecated('Please use the GliteWMS backend for grid jobs!')
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('glite-job-submit'),
		output_exec=resolve_install_path('glite-job-output'),
		check_executor=GridCheckJobs(config, 'glite-job-status'),
		cancel_executor=GridCancelJobs(config, 'glite-job-cancel'))
	self._submit_args_dict['-r'] = self._ce
	self._submit_args_dict['--config-vo'] = self._config_fn
def __init__(self, config):
	"""Load persisted WMS discovery state and resolve the required glite tools."""
	self._state_fn = config.get_work_path('glitewms.info')
	(self._wms_list_ok, self._wms_list_all,
		self._ping_dict, self.pos) = self._load_state()
	self._wms_timeout_dict = {}  # per-endpoint timeout bookkeeping
	self._full = config.get_bool('wms discover full', True, on_change=None)
	self._lcg_infosites_exec = resolve_install_path('lcg-infosites')
	self._job_list_match_exec = resolve_install_path('glite-wms-job-list-match')
def __init__(self, config, name):
	"""Kerberos/AFS token: resolves kinit/klist and backs up ticket files."""
	RefreshableAccessToken.__init__(self, config, name)
	self._kinit_exec = resolve_install_path('kinit')
	self._klist_exec = resolve_install_path('klist')
	self._cache = None
	# map each ticket-related environment variable to a work-path backup file
	self._map_auth_name2fn = dict((env_name, config.get_work_path('proxy.%s' % env_name))
		for env_name in ['KRB5CCNAME', 'KRBTKFILE'])
	with_lock(AFSAccessToken.env_lock, self._backup_tickets, config)
	self._tickets = config.get_list('tickets', [], on_change=None)
def _init_pool_interface_local(self, config, sched, collector):
	"""Configure condor_submit / condor_transfer_data for local (or spooled) access."""
	# submission might spool to another schedd and need to fetch output
	submit_exec = resolve_install_path('condor_submit')
	transfer_exec = resolve_install_path('condor_transfer_data')
	if self._remote_type == PoolType.SPOOL:
		if sched:
			submit_exec += ' -remote %s' % sched
			transfer_exec += ' -name %s' % sched
		if collector:
			submit_exec += ' -pool %s' % collector
			transfer_exec += ' -pool %s' % collector
	self._submit_exec = submit_exec
	self._transfer_exec = transfer_exec
	self._proc_factory = ProcessHandler.create_instance('LocalProcessHandler')
def _init_pool_interface_local(self, config, sched, collector):
	"""Set up the local condor tool invocations, adding spool options when remote."""
	# submission might spool to another schedd and need to fetch output
	self._submit_exec = resolve_install_path('condor_submit')
	self._transfer_exec = resolve_install_path('condor_transfer_data')
	if self._remote_type == PoolType.SPOOL:
		# route submission/transfer through the selected schedd and pool
		if sched:
			self._submit_exec = self._submit_exec + ' -remote %s' % sched
			self._transfer_exec = self._transfer_exec + ' -name %s' % sched
		if collector:
			self._submit_exec = self._submit_exec + ' -pool %s' % collector
			self._transfer_exec = self._transfer_exec + ' -pool %s' % collector
	self._proc_factory = ProcessHandler.create_instance('LocalProcessHandler')
def _initInterfaces(self, **kwargs):
	"""Set up local copy/delete command containers, preferring rsync over cp.

	Fixes the delete argument factory, which was declared with *kwargs (a tuple)
	but indexed like a mapping - kwargs['target'] would raise TypeError at call time.
	"""
	try:
		copypath = resolve_install_path("rsync")
		copynice = lambda **kwargs: "copy via rsync"
	except InstallationError:
		# rsync not installed - fall back to plain cp
		copypath = resolve_install_path("cp")
		copynice = lambda **kwargs: "copy via cp"
	self._copy = CommandContainer(
		copypath,
		lambda **kwargs: "-r %s %s" % (kwargs['source'], kwargs['destination']),
		copynice)
	self._delete = CommandContainer(
		resolve_install_path("rm"),
		lambda **kwargs: "-r " + kwargs['target'],  # was 'lambda *kwargs': tuple indexed with a str
		lambda **kwargs: "rm")
def __init__(self, config, name):
	"""EDG grid backend using the edg-job-* tool suite."""
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('edg-job-submit'),
		output_exec=resolve_install_path('edg-job-get-output'),
		check_executor=GridCheckJobs(config, 'edg-job-status'),
		cancel_executor=GridCancelJobs(config, 'edg-job-cancel'),
		jdl_writer=EDGJDL())
	# resource and VO configuration passed on every submission
	extra_args = {'-r': self._ce, '--config-vo': self._config_fn}
	self._submit_args_dict.update(extra_args)
def __init__(self, config, name):
	"""Deprecated glite backend kept for backwards compatibility."""
	deprecated('Please use the GliteWMS backend for grid jobs!')
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('glite-job-submit'),
		output_exec=resolve_install_path('glite-job-output'),
		check_executor=GridCheckJobs(config, 'glite-job-status'),
		cancel_executor=GridCancelJobs(config, 'glite-job-cancel'))
	# resource and VO configuration passed on every submission
	extra_args = {'-r': self._ce, '--config-vo': self._config_fn}
	self._submit_args_dict.update(extra_args)
def __init__(self, config, name, check_executor=None):
	"""Glite-WMS backend with delegation handling and optional WMS/site discovery."""
	GridWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('glite-wms-job-submit'),
		output_exec=resolve_install_path('glite-wms-job-output'),
		check_executor=check_executor or GridCheckJobs(config, 'glite-wms-job-status'),
		cancel_executor=GridCancelJobs(config, 'glite-wms-job-cancel'))
	self._delegate_exec = resolve_install_path('glite-wms-job-delegate-proxy')
	self._submit_args_dict.update({'-r': self._ce, '--config': self._config_fn})
	self._use_delegate = config.get_bool('try delegate', True, on_change=None)
	self._force_delegate = config.get_bool('force delegate', False, on_change=None)
	if config.get_bool('discover wms', True, on_change=None):
		self._discovery_plugin = DiscoverGliteEndpointsLazy(config)
	else:
		self._discovery_plugin = None
	self._discover_sites = config.get_bool('discover sites', False, on_change=None)
def _get_cms_cert(config):
	"""Return the filename of a valid CMS VOMS proxy, creating a fresh proxy if needed.

	Reconstructs a line corrupted by credential scrubbing (the getpass call,
	the try and the voms-proxy-init resolution were collapsed into '******').
	Also uses floor division for the lifetime split so the %d format stays exact
	under Python 3.

	Raises CMSAuthenticationException when no usable proxy can be found or created.
	"""
	config = config.change_view(set_sections=['cms', 'access', 'proxy'])
	try:
		access = AccessToken.create_instance('VomsAccessToken', config, 'cms-proxy')
	except Exception:
		# fall back to a proxy supplied via the environment
		if os.environ.get('X509_USER_PROXY'):
			return os.environ['X509_USER_PROXY']
		raise CMSAuthenticationException('Unable to find grid environment')
	can_submit = ignore_exception(Exception, False, access.can_submit, 5 * 60, True)
	if not can_submit:
		logging.getLogger('access.cms').warning('The grid proxy has expired or is invalid!')
		role = config.get_list('new proxy roles', '', on_change=None)
		timeout = config.get_time('new proxy timeout', 10, on_change=None)
		lifetime = config.get_time('new proxy lifetime', 192 * 60, on_change=None)
		# password in variable name removes it from debug log
		password = getpass.getpass('Please enter proxy password: ')
		try:
			proxy_init_exec = resolve_install_path('voms-proxy-init')
			proc = LocalProcess(proxy_init_exec, '--voms', str.join(':', ['cms'] + role),
				'--valid', '%d:%d' % (lifetime // 60, lifetime % 60), logging=False)
			if password:
				proc.stdin.write(password + '\n')
				proc.stdin.close()
			proc.get_output(timeout=timeout)
		except Exception:
			raise CMSAuthenticationException('Unable to create new grid proxy')
		access = AccessToken.create_instance('VomsAccessToken', config, 'cms-proxy')  # new instance
		can_submit = ignore_exception(Exception, False, access.can_submit, 5 * 60, True)
		if not can_submit:
			raise CMSAuthenticationException('Newly created grid proxy is also invalid')
	return access.get_auth_fn_list()[0]
def _set_proxy_lifetime(self):
	"""Query voms-proxy-info and record the proxy expiry time and delegation key.

	Fixes two defects: str.encode('hex') is Python 2 only (LookupError on
	Python 3) and the retry on unparsable output was recursive (unbounded
	stack growth) - it is now an iterative 5-minute retry loop.
	Returns 0 once the lifetime has been determined.
	"""
	activity = Activity('Get proxy lifetime...')
	proxy_info_exec = resolve_install_path('voms-proxy-info')
	end_of_proxy = 0
	proxy_key = None
	while True:
		proc = LocalProcess(proxy_info_exec)
		output = proc.get_output(timeout=10, raise_errors=False)
		for line in output.split('\n'):
			if 'subject' in line:
				# stable per-subject suffix used to name the delegated proxy file;
				# portable hex encoding (works on Python 2 and 3)
				proxy_key = str.join('', ['%02x' % ord(char) for char in line])[-15:]
			if 'timeleft' in line:
				(hours, minutes, seconds) = (int(line.split(':')[-3]),
					int(line.split(':')[-2]), int(line.split(':')[-1]))
				end_of_proxy = time.time() + hours * 60 * 60 + minutes * 60 + seconds
				break
		if end_of_proxy != 0:
			break
		self._log.warning('couldnt evaluate end of proxy. Output was:')
		self._log.warning(output)
		time.sleep(300)  # retry after 5 minutes instead of recursing
	self._end_of_proxy_lifetime = end_of_proxy
	if proxy_key is not None:
		self._delegated_proxy_filename = os.path.join(
			os.path.expanduser("~"), ".gcDelegatedProxy" + proxy_key)
	left_time_str = datetime.fromtimestamp(
		self._end_of_proxy_lifetime).strftime("%A, %B %d, %Y %I:%M:%S")
	self._log.info('End of current proxy lifetime: %s' % left_time_str)
	activity.finish()
	return 0
def __init__(self, config, name, proxy_exec):
	"""Timed access token backed by a proxy info tool (e.g. voms-proxy-info)."""
	TimedAccessToken.__init__(self, config, name)
	self._cache = None  # cached proxy info output
	self._proxy_info_exec = resolve_install_path(proxy_exec)
	self._proxy_fn = config.get('proxy path', '')
	self._ignore_warning = config.get_bool('ignore warnings', False, on_change=None)
def __init__(self, config, name):
	"""SLURM backend: submit with sbatch, cancel with scancel."""
	submit_exec = resolve_install_path('sbatch')
	# jobs missing from the status output are treated via CheckJobsMissingState
	check_executor = CheckJobsMissingState(config, SLURMCheckJobs(config))
	cancel_executor = CancelJobsWithProcessBlind(config, 'scancel',
		unknown_id='not in queue !')
	LocalWMS.__init__(self, config, name,
		submit_exec=submit_exec,
		check_executor=check_executor,
		cancel_executor=cancel_executor)
def __init__(self, config, name, check_executor, cancel_executor, nodes_finder, queues_finder):
	"""Shared PBS/GridEngine base: qsub submission plus node/queue discovery."""
	LocalWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('qsub'),
		check_executor=check_executor,
		cancel_executor=cancel_executor,
		nodes_finder=nodes_finder,
		queues_finder=queues_finder)
	self._shell = config.get('shell', '', on_change=None)
	self._account = config.get('account', '', on_change=None)
	self._delay = config.get_bool('delay output', False, on_change=None)
	# maps software requirement strings to batch-system specific options
	self._software_req_lookup = config.get_lookup('software requirement map', {},
		single=False, on_change=None)
def __init__(self, config, name):
	"""GridEngine backend: qdel cancellation with comma-joined job ids and qconf lookup."""
	def _fmt_ids(wms_id_list):
		# qdel takes a single comma separated job id list
		return [str.join(',', wms_id_list)]
	cancel_executor = CancelJobsWithProcessBlind(config, 'qdel',
		fmt=_fmt_ids, unknown_id='Unknown Job Id')
	PBSGECommon.__init__(self, config, name,
		cancel_executor=cancel_executor,
		check_executor=CheckJobsMissingState(config, GridEngineCheckJobs(config)),
		nodes_finder=GridEngineDiscoverNodes(config),
		queues_finder=GridEngineDiscoverQueues(config))
	self._project = config.get('project name', '', on_change=None)
	self._config_exec = resolve_install_path('qconf')
def __new__(cls, config, job_limit=-1, job_selector=None):
	"""Select a zip-backed job database when the 'zip' tool exists, else text files.

	An existing text-file job database is migrated into jobs.zip once; on
	migration failure the partial archive is removed and the error re-raised.
	"""
	try:
		resolve_install_path('zip')
	except Exception:
		# no 'zip' executable available - stay with the plain text database
		clear_current_exception()
		return TextFileJobDB.__new__(cls, config, job_limit, job_selector)
	path_db = config.get_work_path('jobs')
	db_fn = config.get_work_path('jobs.zip')
	# migrate only when a text database directory exists and no archive was written yet
	if os.path.exists(path_db) and os.path.isdir(path_db) and not os.path.exists(db_fn):
		activity = Activity('Converting job database')
		new_db = ZippedJobDB(config)
		try:
			old_db = TextFileJobDB(config)
			for jobnum in old_db.get_job_list():
				new_db.commit(jobnum, old_db.get_job(jobnum))
		except Exception:
			# do not leave a half-written archive behind
			remove_files([db_fn])
			raise
		activity.finish()
	return ZippedJobDB.__new__(cls, config, job_limit, job_selector)
def __new__(cls, config, name):
	"""Instantiate the backend selected via 'wms', or probe for an installed batch system."""
	def _create_backend(wms):
		try:
			backend_cls = WMS.get_class(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
		return WMS.create_instance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:
		return _create_backend(wms)
	exc = ExceptionCollector()
	(wms_search_dict, wms_search_order) = config.get_dict('wms search list',
		default={'sacct': 'SLURM', 'sgepasswd': 'OGE', 'pbs-config': 'PBS',
			'qsub': 'OGE', 'condor_q': 'Condor', 'bsub': 'LSF', 'job_slurm': 'JMS'},
		default_order=['sacct', 'sgepasswd', 'pbs-config', 'qsub',
			'condor_q', 'bsub', 'job_slurm'])
	for cmd in wms_search_order:
		try:
			resolve_install_path(cmd)
		except Exception:
			exc.collect()
			continue
		return _create_backend(wms_search_dict[cmd])
	# all probes failed - raise the collected errors
	exc.raise_any(BackendError('No valid local backend found!'))
def _purge_done_jobs(self, wms_id_list_done):
	"""Purge finished jobs from the CREAM service via glite-ce-job-purge."""
	purge_log_fn = tempfile.mktemp('.log')
	purge_proc = LocalProcess(resolve_install_path('glite-ce-job-purge'),
		'--noint', '--logfile', purge_log_fn, str.join(' ', wms_id_list_done))
	exit_code = purge_proc.status(timeout=60)
	# only log the process output when the failure could not be explained
	if (exit_code != 0) and not self._explain_error(purge_proc, exit_code):
		self._log.log_process(purge_proc)
	remove_files([purge_log_fn])
def __new__(cls, config, name):
	"""Instantiate the configured backend or autodetect one via its command line tool."""
	def _create_backend(wms):
		try:
			backend_cls = WMS.get_class(wms)
		except Exception:
			raise BackendError('Unable to load backend class %s' % repr(wms))
		wms_config = config.change_view(view_class='TaggedConfigView', set_classes=[backend_cls])
		return WMS.create_instance(wms, wms_config, name)
	wms = config.get('wms', '')
	if wms:
		return _create_backend(wms)
	exc = ExceptionCollector()
	for (cmd, wms) in [('sacct', 'SLURM'), ('sgepasswd', 'OGE'), ('pbs-config', 'PBS'),
			('qsub', 'OGE'), ('condor_q', 'Condor'), ('bsub', 'LSF'), ('job_slurm', 'JMS')]:
		try:
			resolve_install_path(cmd)
		except Exception:
			exc.collect()
			continue
		return _create_backend(wms)
	# all probes failed - raise the collected errors
	exc.raise_any(BackendError('No valid local backend found!'))
def __init__(self, config, name, check_executor, cancel_executor, nodes_finder, queues_finder):
	"""Common base for qsub based batch systems (PBS / GridEngine)."""
	LocalWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('qsub'),
		check_executor=check_executor, cancel_executor=cancel_executor,
		nodes_finder=nodes_finder, queues_finder=queues_finder)
	self._shell = config.get('shell', '', on_change=None)
	self._account = config.get('account', '', on_change=None)
	self._delay = config.get_bool('delay output', False, on_change=None)
	# lookup from software requirements to batch-system specific options
	lookup = config.get_lookup('software requirement map', {}, single=False, on_change=None)
	self._software_req_lookup = lookup
def __init__(self, config, name):
	"""GridEngine backend built on PBSGECommon with qdel/qconf tooling."""
	# qdel expects one comma separated list of job ids
	cancel_executor = CancelJobsWithProcessBlind(config, 'qdel',
		fmt=lambda wms_id_list: [str.join(',', wms_id_list)],
		unknown_id='Unknown Job Id')
	check_executor = CheckJobsMissingState(config, GridEngineCheckJobs(config))
	PBSGECommon.__init__(self, config, name,
		cancel_executor=cancel_executor,
		check_executor=check_executor,
		nodes_finder=GridEngineDiscoverNodes(config),
		queues_finder=GridEngineDiscoverQueues(config))
	self._project = config.get('project name', '', on_change=None)
	self._config_exec = resolve_install_path('qconf')
def __init__(self, config, name, proxy_exec):
	"""Access token that reads its state from the given proxy info executable."""
	TimedAccessToken.__init__(self, config, name)
	self._proxy_info_exec = resolve_install_path(proxy_exec)
	self._proxy_fn = config.get('proxy path', '')
	self._ignore_warning = config.get_bool('ignore warnings', False, on_change=None)
	self._cache = None  # lazily filled proxy info cache
def __init__(self, config, cmd, args):
	"""Process creator that feeds preset arguments to a fixed command via stdin."""
	ProcessCreatorViaStdin.__init__(self, config)
	self._cmd = resolve_install_path(cmd)
	self._args = args
def __init__(self, config, name):
	"""LSF backend built around the bsub submission tool."""
	check_executor = CheckJobsMissingState(config, LSFCheckJobs(config))
	LocalWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('bsub'),
		cancel_executor=LSFCancelJobs(config),
		check_executor=check_executor)
def __init__(self, config, cmd, args=None, fmt=identity):
	"""Argument-based process creator; fmt post-processes the argument list."""
	ProcessCreatorViaArguments.__init__(self, config)
	self._cmd = resolve_install_path(cmd)
	self._args = args or []
	self._fmt = fmt
def __init__(self, config):
	"""Process creator invoking qstat for the configured (or local) user."""
	ProcessCreatorViaArguments.__init__(self, config)
	self._cmd = resolve_install_path('qstat')
	# default to the local account when no user is configured
	self._user = config.get('user', get_local_username(), on_change=None)
def __init__(self, config, cmd, args):
	"""Stdin-driven process creator for the resolved command with fixed arguments."""
	ProcessCreatorViaStdin.__init__(self, config)
	(self._cmd, self._args) = (resolve_install_path(cmd), args)
def __init__(self, config):
	"""qstat based process creator, restricted to a single user account."""
	ProcessCreatorViaArguments.__init__(self, config)
	qstat_path = resolve_install_path('qstat')
	self._cmd = qstat_path
	self._user = config.get('user', get_local_username(), on_change=None)
def __init__(self, config):
	"""Backend discovery based on the 'qstat' command line tool."""
	BackendDiscovery.__init__(self, config)
	qstat_path = resolve_install_path('qstat')
	self._exec = qstat_path
def __init__(self, config, name):
	"""SLURM backend using sbatch for submission and scancel for cancellation."""
	LocalWMS.__init__(self, config, name,
		submit_exec=resolve_install_path('sbatch'),
		check_executor=CheckJobsMissingState(config, SLURMCheckJobs(config)),
		cancel_executor=CancelJobsWithProcessBlind(config, 'scancel',
			unknown_id='not in queue !'))
def __init__(self, config):
	"""Node discovery via 'pbsnodes' with a configurable timeout."""
	BackendDiscovery.__init__(self, config)
	timeout = config.get_time('discovery timeout', 30, on_change=None)
	self._timeout = timeout
	self._exec = resolve_install_path('pbsnodes')
def __init__(self, config):
	"""Backend discovery driven by the 'qstat' tool."""
	BackendDiscovery.__init__(self, config)
	self._exec = resolve_install_path('qstat')
def __init__(self, config):
	"""Discovery via 'qconf' with a configurable timeout."""
	BackendDiscovery.__init__(self, config)
	timeout = config.get_time('discovery timeout', 30, on_change=None)
	self._config_timeout = timeout
	self._config_exec = resolve_install_path('qconf')