def __init__(self):

    base.Base.__init__(self, _ADAPTOR_INFO)

    # there are no default myproxy contexts
    self._default_contexts = []

    self.base_workdir = ru.get_radical_base('saga') + 'adaptors/myproxy'
def __init__(self, url, session=None, logger=None, cfg=None,
             posix=True, interactive=True):

    if logger: self.logger = logger
    else     : self.logger = ru.Logger('radical.saga.pty')

    if session: self.session = session
    else      : self.session = ss.Session(default=True)

    self.logger.debug("PTYShell init %s" % self)

    self.url         = url          # describes the shell to run
    self.posix       = posix        # /bin/sh compatible?
    self.interactive = interactive  # bash -i ?
    self.latency     = 0.0          # set by factory
    self.cp_slave    = None         # file copy channel

    self.initialized = False

    self.pty_id       = PTYShell._pty_id
    PTYShell._pty_id += 1

    name = None
    if isinstance(cfg, str):
        name = cfg
        cfg  = None

    self.cfg = ru.Config('radical.saga.session', name=name, cfg=cfg)
    self.cfg = self.cfg.pty

    # get prompt pattern from config, or use default
    self.prompt    = self.cfg.get('prompt_pattern', DEFAULT_PROMPT)
    self.prompt_re = re.compile("^(.*?)%s" % self.prompt, re.DOTALL)
    self.logger.info("PTY prompt pattern: %s" % self.prompt)

    # local dir for file staging caches
    self.base = ru.get_radical_base('saga') + 'adaptors/shell/'
    try:
        ru.rec_makedir(self.base)
    except OSError as e:
        raise rse.NoSuccess('could not create staging dir: %s' % e) from e

    self.factory   = supsf.PTYShellFactory()
    self.pty_info  = self.factory.initialize(self.url, self.session,
                                             self.prompt, self.logger,
                                             self.cfg, self.posix,
                                             interactive=self.interactive)
    self.pty_shell = self.factory.run_shell(self.pty_info)

    self._trace('init : %s' % self.pty_shell.command)

    self.initialize()
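# ----------------------------------------------------------------------------
# A minimal usage sketch for the constructor above.  It assumes radical.saga
# is installed, that PTYShell is importable as below, and that it exposes a
# blocking run_sync() call per the shell adaptor's conventions -- treat these
# as assumptions, not as the verified API of this file.

import radical.saga.utils.pty_shell as sups

shell         = sups.PTYShell('fork://localhost/')   # local shell via a pty
ret, out, err = shell.run_sync('echo hello')         # blocking execution
assert ret == 0
print(out.strip())                                   # -> 'hello'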
def __init__(self):

    a_base.Base.__init__(self, _ADAPTOR_INFO)

    # raw string, so that the regex escapes survive python's string parsing
    self.id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
    self.epoch = datetime(1970, 1, 1)

    self.purge_on_start   = self._cfg['purge_on_start']
    self.purge_older_than = self._cfg['purge_older_than']

    self.base_workdir = ru.get_radical_base('saga') + 'adaptors/sge'
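# ----------------------------------------------------------------------------
# The id_re pattern above splits a SAGA job id of the form
# '[service-url]-[native-id]' into its two parts.  A quick standalone sketch
# (the id string below is made up for illustration):

import re

id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
match = id_re.match('[sge+ssh://login.example.org/]-[12345]')
if match:
    service_url, native_id = match.groups()
    print(service_url)   # -> sge+ssh://login.example.org/
    print(native_id)     # -> 12345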
@classmethod
def setUpClass(cls):

    cls._base_dir = ru.get_radical_base('utils')
    cls._pid_str  = '%06d' % os.getpid()

    cls._user = None
    try:
        import getpass
        cls._user = getpass.getuser()
    except Exception:
        cls._user = '******'

    cls._test_cases = []
    for f in glob.glob(TEST_CASES_PATH):
        cls._test_cases.extend(ru.read_json(f))
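# ----------------------------------------------------------------------------
# The test-case collection above can be sketched standalone.  The glob below
# is made up, and each matched file is assumed to hold a JSON list of cases;
# ru.read_json parses one JSON file into Python data.

import glob
import radical.utils as ru

TEST_CASES_PATH = 'tests/test_cases/*.json'    # hypothetical glob pattern

test_cases = []
for fname in glob.glob(TEST_CASES_PATH):
    test_cases.extend(ru.read_json(fname))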
def __init__(self):

    a_base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)

    # raw string, so that the regex escapes survive python's string parsing
    self.id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
    self.epoch = datetime.datetime(1970, 1, 1)

    # Adaptor Options
    self.purge_on_start   = self._cfg['purge_on_start']
    self.purge_older_than = self._cfg['purge_older_than']

    self.base_workdir = ru.get_radical_base('saga') + 'adaptors/cobalt'

    # dictionaries to keep track of certain Cobalt jobs data
    self._script_file         = dict()  # location of cobalt script file
    self._job_current_workdir = dict()  # working dir, for status checking
def init_instance(self, adaptor_state, rm_url, session):
    """ service instance constructor """

    self.rm      = rm_url
    self.session = session
    self.ppn     = 0          # TODO: check for removal
    self.jobs    = dict()

    self.cluster_option    = ''
    self.energy_policy_tag = None
    self.island_count      = None
    self.node_usage        = None
    self.network_mpi       = None
    self.blocking          = None
    self.job_type          = 'MPICH'  # TODO: Is this a sane default?

    self.enforce_resource_submission          = False
    self.enforce_consumable_cpus              = False
    self.enforce_consumable_memory            = False
    self.enforce_consumable_virtual_memory    = False
    self.enforce_consumable_large_page_memory = False

    self.temp_path = ru.get_radical_base('saga') + 'adaptors/loadl_job'

    # LoadLeveler has two ways of specifying the executable and arguments:
    #
    # - explicit: the executable and arguments are specified as parameters;
    # - implicit: the (remainder of the) job script is the task.
    #
    # Currently we don't know how this policy can be detected at runtime.
    # We know that providing both will not work in all cases.
    #
    # As the IBM Red Book documents the explicit exec only, we should use
    # that as a default.  Currently we just use a hack to work around Joule.
    #
    # Note: does this now simply become a Joule hack?
    #
    # TODO: Split script into submission file and script, and use that for
    #       explicit exec?
    self.explicit_exec = False

    rm_scheme = rm_url.scheme
    pty_url   = ru.Url(rm_url)

    # this adaptor supports options that can be passed via the 'query'
    # component of the job service URL
    if rm_url.query is not None:
        for key, val in parse_qs(rm_url.query).items():
            if key == 'cluster':
                self.cluster_option = " -X %s" % val[0]
            elif key == 'energy_policy_tag':
                self.energy_policy_tag = val[0]
            elif key == 'island_count':
                self.island_count = val[0]
            elif key == 'node_usage':
                self.node_usage = val[0]
            elif key == 'network_mpi':
                self.network_mpi = val[0]
            elif key == 'blocking':
                self.blocking = val[0]
            elif key == 'job_type':
                self.job_type = val[0]
            elif key == 'enforce_consumable_cpus':
                self.enforce_consumable_cpus     = True
                self.enforce_resource_submission = True
            elif key == 'enforce_consumable_memory':
                self.enforce_consumable_memory   = True
                self.enforce_resource_submission = True
            elif key == 'enforce_consumable_virtual_memory':
                self.enforce_consumable_virtual_memory = True
                self.enforce_resource_submission       = True
            elif key == 'enforce_consumable_large_page_memory':
                self.enforce_consumable_large_page_memory = True
                self.enforce_resource_submission          = True
            elif key == 'explicit_exec':
                self.explicit_exec = True

    # we need to extract the scheme for PTYShell: that is basically the
    # job.Service URL without the 'loadl+' part.  We use the PTYShell to
    # execute LoadLeveler commands either locally, or via ssh or gsissh.
    if rm_scheme == "loadl":
        pty_url.scheme = "fork"
    elif rm_scheme == "loadl+ssh":
        pty_url.scheme = "ssh"
    elif rm_scheme == "loadl+gsissh":
        pty_url.scheme = "gsissh"

    # these are the commands that we need in order to interact with
    # LoadLeveler.  The adaptor will try to find them during
    # initialize(self), and bail out in case they are not available.
    self._commands = {'llq'     : None,
                      'llsubmit': None,
                      'llcancel': None}

    self.shell = sups.PTYShell(pty_url, self.session)

    # self.shell.set_initialize_hook(self.initialize)
    # self.shell.set_finalize_hook(self.finalize)

    self.initialize()

    return self.get_api()
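# ----------------------------------------------------------------------------
# A standalone sketch of the query-option mechanism used above: adaptor
# options ride on the 'query' part of the job service URL.  The host and
# option values below are made up; only the stdlib urllib.parse is used.

from urllib.parse import urlparse, parse_qs

url   = 'loadl+ssh://login.example.org/?cluster=c2&job_type=MPICH'
query = urlparse(url).query                  # -> 'cluster=c2&job_type=MPICH'

for key, val in parse_qs(query).items():
    print(key, '->', val[0])
# cluster  -> c2
# job_type -> MPICH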