def run_async (self, command) :
    """
    Start *command* on the shell and return immediately.

    No prompt is awaited -- the caller is expected to search for the
    prompt later on (see :func:`find_prompt`), and may meanwhile interact
    with the running command over the shell's I/O channels.

    :type  command: string
    :param command: shell command to run.  I/O redirection within the
        command is permitted, since we do not capture output here.
    """
    with self.pty_shell.rlock :

        self._trace ("run async : %s" % command)
        self.pty_shell.flush ()

        # An async command must start from a shell in ground state, so we
        # can safely check liveness here, and recover the shell if needed.
        if not self.pty_shell.alive (recover=True) :
            raise se.IncorrectState ("Cannot run command:\n%s" \
                                  % self.pty_shell.autopsy ())

        try :
            self.send ("%s\n" % command.strip ())
        except Exception as e :
            raise ptye.translate_exception (e)
def get_job (self, job_id, ttype=None) :
    """
    get_job(job_id)

    Return the job object for a given job id.

    :param job_id: The id of the job to retrieve
    :rtype:        :class:`saga.job.Job`

    Job objects are a local representation of a remote stateful entity,
    and the job.Service supports reconnecting to such remote entities::

        service = saga.job.Service("fork://localhost")
        j       = service.get_job(my_job_id)

        if   j.get_state() == saga.job.Job.Pending : print "pending"
        elif j.get_state() == saga.job.Job.Running : print "running"
        else                                       : print "job is already final!"

        service.close()
    """
    if not self.valid :
        raise se.IncorrectState ("This instance was already closed.")

    # the adaptor performs the actual reconnect
    return self._adaptor.get_job (job_id, ttype=ttype)
def run_job (self, cmd, host=None, ttype=None) :
    """
    run_job(cmd, host=None)

    Convenience method: run a single command as a job.

    :param cmd:  command line to execute
    :param host: optional host hint, forwarded to the adaptor
    :rtype:      :class:`saga.job.Job`

    If the adaptor provides a native ``run_job``, it is used; otherwise we
    fall back to splitting ``cmd`` into a job description and submitting it
    via :meth:`create_job` / ``job.run()``.

    :raises se.IncorrectState: if the service was already closed.
    :raises se.BadParameter:   if ``cmd`` is empty.
    """
    if not self.valid :
        raise se.IncorrectState ("This instance was already closed.")

    if not cmd:
        raise se.BadParameter('run_job needs a command to run. Duh!')

    try:
        # lets see if the adaptor implements run_job
        return self._adaptor.run_job (cmd, host, ttype=ttype)
    except Exception:
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt and hid genuine adaptor failures.
        # fall back to the default implementation below
        pass

    # The adaptor has no run_job -- we here provide a generic implementation
    # FIXME: split should be more clever and respect POSIX shell syntax.
    args = cmd.split()

    jd = descr.Description()
    jd.executable = args[0]
    jd.arguments  = args[1:]

    job = self.create_job(jd)
    job.run()

    return job
def close (self) :
    """
    close()

    Disconnect from the (remote) job service backend and invalidate this
    instance.  Every later call on the instance raises an exception.

    Example::

        service = saga.job.Service("fork://localhost")
        # do something with the 'service' object, create jobs, etc...
        service.close()

        service.list() # this call will throw an exception

    .. warning:: Although the destructor triggers `close()` when an
        instance goes out of scope, Python's garbage collection timing is
        unpredictable -- call `close()` explicitly, in particular in
        multi-threaded programs, to avoid spurious errors.
    """
    if not self.valid :
        raise se.IncorrectState ("This instance was already closed.")

    # tear down the adaptor connection first, then mark ourselves invalid
    self._adaptor.close ()
    self.valid = False
def list(self, ttype=None):
    """
    list()

    Return the ids of all jobs known to this Service instance.

    .. seealso::
       The :data:`~saga.job.Service.jobs` property and the
       :meth:`~saga.job.Service.list` method are semantically equivalent.

    :ttype: |param_ttype|
    :rtype: list of :class:`saga.job.Job`

    The returned ids refer to jobs managed by the backend which this
    service represents; those jobs can potentially be accessed and
    managed by the application.  Example::

        service = saga.job.Service("fork://localhost")
        ids = service.list()

        for job_id in ids :
            print job_id

        service.close()
    """
    if not self.valid:
        raise se.IncorrectState("This instance was already closed.")

    # listing is delegated entirely to the adaptor
    return self._adaptor.list(ttype=ttype)
def run_job(self, cmd, host=None, ttype=None):
    """
    run_job(cmd, host=None)

    Run a command as a job via the adaptor.

    :param cmd:  command line to execute
    :param host: optional host hint; ``None`` is normalized to ``""``
                 before being handed to the adaptor

    .. warning:: |not_implemented|

    :raises se.IncorrectState: if the service was already closed.
    """
    if not self.valid:
        raise se.IncorrectState("This instance was already closed.")

    # FIX: use identity test for None instead of 'None == host'
    if host is None:
        host = ""  # FIXME

    return self._adaptor.run_job(cmd, host, ttype=ttype)
def get_url (self, ttype=None) :
    """
    get_url()

    Return the URL this Service instance was created with.

    .. seealso::
       The :data:`~saga.job.Service.url` property and the
       :meth:`~saga.job.Service.get_url` method are semantically
       equivalent and only duplicated for convenience.
    """
    if not self.valid :
        raise se.IncorrectState ("This instance was already closed.")

    # the adaptor kept the creation URL around
    return self._adaptor.get_url (ttype=ttype)
def send (self, data) :
    """
    Write *data* verbatim to the shell -- no newline is appended!
    """
    with self.pty_shell.rlock :

        # unlike run_async, we do not attempt shell recovery here: data is
        # meant for whatever currently runs on the pty, and a restarted
        # shell could not receive it meaningfully
        if not self.pty_shell.alive (recover=False) :
            raise se.IncorrectState ("Cannot send data:\n%s" \
                                  % self.pty_shell.autopsy ())

        try :
            self.pty_shell.write ("%s" % data)
        except Exception as e :
            raise ptye.translate_exception (e)
def __init__(self, _method_type='run', _adaptor=None, _adaptor_state=None,
             _ttype=None):
    '''
    Private job constructor -- jobs are never constructed by the user.

    ``_adaptor`` references the adaptor class instance which created this
    job instance.

    The ``_method_type`` parameter is flattened into the job constructor
    to satisfy the bulk optimization properties of the saga.Task class,
    whose interface is implemented by saga.job.Job.  ``_method_type``
    specifies the SAGA API method which this task is representing.  For
    jobs, that is the 'run' method.
    '''
    # FIX: '_adaptor_state' used to default to a mutable '{}', which is
    # shared across all calls; use a None sentinel instead.
    if _adaptor_state is None:
        _adaptor_state = {}

    if not _adaptor:
        raise se.IncorrectState("saga.job.Job constructor is private")

    self._valid = False

    # we need to keep _method_type around, for the task interface (see
    # :class:`saga.Task`)
    self._method_type = _method_type

    # We need to specify a schema for adaptor selection -- and simply
    # choose the first one the adaptor offers, unless the adaptor state
    # pins a specific one.
    schema = _adaptor.get_schemas()[0]
    if 'job_schema' in _adaptor_state:
        schema = _adaptor_state['job_schema']

    self._base = super(Job, self)
    # NOTE(review): '_ttype' is accepted but 'ttype=None' is hard-coded in
    # the base constructor call -- confirm whether '_ttype' should be
    # forwarded here.
    self._base.__init__(schema, _adaptor, _adaptor_state, ttype=None)

    # set attribute interface properties
    self._attributes_allow_private(True)
    self._attributes_extensible(False)
    self._attributes_camelcasing(True)

    # register properties with the attribute interface
    self._attributes_register(STATE,           UNKNOWN, sa.ENUM,   sa.SCALAR, sa.READONLY)
    self._attributes_register(EXIT_CODE,       None,    sa.INT,    sa.SCALAR, sa.READONLY)
    self._attributes_register(CREATED,         None,    sa.INT,    sa.SCALAR, sa.READONLY)
    self._attributes_register(STARTED,         None,    sa.INT,    sa.SCALAR, sa.READONLY)
    self._attributes_register(FINISHED,        None,    sa.INT,    sa.SCALAR, sa.READONLY)
    self._attributes_register(EXECUTION_HOSTS, None,    sa.STRING, sa.VECTOR, sa.READONLY)
    self._attributes_register(ID,              None,    sa.STRING, sa.SCALAR, sa.READONLY)
    self._attributes_register(SERVICE_URL,     None,    sa.URL,    sa.SCALAR, sa.READONLY)

    self._attributes_set_enums(STATE, [UNKNOWN, NEW, PENDING, RUNNING,
                                       DONE, FAILED, CANCELED, SUSPENDED])

    self._attributes_set_getter(STATE,           self.get_state)
    self._attributes_set_getter(ID,              self.get_id)
    self._attributes_set_getter(EXIT_CODE,       self._get_exit_code)
    self._attributes_set_getter(CREATED,         self._get_created)
    self._attributes_set_getter(STARTED,         self._get_started)
    self._attributes_set_getter(FINISHED,        self._get_finished)
    self._attributes_set_getter(EXECUTION_HOSTS, self._get_execution_hosts)
    self._attributes_set_getter(SERVICE_URL,     self._get_service_url)

    self._valid = True
def create_job (self, job_desc, ttype=None) :
    """
    create_job(job_desc)

    Create a new job.Job instance from a :class:`~saga.job.Description`.
    The resulting job instance is in :data:`~saga.job.NEW` state.

    :param job_desc: job description to create the job from
    :type job_desc:  :data:`saga.job.Description`
    :param ttype: |param_ttype|
    :rtype:      :class:`saga.job.Job` or |rtype_ttype|

    create_job() accepts a job description, which describes the
    application instance to be created by the backend.  The create_job()
    method is not actually attempting to *run* the job, but merely parses
    the job description for syntactic and semantic consistency.  The job
    returned object is thus not in 'Pending' or 'Running', but rather in
    'New' state.  The actual submission is performed by calling run() on
    the job object.  Example::

        # A job.Description object describes the executable/application
        # and its requirements
        job_desc = saga.job.Description()
        job_desc.executable = '/bin/sleep'
        job_desc.arguments  = ['10']
        job_desc.output     = 'myjob.out'
        job_desc.error      = 'myjob.err'

        service = saga.job.Service('local://localhost')

        job = service.create_job(job_desc)

        # Run the job and wait for it to finish
        job.run()
        print "Job ID    : %s" % (job.job_id)
        job.wait()

        # Get some info about the job
        print "Job State : %s" % (job.state)
        print "Exitcode  : %s" % (job.exit_code)

        service.close()
    """
    if not self.valid :
        raise se.IncorrectState ("This instance was already closed.")

    # work on a deep copy so that validation / normalization below never
    # mutates the caller's description
    jd_copy = descr.Description()
    job_desc._attributes_deep_copy (jd_copy)

    # do some sanity checks: if the adaptor has specified a set of supported
    # job description attributes, we scan the given description for any
    # mismatches, and complain then.
    adaptor_info = self._adaptor._adaptor.get_info ()

    if 'capabilities'    in adaptor_info and \
       'jdes_attributes' in adaptor_info['capabilities'] :

        # this is the list of keys supported by the adaptor.  These
        # attributes may be set to non-default values
        supported_keys = adaptor_info['capabilities']['jdes_attributes']

        # use an empty job description to compare default values
        jd_default = descr.Description ()

        for key in jd_copy.list_attributes () :

            val     = jd_copy   .get_attribute (key)
            default = jd_default.get_attribute (key)

            # Also, we make string compares case insensitive
            # (Python 2 'basestring' covers str and unicode)
            if isinstance (val, basestring) :
                val = val .lower ()
            if isinstance (default, basestring) :
                default = default.lower ()

            # supported keys are also valid, as are keys with default or
            # None values (note: falsy values such as '' or 0 also pass)
            if key not in supported_keys and \
               val != default            and \
               val :
                msg = "'JobDescription.%s' (%s) is not supported by adaptor %s" \
                    % (key, val, adaptor_info['name'])
                raise se.BadParameter._log (self._logger, msg)

    # make sure at least 'executable' is defined
    if jd_copy.executable is None:
        raise se.BadParameter("No executable defined")

    # convert environment values to strings, as backends expect them
    if jd_copy.attribute_exists ('Environment') :
        for (key, value) in jd_copy.environment.iteritems():
            jd_copy.environment[key] = str(value)

    return self._adaptor.create_job (jd_copy, ttype=ttype)
def run_sync (self, command, iomode=None, new_prompt=None) :
    """
    Run a shell command, and report exit code, stdout and stderr (all
    three will be returned in a tuple).  The call will block until the
    command finishes (more exactly, until we find the prompt again on the
    shell's I/O stream), and cannot be interrupted.

    :type  command: string
    :param command: shell command to run.

    :type  iomode:  enum
    :param iomode:  Defines how stdout and stderr are captured.

    :type  new_prompt: string
    :param new_prompt: regular expression matching the prompt after
        command succeeded.

    We expect the ``command`` to not do stdio redirection, as we want to
    capture that separately.  We *do* allow pipes and stdin/stdout
    redirection.  Note that SEPARATE mode will break if the job is run in
    the background.

    The following iomode values are valid:

      * *IGNORE:*   both stdout and stderr are discarded, `None` will be
                    returned for each.
      * *MERGED:*   both streams will be merged and returned as stdout;
                    stderr will be `None`.  This is the default.
      * *SEPARATE:* stdout and stderr will be captured separately, and
                    returned individually.  Note that this will require
                    at least one more network hop!
      * *STDOUT:*   only stdout is captured, stderr will be `None`.
      * *STDERR:*   only stderr is captured, stdout will be `None`.
      * *None:*     do not perform any redirection -- this is effectively
                    the same as `MERGED`.

    If any of the requested output streams does not return any data, an
    empty string is returned.

    If the command to be run changes the prompt to be expected for the
    shell, the ``new_prompt`` parameter MUST contain a regex to match the
    new prompt.  The same conventions as for set_prompt() hold -- i.e. we
    expect the prompt regex to capture the exit status of the process.
    """
    with self.pty_shell.rlock :

        self._trace ("run sync : %s" % command)
        self.pty_shell.flush ()

        # we expect the shell to be in 'ground state' when running a syncronous
        # command -- thus we can check if the shell is alive before doing so,
        # and restart if needed
        if not self.pty_shell.alive (recover=True) :
            raise se.IncorrectState ("Can't run command -- shell died:\n%s" \
                                  % self.pty_shell.autopsy ())

        try :
            # backgrounded commands would never return a prompt we can wait on
            command = command.strip ()
            if command.endswith ('&') :
                raise se.BadParameter ("run_sync can only run foreground jobs ('%s')" \
                                    % command)

            # build the redirection suffix for the requested iomode; '$$' is
            # expanded by the remote shell to its pid, making _err unique
            redir = ""
            _err  = "/tmp/saga-python.ssh-job.stderr.$$"

            if iomode == IGNORE :
                redir = " 1>>/dev/null 2>>/dev/null"
            if iomode == MERGED :
                redir = " 2>&1"
            if iomode == SEPARATE :
                redir = " 2>%s" % _err
            if iomode == STDOUT :
                redir = " 2>/dev/null"
            if iomode == STDERR :
                redir = " 2>&1 1>/dev/null"
            if iomode == None :
                redir = ""

            self.logger.debug ('run_sync: %s%s' % (command, redir))
            self.pty_shell.write ("%s%s\n" % (command, redir))

            # If given, switch to new prompt pattern right now...
            prompt = self.prompt
            if new_prompt :
                prompt = new_prompt

            # command has been started - now find prompt again.
            fret, match = self.pty_shell.find ([prompt], timeout=-1.0)  # blocks

            if fret == None :
                # did not find prompt after blocking? BAD! Restart the shell
                self.finalize (kill_pty=True)
                raise se.IncorrectState ("run_sync failed, no prompt (%s)" % command)

            # prompt match carries the exit status and the captured output
            ret, txt = self._eval_prompt (match, new_prompt)

            stdout = None
            stderr = None

            if iomode == None :
                iomode = STDOUT
            if iomode == IGNORE :
                pass
            if iomode == MERGED :
                stdout = txt
            if iomode == STDOUT :
                stdout = txt

            if iomode == SEPARATE or \
               iomode == STDERR :
                # fetch the stderr file content with a second remote command
                # NOTE(review): in STDERR mode the redirection above did not
                # write _err at all -- looks like the cat below can only find
                # a stale or empty file then; confirm intended behavior.
                stdout = txt
                self.pty_shell.write (" cat %s\n" % _err)
                fret, match = self.pty_shell.find ([self.prompt], timeout=-1.0)  # blocks

                if fret == None :
                    # did not find prompt after blocking? BAD! Restart the shell
                    self.finalize (kill_pty=True)
                    raise se.IncorrectState ("run_sync failed, no prompt (%s)" \
                                          % command)

                _ret, _stderr = self._eval_prompt (match)
                if _ret :
                    raise se.IncorrectState ("run_sync failed, no stderr (%s: %s)" \
                                          % (_ret, _stderr))
                stderr = _stderr

            if iomode == STDERR :
                # got stderr in branch above
                stdout = None

            return (ret, stdout, stderr)

        except Exception as e :
            raise ptye.translate_exception (e)
def _check (self) : if not hasattr (self, '_adaptor') : raise se.IncorrectState ("object is not fully initialized")