Example #1
 def trash(self, job, who):
     script = self._get_script('trash')
     with _translate_failures():
         raw_who = self._encode_owner(who)
         raw_result = script(keys=[job.owner_key, self.listings_key,
                                   job.last_modified_key, self.trash_key],
                             args=[raw_who, job.key,
                                   self._dumps(timeutils.utcnow())])
         result = self._loads(raw_result)
     status = result['status']
     if status != self.SCRIPT_STATUS_OK:
         reason = result.get('reason')
         if reason == self.SCRIPT_UNKNOWN_JOB:
             raise exc.NotFound("Job %s not found to be"
                                " trashed" % (job.uuid))
         elif reason == self.SCRIPT_UNKNOWN_OWNER:
             raise exc.NotFound("Can not trash job %s"
                                " which we can not determine"
                                " the owner of" % (job.uuid))
         elif reason == self.SCRIPT_NOT_EXPECTED_OWNER:
             raw_owner = result.get('owner')
             if raw_owner:
                 owner = self._decode_owner(raw_owner)
                 raise exc.JobFailure("Can not trash job %s"
                                      " which is not owned by %s (it is"
                                      " actively owned by %s)"
                                      % (job.uuid, who, owner))
             else:
                 raise exc.JobFailure("Can not trash job %s"
                                      " which is not owned by %s"
                                      % (job.uuid, who))
         else:
             raise exc.JobFailure("Failure to trash job %s,"
                                  " unknown internal error (reason=%s)"
                                  % (job.uuid, reason))
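
A minimal usage sketch (not part of the snippet above): it assumes a Redis-backed jobboard has already been constructed elsewhere and bound to a name board, that process is some application callable, and that the exceptions import path shown is the usual one; all of these names are illustrative.

from zag import exceptions as excp  # assumed import path

ME = "worker-1"  # illustrative owner string

for job in board.iterjobs(only_unclaimed=True):
    try:
        board.claim(job, ME)
    except (excp.UnclaimableJob, excp.NotFound):
        continue  # someone else claimed it, or it vanished
    try:
        process(job)            # hypothetical application work
        board.consume(job, ME)
    except Exception:
        # Move the broken posting aside instead of re-running it forever.
        board.trash(job, ME)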
Example #2
    def connect(self):
        self.close()
        if self._owns_client:
            self._client = self._make_client(self._conf)
        with _translate_failures():
            # The client maintains a connection pool, so do a ping and
            # if that works then assume the connection works, which may or
            # may not be continuously maintained (if the server dies
            # at a later time, we will become aware of that when the next
            # op occurs).
            self._client.ping()
            is_new_enough, redis_version = ru.is_server_new_enough(
                self._client, self.MIN_REDIS_VERSION)
            if not is_new_enough:
                wanted_version = ".".join([str(p)
                                           for p in self.MIN_REDIS_VERSION])
                if redis_version:
                    raise exc.JobFailure("Redis version %s or greater is"
                                         " required (version %s is to"
                                         " old)" % (wanted_version,
                                                    redis_version))
                else:
                    raise exc.JobFailure("Redis version %s or greater is"
                                         " required" % (wanted_version))
            else:
                self._redis_version = redis_version
                script_params = {
                    # Status field values.
                    'ok': self.SCRIPT_STATUS_OK,
                    'error': self.SCRIPT_STATUS_ERROR,

                    # Known error reasons (when status field is error).
                    'not_expected_owner': self.SCRIPT_NOT_EXPECTED_OWNER,
                    'unknown_owner': self.SCRIPT_UNKNOWN_OWNER,
                    'unknown_job': self.SCRIPT_UNKNOWN_JOB,
                    'already_claimed': self.SCRIPT_ALREADY_CLAIMED,
                }
                prepared_scripts = {}
                for n, raw_script_tpl in six.iteritems(self.SCRIPT_TEMPLATES):
                    script_tpl = string.Template(raw_script_tpl)
                    script_blob = script_tpl.substitute(**script_params)
                    script = self._client.register_script(script_blob)
                    prepared_scripts[n] = script
                self._scripts.update(prepared_scripts)
                self._closed = False
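
The Lua sources referenced above are kept as string.Template templates; before registration the backend fills their $-placeholders with its status and reason constants. A stand-alone sketch of that substitution step, where the template text and constant values are illustrative only:

import string

# Illustrative constants; the real ones are attributes of the backend class.
script_params = {
    'ok': 'ok',
    'error': 'error',
    'unknown_job': 'unknown_job',
}

raw_script_tpl = """
local owner_key = KEYS[1]
if redis.call("exists", owner_key) == 0 then
    return cjson.encode({status="${error}", reason="${unknown_job}"})
end
return cjson.encode({status="${ok}"})
"""

script_blob = string.Template(raw_script_tpl).substitute(**script_params)
# script_blob is now plain Lua, ready for client.register_script(script_blob).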
Example #3
    def post_scheduled(self,
                       schedule,
                       name,
                       flow_factory,
                       factory_args=None,
                       factory_kwargs=None,
                       store=None,
                       priority=JobPriority.NORMAL):
        """Post a scheduled job.

        This takes a crontab-like schedule and will run the job on that
        schedule. Each time a job is finished, it will be re-posted at the next
        start time for that schedule.
        """
        try:
            delay = _next_scheduled_delay(schedule)
        except ValueError:
            raise excp.JobFailure(
                "Schedule '%s' for scheduled job is not"
                " valid" % (schedule))
        else:
            if delay is None:
                raise excp.JobFailure(
                    "Schedule '%s' for scheduled job does"
                    " not include any future times" % (schedule))

        post_data = self._prep_job_posting(
            name,
            {
                'schedule': schedule,
                'run_at': int(time.time() + delay)
            },
            flow_factory,
            factory_args=factory_args,
            factory_kwargs=factory_kwargs,
            store=store,
            priority=priority,
        )
        return self._do_job_posting(*post_data)
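
The helper _next_scheduled_delay is not shown here; below is a plausible sketch of what it has to compute (the number of seconds until the crontab expression next fires) using the third-party croniter package. This is an assumption for illustration, the real helper may be implemented differently.

import time

from croniter import croniter  # third-party; an assumed stand-in here


def _next_scheduled_delay(schedule, now=None):
    """Return seconds until the crontab schedule next fires, else None."""
    now = time.time() if now is None else now
    # croniter rejects malformed expressions with a ValueError subclass,
    # which matches the "except ValueError" in post_scheduled() above.
    next_run = croniter(schedule, now).get_next(float)
    delay = next_run - now
    return delay if delay > 0 else None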
Example #4
 def claim(self, job, who, expiry=None):
     if expiry is None:
         # On the Lua side None doesn't translate to nil, so we have
         # to do this string conversion to make sure that we can tell
         # the difference.
         ms_expiry = "none"
     else:
         ms_expiry = int(expiry * 1000.0)
         if ms_expiry <= 0:
             raise ValueError("Provided expiry (when converted to"
                              " milliseconds) must be greater"
                              " than zero instead of %s" % (expiry))
     script = self._get_script('claim')
     with _translate_failures():
         raw_who = self._encode_owner(who)
         raw_result = script(keys=[job.owner_key, self.listings_key,
                                   job.last_modified_key],
                             args=[raw_who, job.key,
                                   # NOTE(harlowja): we need to send this
                                   # in as a blob (even if it's not
                                   # set/used), since the format can not
                                   # currently be created in lua...
                                   self._dumps(timeutils.utcnow()),
                                   ms_expiry])
         result = self._loads(raw_result)
     status = result['status']
     if status != self.SCRIPT_STATUS_OK:
         reason = result.get('reason')
         if reason == self.SCRIPT_UNKNOWN_JOB:
             raise exc.NotFound("Job %s not found to be"
                                " claimed" % (job.uuid))
         elif reason == self.SCRIPT_ALREADY_CLAIMED:
             raw_owner = result.get('owner')
             if raw_owner:
                 owner = self._decode_owner(raw_owner)
                 raise exc.UnclaimableJob("Job %s already"
                                          " claimed by %s"
                                          % (job.uuid, owner))
             else:
                 raise exc.UnclaimableJob("Job %s already"
                                          " claimed" % (job.uuid))
         else:
             raise exc.JobFailure("Failure to claim job %s,"
                                  " unknown internal error (reason=%s)"
                                  % (job.uuid, reason))
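
Callers hand expiry over in seconds and the backend converts it to whole milliseconds (or to the sentinel string "none") before the Lua script sees it. A hedged usage sketch, reusing the assumed board, ME and process names from the earlier sketch:

try:
    # 30.0 seconds becomes 30000 ms on the Lua side; the claim lapses on
    # its own if this worker dies without consuming or abandoning the job.
    board.claim(job, ME, expiry=30.0)
except excp.UnclaimableJob:
    pass  # somebody else owns it right now
else:
    try:
        process(job)
        board.consume(job, ME)
    except Exception:
        # Hand the claim back immediately rather than waiting for the
        # expiry to release it.
        board.abandon(job, ME)
        raise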
Example #5
 def abandon(self, job, who):
     with self._wrap(job.uuid,
                     job.path,
                     fail_msg_tpl="Abandonment failure: %s"):
         try:
             owner_data = self._get_owner_and_data(job)
             lock_data, lock_stat, data, data_stat = owner_data
         except k_exceptions.NoNodeError:
             excp.raise_with_cause(
                 excp.NotFound, "Can not abandon a job %s"
                 " which we can not determine"
                 " the owner of" % (job.uuid))
         if lock_data.get("owner") != who:
             raise excp.JobFailure("Can not abandon a job %s"
                                   " which is not owned by %s" %
                                   (job.uuid, who))
         txn = self._client.transaction()
         txn.delete(job.lock_path, version=lock_stat.version)
         kazoo_utils.checked_commit(txn)
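
The core of the abandon path is a single optimistic delete of the lock znode, guarded by the version read in _get_owner_and_data and executed inside a kazoo transaction (kazoo_utils.checked_commit additionally checks the per-operation results). A stand-alone sketch of that primitive with a plain KazooClient; the connection details, paths and owner value are illustrative:

import json

from kazoo.client import KazooClient  # kazoo is the ZooKeeper client used here

client = KazooClient(hosts="127.0.0.1:2181")  # illustrative connection
client.start()

lock_path = "/zag/jobs/job0000000001.lock"    # illustrative path
raw, stat = client.get(lock_path)
lock_data = json.loads(raw.decode("utf-8"))

if lock_data.get("owner") == "worker-1":
    txn = client.transaction()
    # The version guard makes the commit fail if anyone touched the lock
    # znode after we read it, so we never delete someone else's claim.
    txn.delete(lock_path, version=stat.version)
    results = txn.commit()  # one result per queued operation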
Example #6
 def trash(self, job, who):
     with self._wrap(job.uuid, job.path, fail_msg_tpl="Trash failure: %s"):
         try:
             owner_data = self._get_owner_and_data(job)
             lock_data, lock_stat, data, data_stat = owner_data
         except k_exceptions.NoNodeError:
             excp.raise_with_cause(
                 excp.NotFound, "Can not trash a job %s"
                 " which we can not determine"
                 " the owner of" % (job.uuid))
         if lock_data.get("owner") != who:
             raise excp.JobFailure("Can not trash a job %s"
                                   " which is not owned by %s" %
                                   (job.uuid, who))
         trash_path = job.path.replace(self.path, self.trash_path)
         value = misc.binary_encode(zag_json.dumps(data))
         txn = self._client.transaction()
         txn.create(trash_path, value=value)
         txn.delete(job.lock_path, version=lock_stat.version)
         txn.delete(job.path, version=data_stat.version)
         kazoo_utils.checked_commit(txn)
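
Trashing reuses the same guarded delete, but first copies the job's data under a parallel trash tree; the destination is derived by swapping the board's root for the trash root inside the job's path. A small sketch of that mapping and of the payload that gets written, where the root names and data are illustrative:

import json

# Illustrative roots; the real ones come from the board's configuration.
board_root = "/zag/jobs"
trash_root = "/zag/.trash"

job_path = board_root + "/job0000000007"
trash_path = job_path.replace(board_root, trash_root)
# -> "/zag/.trash/job0000000007"

data = {"name": "my-job", "details": {}}   # whatever the job znode held
value = json.dumps(data).encode("utf-8")   # roughly misc.binary_encode(zag_json.dumps(data))
# The transaction then atomically: create(trash_path, value), delete the
# lock znode and delete the original job znode.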
Example #7
 def _do_job_posting(self, name, job_uuid, job_posting, book, details,
                     job_priority):
     with _translate_failures():
         sequence = self._client.incr(self.sequence_key)
         job_posting.update({
             'sequence': sequence,
         })
     with _translate_failures():
         raw_posting = self._dumps(job_posting)
         raw_job_uuid = six.b(job_uuid)
         was_posted = bool(self._client.hsetnx(self.listings_key,
                                               raw_job_uuid, raw_posting))
         if not was_posted:
             raise exc.JobFailure("New job located at '%s[%s]' could not"
                                  " be posted" % (self.listings_key,
                                                  raw_job_uuid))
         else:
             return RedisJob(self, name, sequence, raw_job_uuid,
                             uuid=job_uuid, details=details,
                             created_on=job_posting['created_on'],
                             book=book, book_data=job_posting.get('book'),
                             backend=self._persistence,
                             priority=job_priority)
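
The two Redis primitives doing the real work here are INCR, for a monotonically increasing sequence number, and HSETNX, so a posting only lands if no entry for that job UUID exists in the listings hash yet. A stand-alone sketch with plain redis-py; the key names and payload are illustrative:

import json
import uuid

import redis  # redis-py, the client library this backend builds on

client = redis.Redis()                    # illustrative local connection

sequence = client.incr("board.sequence")  # atomic increment, starts at 1
job_uuid = uuid.uuid4().hex
posting = json.dumps({"name": "my-job", "sequence": sequence})

# HSETNX returns 1 only when the field did not exist yet, so a duplicate
# posting for the same UUID is detected instead of silently overwritten.
was_posted = bool(client.hsetnx("board.listings", job_uuid, posting))
if not was_posted:
    raise RuntimeError("job %s was already posted" % job_uuid)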