def post(self, name, book=None, details=None):
    """Atomically publish a new job onto this (redis-backed) jobboard.

    A posting dict is built for the job, stamped with the next value of
    the board-wide sequence counter, and stored via ``HSETNX`` so that a
    duplicate uuid can never silently overwrite an existing listing.

    :param name: human readable name of the job
    :param book: optional logbook associated with the job
    :param details: optional dictionary of job details
    :returns: a :class:`RedisJob` wrapping the stored posting
    :raises: exc.JobFailure if the listing slot was already occupied
    """
    job_uuid = uuidutils.generate_uuid()
    contents = base.format_posting(job_uuid, name,
                                   created_on=timeutils.utcnow(),
                                   book=book, details=details)
    # Reserve this job's position in the board-wide ordering first.
    with _translate_failures():
        seq_no = self._client.incr(self.sequence_key)
    contents.update({
        'sequence': seq_no,
    })
    # Now serialize and attempt to claim the listing slot for this uuid.
    with _translate_failures():
        encoded = self._dumps(contents)
        raw_job_uuid = six.b(job_uuid)
        stored = bool(self._client.hsetnx(self.listings_key,
                                          raw_job_uuid, encoded))
        if not stored:
            # HSETNX returned 0 -> some other posting already owns
            # this field; refuse to clobber it.
            raise exc.JobFailure("New job located at '%s[%s]' could not"
                                 " be posted" % (self.listings_key,
                                                 raw_job_uuid))
        return RedisJob(self, name, seq_no, raw_job_uuid,
                        uuid=job_uuid, details=details,
                        created_on=contents['created_on'],
                        book=book, book_data=contents.get('book'),
                        backend=self._persistence)
def post(self, name, book=None, details=None,
         priority=base.JobPriority.NORMAL):
    """Publish a new job onto this (zookeeper-backed) jobboard.

    The job contents are serialized to json and written into a new
    sequenced znode under the job base path; the resulting job object is
    registered locally (under the job condition lock) before listeners
    are notified that a posting occurred.

    :param name: human readable name of the job
    :param book: optional logbook associated with the job
    :param details: optional dictionary of job details
    :param priority: priority of the job (defaults to ``NORMAL``)
    :returns: a :class:`ZookeeperJob` wrapping the created znode
    """
    # NOTE(harlowja): Jobs are not ephemeral, they will persist until they
    # are consumed (this may change later, but seems safer to do this until
    # further notice).
    prio = base.JobPriority.convert(priority)
    job_uuid = uuidutils.generate_uuid()
    contents = base.format_posting(job_uuid, name,
                                   book=book, details=details,
                                   priority=prio)
    encoded = misc.binary_encode(jsonutils.dumps(contents))
    with self._wrap(job_uuid, None,
                    fail_msg_tpl="Posting failure: %s",
                    ensure_known=False):
        # Sequenced + non-ephemeral: zookeeper appends a monotonically
        # increasing suffix and the node survives this client going away.
        job_path = self._client.create(self._job_base,
                                       value=encoded,
                                       sequence=True,
                                       ephemeral=False)
        job = ZookeeperJob(self, name, self._client, job_path,
                           backend=self._persistence,
                           book=book, details=details,
                           uuid=job_uuid,
                           book_data=contents.get('book'),
                           priority=prio)
        with self._job_cond:
            self._known_jobs[job_path] = job
            self._job_cond.notify_all()
        self._try_emit(base.POSTED, details={'job': job})
        return job