def _update_atom_details(self, ad, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    ad_path = paths.join(self.atom_path, ad.uuid)
    e_ad = None
    try:
        ad_data, _zstat = self._client.get(ad_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if not create_missing:
            raise exc.NotFound("No atom details found with id: %s"
                               % ad.uuid)
        txn.create(ad_path)
    else:
        # Existent: read it out.
        try:
            ad_data = misc.decode_json(ad_data)
            ad_cls = logbook.atom_detail_class(ad_data['type'])
            e_ad = ad_cls.from_dict(ad_data['atom'])
        except KeyError:
            pass
    # Update and write it back.
    if e_ad:
        e_ad = e_ad.merge(ad)
    else:
        e_ad = ad
    ad_data = base._format_atom(e_ad)
    txn.set_data(ad_path, misc.binary_encode(jsonutils.dumps(ad_data)))
    return e_ad
def post(self, name, book=None, details=None,
         priority=base.JobPriority.NORMAL):
    # NOTE(harlowja): Jobs are not ephemeral, they will persist until they
    # are consumed (this may change later, but seems safer to do this until
    # further notice).
    job_priority = base.JobPriority.convert(priority)
    job_uuid = uuidutils.generate_uuid()
    job_posting = base.format_posting(job_uuid, name,
                                      book=book, details=details,
                                      priority=job_priority)
    raw_job_posting = misc.binary_encode(jsonutils.dumps(job_posting))
    with self._wrap(job_uuid, None,
                    fail_msg_tpl="Posting failure: %s",
                    ensure_known=False):
        job_path = self._client.create(self._job_base,
                                       value=raw_job_posting,
                                       sequence=True,
                                       ephemeral=False)
        job = ZookeeperJob(self, name, self._client, job_path,
                           backend=self._persistence,
                           book=book, details=details,
                           uuid=job_uuid,
                           book_data=job_posting.get('book'),
                           priority=job_priority)
        with self._job_cond:
            self._known_jobs[job_path] = job
            self._job_cond.notify_all()
        self._try_emit(base.POSTED, details={'job': job})
        return job
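At the ZooKeeper level, the posting above amounts to writing the JSON-encoded job body to a persistent sequence node. A minimal sketch with a raw kazoo client follows; the '/taskflow/jobs' base path and the payload are illustrative assumptions, not taskflow's actual layout:

# Illustrative sketch only: post a JSON job body to a persistent
# ZooKeeper sequence node. Assumes a ZooKeeper server on localhost:2181;
# the paths/values are demonstration choices, not taskflow's own.
import json
import uuid

from kazoo.client import KazooClient

client = KazooClient(hosts='localhost:2181')
client.start()
client.ensure_path('/taskflow/jobs')
posting = {'uuid': str(uuid.uuid4()), 'name': 'demo-job', 'details': {}}
job_path = client.create('/taskflow/jobs/job',
                         value=json.dumps(posting).encode('utf-8'),
                         sequence=True, ephemeral=False)
print(job_path)  # e.g. /taskflow/jobs/job0000000000
client.stop()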
def _update_flow_details(self, fd, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    fd_path = paths.join(self.flow_path, fd.uuid)
    try:
        fd_data, _zstat = self._client.get(fd_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if create_missing:
            txn.create(fd_path)
            e_fd = logbook.FlowDetail(name=fd.name, uuid=fd.uuid)
        else:
            raise exc.NotFound("No flow details found with id: %s"
                               % fd.uuid)
    else:
        # Existent: read it out.
        e_fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))
    # Update and write it back.
    e_fd = e_fd.merge(fd)
    fd_data = e_fd.to_dict()
    txn.set_data(fd_path, misc.binary_encode(jsonutils.dumps(fd_data)))
    for ad in fd:
        ad_path = paths.join(fd_path, ad.uuid)
        # NOTE(harlowja): create an entry in the flow detail path
        # for the provided atom detail so that a reference exists
        # from the flow detail to its atom details.
        if not self._client.exists(ad_path):
            txn.create(ad_path)
        e_fd.add(self._update_atom_details(ad, txn, create_missing=True))
    return e_fd
def _update_atom_details(self, ad, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    ad_path = paths.join(self.atom_path, ad.uuid)
    e_ad = None
    try:
        ad_data, _zstat = self._client.get(ad_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if not create_missing:
            raise exc.NotFound("No atom details found with"
                               " id: %s" % ad.uuid)
        else:
            txn.create(ad_path)
    else:
        # Existent: read it out.
        try:
            ad_data = misc.decode_json(ad_data)
            ad_cls = logbook.atom_detail_class(ad_data['type'])
            e_ad = ad_cls.from_dict(ad_data['atom'])
        except KeyError:
            pass
    # Update and write it back.
    if e_ad:
        e_ad = e_ad.merge(ad)
    else:
        e_ad = ad
    ad_data = base._format_atom(e_ad)
    txn.set_data(ad_path, misc.binary_encode(jsonutils.dumps(ad_data)))
    return e_ad
def register_entity(self, entity):
    entity_type = entity.kind
    if entity_type == c_base.Conductor.ENTITY_KIND:
        entity_path = k_paths.join(self.entity_path, entity_type)
        try:
            self._client.ensure_path(entity_path)
            self._client.create(k_paths.join(entity_path, entity.name),
                                value=misc.binary_encode(
                                    jsonutils.dumps(entity.to_dict())),
                                ephemeral=True)
        except k_exceptions.NodeExistsError:
            pass
        except self._client.handler.timeout_exception:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, operation"
                " timed out" % (entity.name, entity_path))
        except k_exceptions.SessionExpiredError:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, session"
                " expired" % (entity.name, entity_path))
        except k_exceptions.KazooException:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, internal"
                " error" % (entity.name, entity_path))
    else:
        raise excp.NotImplementedError(
            "Not implemented for other entity type '%s'" % entity_type)
def _change_owner(self, new_owner): children = self.client.storage.get_children("/taskflow", only_direct=False) altered = 0 for p, data in six.iteritems(children): if p.endswith(".lock"): self.client.set(p, misc.binary_encode(jsonutils.dumps({"owner": new_owner}))) altered += 1 return altered
def _change_owner(self, new_owner): children = self.client.storage.get_children("/taskflow", only_direct=False) altered = 0 for p, data in six.iteritems(children): if p.endswith(".lock"): self.client.set(p, misc.binary_encode( jsonutils.dumps({'owner': new_owner}))) altered += 1 return altered
def _create_logbook(lb_path, txn):
    lb_data = p_utils.format_logbook(lb, created_at=None)
    txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        # NOTE(harlowja): create an entry in the logbook path
        # for the provided flow detail so that a reference exists
        # from the logbook to its flow details.
        txn.create(paths.join(lb_path, fd.uuid))
        fd_path = paths.join(self.flow_path, fd.uuid)
        fd_data = jsonutils.dumps(p_utils.format_flow_detail(fd))
        txn.create(fd_path, misc.binary_encode(fd_data))
        for td in fd:
            # NOTE(harlowja): create an entry in the flow detail path
            # for the provided task detail so that a reference exists
            # from the flow detail to its task details.
            txn.create(paths.join(fd_path, td.uuid))
            td_path = paths.join(self.task_path, td.uuid)
            td_data = jsonutils.dumps(p_utils.format_task_detail(td))
            txn.create(td_path, misc.binary_encode(td_data))
    return lb
def _create_logbook(lb_path, txn):
    lb_data = lb.to_dict(marshal_time=True)
    txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        # NOTE(harlowja): create an entry in the logbook path
        # for the provided flow detail so that a reference exists
        # from the logbook to its flow details.
        txn.create(paths.join(lb_path, fd.uuid))
        fd_path = paths.join(self.flow_path, fd.uuid)
        fd_data = jsonutils.dumps(fd.to_dict())
        txn.create(fd_path, misc.binary_encode(fd_data))
        for ad in fd:
            # NOTE(harlowja): create an entry in the flow detail path
            # for the provided atom detail so that a reference exists
            # from the flow detail to its atom details.
            txn.create(paths.join(fd_path, ad.uuid))
            ad_path = paths.join(self.atom_path, ad.uuid)
            ad_data = base._format_atom(ad)
            txn.create(ad_path,
                       misc.binary_encode(jsonutils.dumps(ad_data)))
    return lb
def _format_job(self, job):
    posting = {
        'uuid': job.uuid,
        'name': job.name,
    }
    if job.details is not None:
        posting['details'] = job.details
    if job.book is not None:
        posting['book'] = {
            'name': job.book.name,
            'uuid': job.book.uuid,
        }
    return misc.binary_encode(jsonutils.dumps(posting))
def claim(self, job, who):

    def _unclaimable_try_find_owner(cause):
        try:
            owner = self.find_owner(job)
        except Exception:
            owner = None
        if owner:
            message = "Job %s already claimed by '%s'" % (job.uuid, owner)
        else:
            message = "Job %s already claimed" % (job.uuid)
        excp.raise_with_cause(excp.UnclaimableJob, message, cause=cause)

    with self._wrap(job.uuid, job.path,
                    fail_msg_tpl="Claiming failure: %s"):
        # NOTE(harlowja): post as json which will allow for future changes
        # more easily than a raw string/text.
        value = jsonutils.dumps({
            'owner': who,
        })
        # Ensure the target job is still existent (at the right version).
        job_data, job_stat = self._client.get(job.path)
        txn = self._client.transaction()
        # This will abort (and not create the lock) if the job has been
        # removed (somehow...) or updated by someone else to a different
        # version...
        txn.check(job.path, version=job_stat.version)
        txn.create(job.lock_path, value=misc.binary_encode(value),
                   ephemeral=True)
        try:
            kazoo_utils.checked_commit(txn)
        except k_exceptions.NodeExistsError as e:
            _unclaimable_try_find_owner(e)
        except kazoo_utils.KazooTransactionException as e:
            if len(e.failures) < 2:
                raise
            else:
                if isinstance(e.failures[0], k_exceptions.NoNodeError):
                    excp.raise_with_cause(
                        excp.NotFound,
                        "Job %s not found to be claimed" % job.uuid,
                        cause=e.failures[0])
                if isinstance(e.failures[1], k_exceptions.NodeExistsError):
                    _unclaimable_try_find_owner(e.failures[1])
                else:
                    excp.raise_with_cause(
                        excp.UnclaimableJob,
                        "Job %s claim failed due to transaction"
                        " not succeeding" % (job.uuid),
                        cause=e)
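The version-checked claim above boils down to an optimistic-locking pattern: read the job node's version, then atomically check that version and create an ephemeral lock in one transaction. A minimal sketch with a raw kazoo client follows; the path and payload are assumptions for illustration:

# Minimal sketch of the optimistic claim pattern used above. If another
# claimant created the lock first, or the job node changed version, the
# transaction fails instead of silently overwriting anything.
import json

from kazoo.client import KazooClient

client = KazooClient(hosts='localhost:2181')
client.start()
job_path = '/taskflow/jobs/job0000000000'  # assumed to already exist
_data, stat = client.get(job_path)
txn = client.transaction()
txn.check(job_path, version=stat.version)
txn.create(job_path + '.lock',
           json.dumps({'owner': 'worker-1'}).encode('utf-8'),
           ephemeral=True)
results = txn.commit()  # one result (or exception instance) per operation
client.stop()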
def join(self, key_piece, *more_key_pieces):
    """Create and return a namespaced key from many segments.

    NOTE(harlowja): all pieces that are text/unicode are converted into
    their binary equivalent (if they are already binary no conversion
    takes place) before being joined (as redis expects binary keys and
    not unicode/text ones).
    """
    if self._namespace is not None:
        namespace_pieces = [self._namespace, self.NAMESPACE_SEP]
    else:
        namespace_pieces = []
    key_pieces = [key_piece]
    if more_key_pieces:
        key_pieces.extend(more_key_pieces)
    for i in compat_range(0, len(namespace_pieces)):
        namespace_pieces[i] = misc.binary_encode(namespace_pieces[i])
    for i in compat_range(0, len(key_pieces)):
        key_pieces[i] = misc.binary_encode(key_pieces[i])
    namespace = b"".join(namespace_pieces)
    key = self.KEY_PIECE_SEP.join(key_pieces)
    return namespace + key
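For reference, the joining behavior described in the docstring above looks roughly like this standalone sketch; the separator values are illustrative assumptions, not the class's actual NAMESPACE_SEP / KEY_PIECE_SEP:

# Standalone sketch of the namespaced-key joining described above.
NAMESPACE_SEP = b':'   # assumed separator, for demonstration only
KEY_PIECE_SEP = b'.'   # assumed separator, for demonstration only

def binary_encode(piece, encoding='utf-8'):
    # Pass bytes through untouched; encode text to bytes (mirrors the
    # text/binary handling described in the docstring).
    return piece if isinstance(piece, bytes) else piece.encode(encoding)

def join(namespace, key_piece, *more_key_pieces):
    key_pieces = [key_piece] + list(more_key_pieces)
    key = KEY_PIECE_SEP.join(binary_encode(p) for p in key_pieces)
    if namespace is None:
        return key
    return binary_encode(namespace) + NAMESPACE_SEP + key

print(join('taskflow', 'jobs', b'trash'))  # -> b'taskflow:jobs.trash'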
def _update_logbook(lb_path, lb_data, txn):
    e_lb = p_utils.unformat_logbook(lb.uuid, misc.decode_json(lb_data))
    e_lb = p_utils.logbook_merge(e_lb, lb)
    lb_data = p_utils.format_logbook(e_lb, created_at=lb.created_at)
    txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        fd_path = paths.join(lb_path, fd.uuid)
        if not self._client.exists(fd_path):
            # NOTE(harlowja): create an entry in the logbook path
            # for the provided flow detail so that a reference exists
            # from the logbook to its flow details.
            txn.create(fd_path)
        e_fd = self._update_flow_details(fd, txn, create_missing=True)
        e_lb.add(e_fd)
    return e_lb
def register_entity(self, entity):
    entity_type = entity.kind
    if entity_type == 'conductor':
        entity_path = k_paths.join(self.entity_path, entity_type)
        self._client.ensure_path(entity_path)
        conductor_name = entity.name
        self._client.create(k_paths.join(entity_path, conductor_name),
                            value=misc.binary_encode(
                                jsonutils.dumps(entity.to_dict())),
                            ephemeral=True)
    else:
        raise excp.NotImplementedError(
            "Not implemented for other entity type '%s'" % entity_type)
def _update_logbook(lb_path, lb_data, txn):
    e_lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
                                     unmarshal_time=True)
    e_lb = e_lb.merge(lb)
    lb_data = e_lb.to_dict(marshal_time=True)
    txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        fd_path = paths.join(lb_path, fd.uuid)
        if not self._client.exists(fd_path):
            # NOTE(harlowja): create an entry in the logbook path
            # for the provided flow detail so that a reference exists
            # from the logbook to its flow details.
            txn.create(fd_path)
        e_fd = self._update_flow_details(fd, txn, create_missing=True)
        e_lb.add(e_fd)
    return e_lb
def post(self, name, book=None, details=None):

    def format_posting(job_uuid):
        posting = {
            'uuid': job_uuid,
            'name': name,
        }
        if details:
            posting['details'] = details
        else:
            posting['details'] = {}
        if book is not None:
            posting['book'] = {
                'name': book.name,
                'uuid': book.uuid,
            }
        return posting

    # NOTE(harlowja): Jobs are not ephemeral, they will persist until they
    # are consumed (this may change later, but seems safer to do this until
    # further notice).
    job_uuid = uuidutils.generate_uuid()
    with self._wrap(job_uuid, None, "Posting failure: %s",
                    ensure_known=False):
        job_posting = format_posting(job_uuid)
        job_posting = misc.binary_encode(jsonutils.dumps(job_posting))
        job_path = self._client.create(self._job_base,
                                       value=job_posting,
                                       sequence=True,
                                       ephemeral=False)
        job = ZookeeperJob(name, self, self._client,
                           self._persistence, job_path,
                           book=book, details=details, uuid=job_uuid)
        with self._job_cond:
            self._known_jobs[job_path] = job
            self._job_cond.notify_all()
        self._emit(base.POSTED, details={'job': job})
        return job
def post(self, name, book, details=None):

    def format_posting(job_uuid):
        posting = {
            'uuid': job_uuid,
            'name': name,
        }
        if details:
            posting['details'] = details
        else:
            posting['details'] = {}
        if book is not None:
            posting['book'] = {
                'name': book.name,
                'uuid': book.uuid,
            }
        return posting

    # NOTE(harlowja): Jobs are not ephemeral, they will persist until they
    # are consumed (this may change later, but seems safer to do this until
    # further notice).
    job_uuid = uuidutils.generate_uuid()
    with self._wrap(job_uuid, None, "Posting failure: %s",
                    ensure_known=False):
        job_posting = format_posting(job_uuid)
        job_posting = misc.binary_encode(jsonutils.dumps(job_posting))
        job_path = self._client.create(self._job_base,
                                       value=job_posting,
                                       sequence=True,
                                       ephemeral=False)
        job = ZookeeperJob(name, self, self._client,
                           self._persistence, job_path,
                           book=book, details=details, uuid=job_uuid)
        self._job_cond.acquire()
        try:
            self._known_jobs[job_path] = job
            self._job_cond.notify_all()
        finally:
            self._job_cond.release()
        self._emit(jobboard.POSTED, details={'job': job})
        return job
def test_posting_owner_lost(self):
    with base.connect_close(self.board):
        with base.flush(self.client):
            j = self.board.post('test', p_utils.temporary_log_book())
        self.assertEqual(states.UNCLAIMED, j.state)
        with base.flush(self.client):
            self.board.claim(j, self.board.name)
        self.assertEqual(states.CLAIMED, j.state)
        # Forcefully delete the owner from the backend storage to make
        # sure the job becomes unclaimed (this may happen if some admin
        # manually deletes the lock).
        paths = list(six.iteritems(self.client.storage.paths))
        for (path, value) in paths:
            if path in self.bad_paths:
                continue
            if path.endswith('lock'):
                value['data'] = misc.binary_encode(jsonutils.dumps({}))
        self.assertEqual(states.UNCLAIMED, j.state)
def trash(self, job, who): with self._wrap(job.uuid, job.path, "Trash failure: %s"): try: owner_data = self._get_owner_and_data(job) lock_data, lock_stat, data, data_stat = owner_data except k_exceptions.NoNodeError: excp.raise_with_cause(excp.JobFailure, "Can not trash a job %s" " which we can not determine" " the owner of" % (job.uuid)) if lock_data.get("owner") != who: raise excp.JobFailure("Can not trash a job %s" " which is not owned by %s" % (job.uuid, who)) trash_path = job.path.replace(self.path, self.trash_path) value = misc.binary_encode(jsonutils.dumps(data)) txn = self._client.transaction() txn.create(trash_path, value=value) txn.delete(job.lock_path, version=lock_stat.version) txn.delete(job.path, version=data_stat.version) kazoo_utils.checked_commit(txn)
def trash(self, job, who): with self._wrap(job.uuid, job.path, fail_msg_tpl="Trash failure: %s"): try: owner_data = self._get_owner_and_data(job) lock_data, lock_stat, data, data_stat = owner_data except k_exceptions.NoNodeError: excp.raise_with_cause( excp.NotFound, "Can not trash a job %s" " which we can not determine" " the owner of" % (job.uuid)) if lock_data.get("owner") != who: raise excp.JobFailure("Can not trash a job %s" " which is not owned by %s" % (job.uuid, who)) trash_path = job.path.replace(self.path, self.trash_path) value = misc.binary_encode(jsonutils.dumps(data)) txn = self._client.transaction() txn.create(trash_path, value=value) txn.delete(job.lock_path, version=lock_stat.version) txn.delete(job.path, version=data_stat.version) kazoo_utils.checked_commit(txn)
def claim(self, job, who): _check_who(who) with self._wrap(job.uuid, job.path, "Claiming failure: %s"): # NOTE(harlowja): post as json which will allow for future changes # more easily than a raw string/text. value = jsonutils.dumps({ 'owner': who, }) try: self._client.create(job.lock_path, value=misc.binary_encode(value), ephemeral=True) except k_exceptions.NodeExistsException: # Try to see if we can find who the owner really is... try: owner = self.find_owner(job) except Exception: owner = None if owner: msg = "Job %s already claimed by '%s'" % (job.uuid, owner) else: msg = "Job %s already claimed" % (job.uuid) raise excp.UnclaimableJob(msg)
def _update_task_details(self, td, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    td_path = paths.join(self.task_path, td.uuid)
    try:
        td_data, _zstat = self._client.get(td_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if create_missing:
            txn.create(td_path)
            e_td = logbook.TaskDetail(name=td.name, uuid=td.uuid)
        else:
            raise exc.NotFound("No task details found with id: %s"
                               % td.uuid)
    else:
        # Existent: read it out.
        e_td = p_utils.unformat_task_detail(td.uuid,
                                            misc.decode_json(td_data))
    # Update and write it back.
    e_td = p_utils.task_details_merge(e_td, td)
    td_data = p_utils.format_task_detail(e_td)
    txn.set_data(td_path, misc.binary_encode(jsonutils.dumps(td_data)))
    return e_td
def claim(self, job, who):
    _check_who(who)
    job_path, lock_path = _get_paths(self.path, job.uuid)
    with self._wrap(job.uuid, "Claiming failure: %s"):
        # NOTE(harlowja): post as json which will allow for future changes
        # more easily than a raw string/text.
        value = jsonutils.dumps({
            'owner': who,
        })
        try:
            self._client.create(lock_path,
                                value=misc.binary_encode(value),
                                ephemeral=True)
        except k_exceptions.NodeExistsException:
            # Try to see if we can find who the owner really is...
            try:
                owner = self.find_owner(job)
            except Exception:
                owner = None
            if owner:
                msg = "Job %s already claimed by '%s'" % (job.uuid, owner)
            else:
                msg = "Job %s already claimed" % (job.uuid)
            raise excp.UnclaimableJob(msg)
def _set_item(self, path, value, transaction):
    data = misc.binary_encode(jsonutils.dumps(value))
    if not self._client.exists(path):
        transaction.create(path, data)
    else:
        transaction.set_data(path, data)
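The helper above is the usual create-or-update idiom for ZooKeeper transactions (which have no upsert operation). A rough usage sketch with a raw kazoo client, where the path and payload are assumptions for demonstration:

# Rough sketch of the create-or-update idiom above with a raw kazoo
# client; assumes a ZooKeeper server on localhost:2181.
import json

from kazoo.client import KazooClient

client = KazooClient(hosts='localhost:2181')
client.start()
client.ensure_path('/taskflow/books')
path = '/taskflow/books/demo'
data = json.dumps({'name': 'demo'}).encode('utf-8')
txn = client.transaction()
if not client.exists(path):
    txn.create(path, data)
else:
    txn.set_data(path, data)
txn.commit()
client.stop()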
def test_unicode_other_encoding(self):
    result = misc.binary_encode(u'mañana', 'latin-1')
    self.assertIsInstance(result, six.binary_type)
    self.assertEqual(result, u'mañana'.encode('latin-1'))
def _check(self, data, expected_result):
    result = misc.binary_encode(data)
    self.assertIsInstance(result, six.binary_type)
    self.assertEqual(result, expected_result)
def _write_to(self, filename, contents):
    contents = misc.binary_encode(contents,
                                  encoding=self.backend.encoding)
    with open(filename, 'wb') as fp:
        fp.write(contents)
    self.backend.file_cache.pop(filename, None)
def test_unicode_other_encoding(self): result = misc.binary_encode(u"mañana", "latin-1") self.assertIsInstance(result, six.binary_type) self.assertEqual(result, u"mañana".encode("latin-1"))