def _update_flow_details(self, fd, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    fd_path = paths.join(self.flow_path, fd.uuid)
    try:
        fd_data, _zstat = self._client.get(fd_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if create_missing:
            txn.create(fd_path)
            e_fd = logbook.FlowDetail(name=fd.name, uuid=fd.uuid)
        else:
            raise exc.NotFound("No flow details found with id: %s"
                               % fd.uuid)
    else:
        # Existent: read it out.
        e_fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))
    # Update and write it back
    e_fd = e_fd.merge(fd)
    fd_data = e_fd.to_dict()
    txn.set_data(fd_path, misc.binary_encode(jsonutils.dumps(fd_data)))
    for ad in fd:
        ad_path = paths.join(fd_path, ad.uuid)
        # NOTE(harlowja): create an entry in the flow detail path
        # for the provided atom detail so that a reference exists
        # from the flow detail to its atom details.
        if not self._client.exists(ad_path):
            txn.create(ad_path)
        e_fd.add(self._update_atom_details(ad, txn, create_missing=True))
    return e_fd

def register_entity(self, entity):
    entity_type = entity.kind
    if entity_type == c_base.Conductor.ENTITY_KIND:
        entity_path = k_paths.join(self.entity_path, entity_type)
        try:
            self._client.ensure_path(entity_path)
            self._client.create(k_paths.join(entity_path, entity.name),
                                value=misc.binary_encode(
                                    jsonutils.dumps(entity.to_dict())),
                                ephemeral=True)
        except k_exceptions.NodeExistsError:
            pass
        except self._client.handler.timeout_exception:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, operation"
                " timed out" % (entity.name, entity_path))
        except k_exceptions.SessionExpiredError:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, session"
                " expired" % (entity.name, entity_path))
        except k_exceptions.KazooException:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, internal"
                " error" % (entity.name, entity_path))
    else:
        raise excp.NotImplementedError(
            "Not implemented for other entity type '%s'" % entity_type)

def getRack():
    try:
        def watcher(event):
            logger.info("/rack children changed, need update memory")
            getRack()

        # Register the watcher on the children of /rack (a plain get()
        # would only watch the node's data, not membership changes).
        children = zk_client.get_children('/rack', watch=watcher)
        for child in children:
            rack_name = child.encode('utf-8')
            RACK_STORE[rack_name] = []
            path1 = join('/rack', rack_name)
            children1 = zk_client.get_children(path1)
            for child1 in children1:
                store_id = child1.encode('utf-8')
                RACK_STORE[rack_name].append(store_id)
                path2 = join(path1, store_id)
                data, stat = zk_client.get(path2)
                if data:
                    parsed_data = json.loads(data)
                    ip = parsed_data['stat'].split(':')[0].encode('utf-8')
                    STORE_TO_IP[store_id] = ip
                    IP_TO_STORE[ip] = store_id
                    STORE_RACK[store_id] = rack_name
                    STORE_INFO[FREE_VOLUME_KEY + store_id] = -1
                    STORE_INFO[VOLUME_KEY + store_id] = 0
                else:
                    logger.warning("getRack() called zk data is None"
                                   " path: %s", path2)
                    return False
        return True
    except Exception as ex:
        logger.error("getRack() called error: %s", str(ex))
        return False

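# A hedged alternative sketch (not part of the original module): kazoo's
# ChildrenWatch recipe re-registers itself after every event, which avoids
# the manual "register a watch, then call yourself again" pattern used by
# getRack() above. The names zk_client and getRack are assumed from the
# surrounding snippet; the helper name is an assumption made for
# illustration.
def watch_rack_children(zk_client):
    @zk_client.ChildrenWatch('/rack')
    def _on_rack_children_change(children):
        # Invoked once immediately and again whenever /rack gains or
        # loses children.
        getRack()
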
def __next__(self):
    # FIXME: need children watcher
    if self._last is not None:
        self._children.pop(0)
        self._last = None
    while True:
        if len(self._children) == 0:
            try:
                # FIXME: retry?
                self._children = self._client.zk.retry(
                    self._client.zk.get_children, self._path)
            except kazoo.exceptions.NoNodeError:
                raise NoNodeError
            self._children = list(sorted(self._children))
        if len(self._children) == 0:
            raise StopIteration
        name = self._children[0]
        path = join(self._path, name)
        try:
            self._client.zk.create(join(path, "__lock__"), ephemeral=True)
        except (kazoo.exceptions.NoNodeError,
                kazoo.exceptions.NodeExistsError):
            self._children.pop(0)  # FIXME: need a watcher
            continue
        self._last = name
        return _decode_value(self._client.zk.get(path)[0])

def create_or_update_limit(self, for_who, resource, limit,
                           kind='upper_bound'):
    processor = self.processors.get(kind)
    if not processor:
        raise exceptions.UnsupportedKind(
            "Unsupported kind '%s' requested"
            " for resource '%s' owned by '%s'" % (kind, resource, for_who))
    who_path = paths.join(self.uri.path, for_who)
    self.client.ensure_path(who_path)
    resource_path = paths.join(who_path, resource)
    try:
        self.client.create(resource_path, json.dumps({
            'kind': kind,
            'details': processor.create(limit),
        }))
    except kazoo_exceptions.NodeExistsError:
        blob, znode = self.client.get(resource_path)
        stored = json.loads(blob)
        if stored['kind'] != kind:
            raise exceptions.UnsupportedKind(
                "Can only update limits of the same"
                " kind, %s != %s" % (kind, stored['kind']))
        else:
            stored['details'] = processor.update(stored['details'], limit)
            # Ensure we pass in the version that we read this on so
            # that if it was changed by some other actor that we can
            # avoid overwriting that value (and retry, or handle in some
            # other manner).
            self.client.set(resource_path, json.dumps(stored),
                            version=znode.version)

def clear_all(self, delete_dirs=True):
    """Delete all data transactionally."""
    with self._exc_wrapper():
        txn = self._client.transaction()
        # Delete all data under logbook path.
        for lb_uuid in self._client.get_children(self.book_path):
            lb_path = paths.join(self.book_path, lb_uuid)
            for fd_uuid in self._client.get_children(lb_path):
                txn.delete(paths.join(lb_path, fd_uuid))
            txn.delete(lb_path)
        # Delete all data under flow detail path.
        for fd_uuid in self._client.get_children(self.flow_path):
            fd_path = paths.join(self.flow_path, fd_uuid)
            for ad_uuid in self._client.get_children(fd_path):
                txn.delete(paths.join(fd_path, ad_uuid))
            txn.delete(fd_path)
        # Delete all data under atom detail path.
        for ad_uuid in self._client.get_children(self.atom_path):
            ad_path = paths.join(self.atom_path, ad_uuid)
            txn.delete(ad_path)
        # Delete containing directories.
        if delete_dirs:
            txn.delete(self.book_path)
            txn.delete(self.atom_path)
            txn.delete(self.flow_path)
        k_utils.checked_commit(txn)

def clear_all(self, delete_dirs=True):
    """Delete all data transactionally."""
    with self._exc_wrapper():
        with self._client.transaction() as txn:
            # Delete all data under logbook path.
            for lb_uuid in self._client.get_children(self.book_path):
                lb_path = paths.join(self.book_path, lb_uuid)
                for fd_uuid in self._client.get_children(lb_path):
                    txn.delete(paths.join(lb_path, fd_uuid))
                txn.delete(lb_path)
            # Delete all data under flow detail path.
            for fd_uuid in self._client.get_children(self.flow_path):
                fd_path = paths.join(self.flow_path, fd_uuid)
                for td_uuid in self._client.get_children(fd_path):
                    txn.delete(paths.join(fd_path, td_uuid))
                txn.delete(fd_path)
            # Delete all data under task detail path.
            for td_uuid in self._client.get_children(self.task_path):
                td_path = paths.join(self.task_path, td_uuid)
                txn.delete(td_path)
            # Delete containing directories.
            if delete_dirs:
                txn.delete(self.book_path)
                txn.delete(self.task_path)
                txn.delete(self.flow_path)

def register_entity(self, entity):
    entity_type = entity.kind
    if entity_type == c_base.Conductor.ENTITY_KIND:
        entity_path = k_paths.join(self.entity_path, entity_type)
        try:
            self._client.ensure_path(entity_path)
            self._client.create(k_paths.join(entity_path, entity.name),
                                value=misc.binary_encode(
                                    zag_json.dumps(entity.to_dict())),
                                ephemeral=True)
        except k_exceptions.NodeExistsError:
            pass
        except self._client.handler.timeout_exception:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, operation"
                " timed out" % (entity.name, entity_path))
        except k_exceptions.SessionExpiredError:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, session"
                " expired" % (entity.name, entity_path))
        except k_exceptions.KazooException:
            excp.raise_with_cause(
                excp.JobFailure,
                "Can not register entity %s under %s, internal"
                " error" % (entity.name, entity_path))
    else:
        raise excp.NotImplementedError(
            "Not implemented for other entity type '%s'" % entity_type)

def clear_all(self, delete_dirs=True):
    """Delete all data transactionally."""
    with self._exc_wrapper():
        with self._client.transaction() as txn:
            # Delete all data under logbook path.
            for lb_uuid in self._client.get_children(self.book_path):
                lb_path = paths.join(self.book_path, lb_uuid)
                for fd_uuid in self._client.get_children(lb_path):
                    txn.delete(paths.join(lb_path, fd_uuid))
                txn.delete(lb_path)
            # Delete all data under flow detail path.
            for fd_uuid in self._client.get_children(self.flow_path):
                fd_path = paths.join(self.flow_path, fd_uuid)
                for ad_uuid in self._client.get_children(fd_path):
                    txn.delete(paths.join(fd_path, ad_uuid))
                txn.delete(fd_path)
            # Delete all data under atom detail path.
            for ad_uuid in self._client.get_children(self.atom_path):
                ad_path = paths.join(self.atom_path, ad_uuid)
                txn.delete(ad_path)
            # Delete containing directories.
            if delete_dirs:
                txn.delete(self.book_path)
                txn.delete(self.atom_path)
                txn.delete(self.flow_path)

def _destroy_logbook(lb_uuid, txn):
    lb_path = paths.join(self.book_path, lb_uuid)
    if not self._client.exists(lb_path):
        raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
    for fd_uuid in self._client.get_children(lb_path):
        _destroy_flow_details(fd_uuid, txn)
        txn.delete(paths.join(lb_path, fd_uuid))
    txn.delete(lb_path)

def _destroy_flow_details(fd_uuid, txn):
    fd_path = paths.join(self.flow_path, fd_uuid)
    if not self._client.exists(fd_path):
        raise exc.NotFound("No flow details found with id: %s" % fd_uuid)
    for ad_uuid in self._client.get_children(fd_path):
        _destroy_atom_details(ad_uuid, txn)
        txn.delete(paths.join(fd_path, ad_uuid))
    txn.delete(fd_path)

def __init__(self, backend, client):
    self._backend = backend
    self._client = client
    self._book_path = paths.join(self._backend.path, "books")
    self._flow_path = paths.join(self._backend.path, "flow_details")
    self._atom_path = paths.join(self._backend.path, "atom_details")
    with self._exc_wrapper():
        # NOOP if already started.
        self._client.start()

def consume(self, for_who, resource, amount):
    who_path = paths.join(self.uri.path, for_who)
    resource_path = paths.join(who_path, resource)
    blob, znode = self.client.get(resource_path)
    new_stored = self._try_consume(for_who, resource,
                                   json.loads(blob), amount)
    # Ensure we pass in the version that we read this on so
    # that if it was changed by some other actor that we can
    # avoid overwriting that value (and retry, or handle in some
    # other manner).
    self.client.set(resource_path, json.dumps(new_stored),
                    version=znode.version)

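# A minimal sketch (not part of the original backend) of how a caller might
# handle the version conflict that consume() above can hit: kazoo raises
# BadVersionError when the znode changed between the get() and the versioned
# set(), and a small compare-and-swap retry loop is one common response.
# The function name, the mutate callable and the attempts count are
# assumptions made for illustration.
import json

from kazoo import exceptions as k_exceptions


def versioned_update(client, path, mutate, attempts=3):
    for _ in range(attempts):
        blob, znode = client.get(path)
        updated = mutate(json.loads(blob))
        try:
            # Passing the read version turns set() into a compare-and-swap.
            client.set(path, json.dumps(updated).encode('utf-8'),
                       version=znode.version)
            return updated
        except k_exceptions.BadVersionError:
            # Someone else updated the znode first; re-read and retry.
            continue
    raise RuntimeError("Unable to update %s after %d attempts"
                       % (path, attempts))
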
def register_entity(self, entity):
    entity_type = entity.kind
    if entity_type == 'conductor':
        entity_path = k_paths.join(self.entity_path, entity_type)
        self._client.ensure_path(entity_path)
        conductor_name = entity.name
        self._client.create(k_paths.join(entity_path, conductor_name),
                            value=misc.binary_encode(
                                jsonutils.dumps(entity.to_dict())),
                            ephemeral=True)
    else:
        raise excp.NotImplementedError(
            "Not implemented for other entity type '%s'" % entity_type)

def __init__(self, name, conf, client=None, persistence=None,
             emit_notifications=True):
    super(ZookeeperJobBoard, self).__init__(name, conf)
    if client is not None:
        self._client = client
        self._owned = False
    else:
        self._client = kazoo_utils.make_client(self._conf)
        self._owned = True
    path = str(conf.get("path", self.DEFAULT_PATH))
    if not path:
        raise ValueError("Empty zookeeper path is disallowed")
    if not k_paths.isabs(path):
        raise ValueError("Zookeeper path must be absolute")
    self._path = path
    self._trash_path = self._path.replace(k_paths.basename(self._path),
                                          self.TRASH_FOLDER)
    # The backend to load the full logbooks from, since what is sent over
    # the data connection is only the logbook uuid and name, and not the
    # full logbook.
    self._persistence = persistence
    # Misc. internal details
    self._known_jobs = {}
    self._job_cond = threading.Condition()
    self._open_close_lock = threading.RLock()
    self._client.add_listener(self._state_change_listener)
    self._bad_paths = frozenset([path])
    self._job_watcher = None
    # Since we use sequenced ids this will be the path that the sequences
    # are prefixed with, for example, job0000000001, job0000000002, ...
    self._job_base = k_paths.join(path, self.JOB_PREFIX)
    self._worker = None
    self._emit_notifications = bool(emit_notifications)
    self._connected = False

def increment(self):
    with self._client.zk.Lock(join(self._path, "__lock__")):
        old = self.get()
        new = old + 1
        self._client.zk.set(self._path, _encode_value(new))
        get_logger().debug("Value changed: %d -> %d", old, new,
                           comment=self._path)
        return old

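# A hedged alternative (not from the original class): kazoo ships a Counter
# recipe that performs an atomic, version-checked increment, which can stand
# in for the explicit distributed Lock used by increment() above. The helper
# name and the zk parameter are assumptions; zk is expected to be a connected
# kazoo.client.KazooClient.
def increment_with_counter(zk, path, delta=1):
    counter = zk.Counter(path)
    counter += delta
    return counter.value
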
def _on_job_posting(self, children):
    LOG.debug("Got children %s under path %s", children, self.path)
    child_paths = [k_paths.join(self.path, c) for c in children]
    # Remove jobs that we know about but which are no longer children
    with self._job_mutate:
        removals = set()
        for path, (_job, posting_state) in six.iteritems(self._known_jobs):
            if posting_state != _READY:
                continue
            if path not in child_paths:
                removals.add(path)
        for path in removals:
            self._remove_job(path)
    # Ensure that we have a job record for each new job that has appeared
    for path in child_paths:
        if path in self._bad_paths:
            continue
        with self._job_mutate:
            if path not in self._known_jobs:
                # Fire off the request to populate this job asynchronously.
                #
                # This method is called from an asynchronous handler so
                # it's better to exit from this quickly to allow other
                # asynchronous handlers to be executed.
                func = functools.partial(self._process_child, path=path)
                result = self._client.get_async(path)
                result.rawlink(func)

def _update_atom_details(self, ad, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    ad_path = paths.join(self.atom_path, ad.uuid)
    e_ad = None
    try:
        ad_data, _zstat = self._client.get(ad_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        if not create_missing:
            raise exc.NotFound("No atom details found with"
                               " id: %s" % ad.uuid)
        else:
            txn.create(ad_path)
    else:
        # Existent: read it out.
        try:
            ad_data = misc.decode_json(ad_data)
            ad_cls = logbook.atom_detail_class(ad_data['type'])
            e_ad = ad_cls.from_dict(ad_data['atom'])
        except KeyError:
            pass
    # Update and write it back
    if e_ad:
        e_ad = e_ad.merge(ad)
    else:
        e_ad = ad
    ad_data = base._format_atom(e_ad)
    txn.set_data(ad_path, misc.binary_encode(jsonutils.dumps(ad_data)))
    return e_ad

def _update_atom_details(self, ad, txn, create_missing=False):
    # Determine whether the desired data exists or not.
    ad_path = paths.join(self.atom_path, ad.uuid)
    e_ad = None
    try:
        ad_data, _zstat = self._client.get(ad_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception.
        raise exc.NotFound("No atom details found with id: %s" % ad.uuid)
    else:
        # Existent: read it out.
        try:
            ad_data = misc.decode_json(ad_data)
            ad_cls = logbook.atom_detail_class(ad_data['type'])
            e_ad = ad_cls.from_dict(ad_data['atom'])
        except KeyError:
            pass
    # Update and write it back
    if e_ad:
        e_ad = e_ad.merge(ad)
    else:
        e_ad = ad
    ad_data = base._format_atom(e_ad)
    txn.set_data(ad_path, misc.binary_encode(jsonutils.dumps(ad_data)))
    return e_ad

def __init__(self, name, conf, client=None, persistence=None,
             emit_notifications=True):
    super(ZookeeperJobBoard, self).__init__(name, conf)
    if client is not None:
        self._client = client
        self._owned = False
    else:
        self._client = kazoo_utils.make_client(self._conf)
        self._owned = True
    path = str(conf.get("path", "/taskflow/jobs"))
    if not path:
        raise ValueError("Empty zookeeper path is disallowed")
    if not k_paths.isabs(path):
        raise ValueError("Zookeeper path must be absolute")
    self._path = path
    # The backend to load the full logbooks from, since what's sent over
    # the zookeeper data connection is only the logbook uuid and name, and
    # not currently the full logbook (later when a zookeeper backend
    # appears we can likely optimize for that backend usage by directly
    # reading from the path where the data is stored, if we want).
    self._persistence = persistence
    # Misc. internal details
    self._known_jobs = {}
    self._job_lock = threading.RLock()
    self._job_cond = threading.Condition(self._job_lock)
    self._open_close_lock = threading.RLock()
    self._client.add_listener(self._state_change_listener)
    self._bad_paths = frozenset([path])
    self._job_watcher = None
    # Since we use sequenced ids this will be the path that the sequences
    # are prefixed with, for example, job0000000001, job0000000002, ...
    self._job_base = k_paths.join(path, JOB_PREFIX)
    self._worker = None
    self._emit_notifications = bool(emit_notifications)

def test_register_entity(self):
    conductor_name = "conductor-abc@localhost:4123"
    entity_instance = entity.Entity("conductor", conductor_name, {})
    with base.connect_close(self.board):
        self.board.register_entity(entity_instance)
    # Check '.entity' node has been created
    self.assertTrue(self.board.entity_path in self.client.storage.paths)
    conductor_entity_path = k_paths.join(self.board.entity_path,
                                         'conductor',
                                         conductor_name)
    self.assertTrue(conductor_entity_path in self.client.storage.paths)
    conductor_data = (
        self.client.storage.paths[conductor_entity_path]['data'])
    self.assertTrue(len(conductor_data) > 0)
    self.assertDictEqual({
        'name': conductor_name,
        'kind': 'conductor',
        'metadata': {},
    }, jsonutils.loads(misc.binary_decode(conductor_data)))

    entity_instance_2 = entity.Entity("non-sense", "other_name", {})
    with base.connect_close(self.board):
        self.assertRaises(excp.NotImplementedError,
                          self.board.register_entity,
                          entity_instance_2)

def test_register_entity(self):
    conductor_name = "conductor-abc@localhost:4123"
    entity_instance = entity.Entity("conductor", conductor_name, {})
    with base.connect_close(self.board):
        self.board.register_entity(entity_instance)
    # Check '.entity' node has been created
    self.assertTrue(self.board.entity_path in self.client.storage.paths)
    conductor_entity_path = k_paths.join(self.board.entity_path,
                                         'conductor',
                                         conductor_name)
    self.assertTrue(conductor_entity_path in self.client.storage.paths)
    conductor_data = (
        self.client.storage.paths[conductor_entity_path]['data'])
    self.assertTrue(len(conductor_data) > 0)
    self.assertDictEqual(
        {
            'name': conductor_name,
            'kind': 'conductor',
            'metadata': {},
        },
        zag_json.loads(misc.binary_decode(conductor_data)))

    entity_instance_2 = entity.Entity("non-sense", "other_name", {})
    with base.connect_close(self.board):
        self.assertRaises(excp.NotImplementedError,
                          self.board.register_entity,
                          entity_instance_2)

def _on_job_posting(self, children, delayed=True):
    LOG.debug("Got children %s under path %s", children, self.path)
    child_paths = []
    for c in children:
        if c.endswith(LOCK_POSTFIX) or not c.startswith(JOB_PREFIX):
            # Skip lock paths or non-job-paths (these are not valid jobs)
            continue
        child_paths.append(k_paths.join(self.path, c))
    # Remove jobs that we know about but which are no longer children
    with self._job_lock:
        removals = set()
        for path, _job in six.iteritems(self._known_jobs):
            if path not in child_paths:
                removals.add(path)
        for path in removals:
            self._remove_job(path)
    # Ensure that we have a job record for each new job that has appeared
    for path in child_paths:
        if path in self._bad_paths:
            continue
        with self._job_lock:
            if path not in self._known_jobs:
                # Fire off the request to populate this job asynchronously.
                #
                # This method is *usually* called from an asynchronous
                # handler so it's better to exit from this quickly to
                # allow other asynchronous handlers to be executed.
                request = self._client.get_async(path)
                child_proc = functools.partial(self._process_child, path)
                if delayed:
                    request.rawlink(child_proc)
                else:
                    child_proc(request)

def consume_many(self, for_who, resources, amounts):
    who_path = paths.join(self.uri.path, for_who)
    values_to_save = []
    for resource, amount in zip(resources, amounts):
        resource_path = paths.join(who_path, resource)
        blob, znode = self.client.get(resource_path)
        new_stored = self._try_consume(for_who, resource,
                                       json.loads(blob), amount)
        values_to_save.append((resource_path,
                               json.dumps(new_stored),
                               znode.version))
    # Commit all changes at once, so that we can ensure that all the
    # changes will happen, or none will...
    if values_to_save:
        with self.client.transaction() as txn:
            for path, value, version in values_to_save:
                txn.set_data(path, value, version=version)

def paths_join(*args):
    lpaths = []
    for arg in args:
        if isinstance(arg, six.binary_type):
            lpaths.append(arg.decode('ascii'))
        else:
            lpaths.append(arg)
    return paths.join(*lpaths)

def _get_task_details(self, td_uuid):
    td_path = paths.join(self.task_path, td_uuid)
    try:
        td_data, _zstat = self._client.get(td_path)
    except k_exc.NoNodeError:
        raise exc.NotFound("No task details found with id: %s" % td_uuid)
    else:
        return p_utils.unformat_task_detail(td_uuid,
                                            misc.decode_json(td_data))

def save_logbook(self, lb):
    """Save (update) a log_book transactionally."""

    def _create_logbook(lb_path, txn):
        lb_data = lb.to_dict(marshal_time=True)
        txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
        for fd in lb:
            # NOTE(harlowja): create an entry in the logbook path
            # for the provided flow detail so that a reference exists
            # from the logbook to its flow details.
            txn.create(paths.join(lb_path, fd.uuid))
            fd_path = paths.join(self.flow_path, fd.uuid)
            fd_data = jsonutils.dumps(fd.to_dict())
            txn.create(fd_path, misc.binary_encode(fd_data))
            for ad in fd:
                # NOTE(harlowja): create an entry in the flow detail path
                # for the provided atom detail so that a reference exists
                # from the flow detail to its atom details.
                txn.create(paths.join(fd_path, ad.uuid))
                ad_path = paths.join(self.atom_path, ad.uuid)
                ad_data = base._format_atom(ad)
                txn.create(ad_path,
                           misc.binary_encode(jsonutils.dumps(ad_data)))
        return lb

    def _update_logbook(lb_path, lb_data, txn):
        e_lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
                                         unmarshal_time=True)
        e_lb = e_lb.merge(lb)
        lb_data = e_lb.to_dict(marshal_time=True)
        txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
        for fd in lb:
            fd_path = paths.join(lb_path, fd.uuid)
            if not self._client.exists(fd_path):
                # NOTE(harlowja): create an entry in the logbook path
                # for the provided flow detail so that a reference exists
                # from the logbook to its flow details.
                txn.create(fd_path)
            e_fd = self._update_flow_details(fd, txn, create_missing=True)
            e_lb.add(e_fd)
        return e_lb

    with self._exc_wrapper():
        with self._client.transaction() as txn:
            # Determine whether the desired data exists or not.
            lb_path = paths.join(self.book_path, lb.uuid)
            try:
                lb_data, _zstat = self._client.get(lb_path)
            except k_exc.NoNodeError:
                # Create a new logbook since it doesn't exist.
                e_lb = _create_logbook(lb_path, txn)
            else:
                # Otherwise update the existing logbook instead.
                e_lb = _update_logbook(lb_path, lb_data, txn)
            # Finally return (updated) logbook.
            return e_lb

def _get_atom_details(self, ad_uuid):
    ad_path = paths.join(self.atom_path, ad_uuid)
    try:
        ad_data, _zstat = self._client.get(ad_path)
    except k_exc.NoNodeError:
        raise exc.NotFound("No atom details found with id: %s" % ad_uuid)
    else:
        ad_data = misc.decode_json(ad_data)
        ad_cls = logbook.atom_detail_class(ad_data['type'])
        return ad_cls.from_dict(ad_data['atom'])

def save_logbook(self, lb):
    """Save (update) a log_book transactionally."""

    def _create_logbook(lb_path, txn):
        lb_data = lb.to_dict(marshal_time=True)
        txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
        for fd in lb:
            # NOTE(harlowja): create an entry in the logbook path
            # for the provided flow detail so that a reference exists
            # from the logbook to its flow details.
            txn.create(paths.join(lb_path, fd.uuid))
            fd_path = paths.join(self.flow_path, fd.uuid)
            fd_data = jsonutils.dumps(fd.to_dict())
            txn.create(fd_path, misc.binary_encode(fd_data))
            for ad in fd:
                # NOTE(harlowja): create an entry in the flow detail path
                # for the provided atom detail so that a reference exists
                # from the flow detail to its atom details.
                txn.create(paths.join(fd_path, ad.uuid))
                ad_path = paths.join(self.atom_path, ad.uuid)
                ad_data = base._format_atom(ad)
                txn.create(ad_path,
                           misc.binary_encode(jsonutils.dumps(ad_data)))
        return lb

    def _update_logbook(lb_path, lb_data, txn):
        e_lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
                                         unmarshal_time=True)
        e_lb = e_lb.merge(lb)
        lb_data = e_lb.to_dict(marshal_time=True)
        txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
        for fd in lb:
            fd_path = paths.join(lb_path, fd.uuid)
            if not self._client.exists(fd_path):
                # NOTE(harlowja): create an entry in the logbook path
                # for the provided flow detail so that a reference exists
                # from the logbook to its flow details.
                txn.create(fd_path)
            e_fd = self._update_flow_details(fd, txn, create_missing=True)
            e_lb.add(e_fd)
        return e_lb

    with self._exc_wrapper():
        txn = self._client.transaction()
        # Determine whether the desired data exists or not.
        lb_path = paths.join(self.book_path, lb.uuid)
        try:
            lb_data, _zstat = self._client.get(lb_path)
        except k_exc.NoNodeError:
            # Create a new logbook since it doesn't exist.
            e_lb = _create_logbook(lb_path, txn)
        else:
            # Otherwise update the existing logbook instead.
            e_lb = _update_logbook(lb_path, lb_data, txn)
        k_utils.checked_commit(txn)
        return e_lb

def _create_logbook(lb_path, txn):
    lb_data = p_utils.format_logbook(lb, created_at=None)
    txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        # NOTE(harlowja): create an entry in the logbook path
        # for the provided flow detail so that a reference exists
        # from the logbook to its flow details.
        txn.create(paths.join(lb_path, fd.uuid))
        fd_path = paths.join(self.flow_path, fd.uuid)
        fd_data = jsonutils.dumps(p_utils.format_flow_detail(fd))
        txn.create(fd_path, misc.binary_encode(fd_data))
        for td in fd:
            # NOTE(harlowja): create an entry in the flow detail path
            # for the provided task detail so that a reference exists
            # from the flow detail to its task details.
            txn.create(paths.join(fd_path, td.uuid))
            td_path = paths.join(self.task_path, td.uuid)
            td_data = jsonutils.dumps(p_utils.format_task_detail(td))
            txn.create(td_path, misc.binary_encode(td_data))
    return lb

def _get_flow_details(self, fd_uuid):
    fd_path = paths.join(self.flow_path, fd_uuid)
    try:
        fd_data, _zstat = self._client.get(fd_path)
    except k_exc.NoNodeError:
        raise exc.NotFound("No flow details found with id: %s" % fd_uuid)
    fd = p_utils.unformat_flow_detail(fd_uuid, misc.decode_json(fd_data))
    for td_uuid in self._client.get_children(fd_path):
        fd.add(self._get_task_details(td_uuid))
    return fd

def _get_flow_details(self, fd_uuid):
    fd_path = paths.join(self.flow_path, fd_uuid)
    try:
        fd_data, _zstat = self._client.get(fd_path)
    except k_exc.NoNodeError:
        raise exc.NotFound("No flow details found with id: %s" % fd_uuid)
    fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))
    for ad_uuid in self._client.get_children(fd_path):
        fd.add(self._get_atom_details(ad_uuid))
    return fd

def save_logbook(self, lb):
    """Save (update) a log_book transactionally."""

    def _create_logbook(lb_path, txn):
        lb_data = p_utils.format_logbook(lb, created_at=None)
        txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
        for fd in lb:
            # NOTE(harlowja): create an entry in the logbook path
            # for the provided flow detail so that a reference exists
            # from the logbook to its flow details.
            txn.create(paths.join(lb_path, fd.uuid))
            fd_path = paths.join(self.flow_path, fd.uuid)
            fd_data = jsonutils.dumps(p_utils.format_flow_detail(fd))
            txn.create(fd_path, misc.binary_encode(fd_data))
            for td in fd:
                # NOTE(harlowja): create an entry in the flow detail path
                # for the provided task detail so that a reference exists
                # from the flow detail to its task details.
                txn.create(paths.join(fd_path, td.uuid))
                td_path = paths.join(self.task_path, td.uuid)
                td_data = jsonutils.dumps(p_utils.format_task_detail(td))
                txn.create(td_path, misc.binary_encode(td_data))
        return lb

    def _update_logbook(lb_path, lb_data, txn):
        e_lb = p_utils.unformat_logbook(lb.uuid, misc.decode_json(lb_data))
        e_lb = p_utils.logbook_merge(e_lb, lb)
        lb_data = p_utils.format_logbook(e_lb, created_at=lb.created_at)
        txn.set_data(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
        for fd in lb:
            fd_path = paths.join(lb_path, fd.uuid)
            if not self._client.exists(fd_path):
                # NOTE(harlowja): create an entry in the logbook path
                # for the provided flow detail so that a reference exists
                # from the logbook to its flow details.
                txn.create(fd_path)
            e_fd = self._update_flow_details(fd, txn, create_missing=True)
            e_lb.add(e_fd)
        return e_lb

    with self._exc_wrapper():
        with self._client.transaction() as txn:
            # Determine whether the desired data exists or not.
            lb_path = paths.join(self.book_path, lb.uuid)
            try:
                lb_data, _zstat = self._client.get(lb_path)
            except k_exc.NoNodeError:
                # Create a new logbook since it doesn't exist.
                e_lb = _create_logbook(lb_path, txn)
            else:
                # Otherwise update the existing logbook instead.
                e_lb = _update_logbook(lb_path, lb_data, txn)
            # Finally return (updated) logbook.
            return e_lb

def _paths_join(arg, *more_args):
    """Converts paths into a string (unicode)."""
    args = [arg]
    args.extend(more_args)
    cleaned_args = []
    for arg in args:
        if isinstance(arg, six.binary_type):
            cleaned_args.append(arg.decode('ascii'))
        else:
            cleaned_args.append(arg)
    return paths.join(*cleaned_args)

def _get_logbook(self, lb_uuid):
    lb_path = paths.join(self.book_path, lb_uuid)
    try:
        lb_data, _zstat = self._client.get(lb_path)
    except k_exc.NoNodeError:
        raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
    else:
        lb = p_utils.unformat_logbook(lb_uuid, misc.decode_json(lb_data))
        for fd_uuid in self._client.get_children(lb_path):
            lb.add(self._get_flow_details(fd_uuid))
        return lb

def init(client, fatal=False):
    for path in (
        INPUT_PATH,
        READY_PATH,
        RUNNING_PATH,
        CONTROL_JOBS_PATH,
        HEAD_PATH,
        JOBS_COUNTER_PATH,
        join(STATE_PATH, STATE_SPLITTER),
        join(STATE_PATH, STATE_WORKER),
        join(STATE_PATH, STATE_COLLECTOR),
        USER_PATH,
    ):
        try:
            client.create(path, makepath=True)
            _logger.info("Created zoo path: %s", path)
        except NodeExistsError:
            level = (
                logging.ERROR
                if fatal
                else logging.DEBUG
            )
            _logger.log(level, "Zoo path already exists: %s", path)
            if fatal:
                raise

def _create_logbook(lb_path, txn):
    lb_data = lb.to_dict(marshal_time=True)
    txn.create(lb_path, misc.binary_encode(jsonutils.dumps(lb_data)))
    for fd in lb:
        # NOTE(harlowja): create an entry in the logbook path
        # for the provided flow detail so that a reference exists
        # from the logbook to its flow details.
        txn.create(paths.join(lb_path, fd.uuid))
        fd_path = paths.join(self.flow_path, fd.uuid)
        fd_data = jsonutils.dumps(fd.to_dict())
        txn.create(fd_path, misc.binary_encode(fd_data))
        for ad in fd:
            # NOTE(harlowja): create an entry in the flow detail path
            # for the provided atom detail so that a reference exists
            # from the flow detail to its atom details.
            txn.create(paths.join(fd_path, ad.uuid))
            ad_path = paths.join(self.atom_path, ad.uuid)
            ad_data = base._format_atom(ad)
            txn.create(ad_path,
                       misc.binary_encode(jsonutils.dumps(ad_data)))
    return lb

def init(client, fatal=False):
    for path in (
        INPUT_PATH,
        READY_PATH,
        RUNNING_PATH,
        CONTROL_JOBS_PATH,
        HEAD_PATH,
        JOBS_COUNTER_PATH,
        join(STATE_PATH, STATE_SPLITTER),
        join(STATE_PATH, STATE_WORKER),
        join(STATE_PATH, STATE_COLLECTOR),
        USER_PATH,
    ):
        try:
            client.create(path, makepath=True)
            _logger.info("Created zoo path: %s", path)
        except NodeExistsError:
            level = (logging.ERROR if fatal else logging.DEBUG)
            _logger.log(level, "Zoo path already exists: %s", path)
            if fatal:
                raise

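# A hedged variant (not part of the original module): ensure_path() creates
# any missing parents and succeeds silently when the node already exists, so
# the NodeExistsError handling in init() above can be dropped when a
# pre-existing path is never considered fatal. The helper name is an
# assumption; client is expected to be a connected kazoo.client.KazooClient.
def init_paths(client, zoo_paths):
    for path in zoo_paths:
        client.ensure_path(path)
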
def _get_logbook(self, lb_uuid):
    lb_path = paths.join(self.book_path, lb_uuid)
    try:
        lb_data, _zstat = self._client.get(lb_path)
    except k_exc.NoNodeError:
        raise exc.NotFound("No logbook found with id: %s" % lb_uuid)
    else:
        lb = logbook.LogBook.from_dict(misc.decode_json(lb_data),
                                       unmarshal_time=True)
        for fd_uuid in self._client.get_children(lb_path):
            lb.add(self._get_flow_details(fd_uuid))
        return lb

def _paths_join(arg, *more_args):
    """Converts paths into a string (unicode)."""
    args = [arg]
    args.extend(more_args)
    cleaned_args = []
    for arg in args:
        if isinstance(arg, bytes):
            cleaned_args.append(
                encodeutils.safe_decode(arg, incoming='ascii')
            )
        else:
            cleaned_args.append(arg)
    return paths.join(*cleaned_args)

def _on_job_posting(self, children, delayed=True):
    LOG.debug("Got children %s under path %s", children, self.path)
    child_paths = []
    for c in children:
        if (c.endswith(self.LOCK_POSTFIX) or
                not c.startswith(self.JOB_PREFIX)):
            # Skip lock paths or non-job-paths (these are not valid jobs)
            continue
        child_paths.append(k_paths.join(self.path, c))
    # Figure out what we really should be investigating and what we
    # shouldn't (remove jobs that exist in our local version, but don't
    # exist in the children anymore) and accumulate all paths that we
    # need to trigger population of (without holding the job lock).
    investigate_paths = []
    pending_removals = []
    with self._job_cond:
        for path in six.iterkeys(self._known_jobs):
            if path not in child_paths:
                pending_removals.append(path)
    for path in child_paths:
        if path in self._bad_paths:
            continue
        # This pre-check will *not* guarantee that we will not already
        # have the job (if it's being populated elsewhere) but it will
        # reduce the amount of duplicated requests in general; later when
        # the job information has been populated we will ensure that we
        # are not adding duplicates into the currently known jobs...
        if path in self._known_jobs:
            continue
        if path not in investigate_paths:
            investigate_paths.append(path)
    if pending_removals:
        with self._job_cond:
            am_removed = 0
            try:
                for path in pending_removals:
                    am_removed += int(self._remove_job(path))
            finally:
                if am_removed:
                    self._job_cond.notify_all()
    for path in investigate_paths:
        # Fire off the request to populate this job.
        #
        # This method is *usually* called from an asynchronous handler so
        # it's better to exit from this quickly to allow other
        # asynchronous handlers to be executed.
        request = self._client.get_async(path)
        if delayed:
            request.rawlink(functools.partial(self._process_child, path))
        else:
            self._process_child(path, request, quiet=False)

def is_still_owner(self):
    if not self.acquired:
        return False
    try:
        data, _znode = self._client.get(
            paths.join(self._lock.path, self._lock.node))
        return data == self._lock.data
    except (self._client.handler.timeout_exception,
            exceptions.ConnectionLoss,
            exceptions.ConnectionDropped,
            exceptions.NoNodeError):
        return False
    except exceptions.KazooException as e:
        utils.raise_with_cause(tooz.ToozError,
                               "operation error: %s" % (e),
                               cause=e)

def get(self):
    # FIXME: need children watcher
    if self._last is not None:
        self._children.pop(0)
        self._last = None
    while True:
        if len(self._children) == 0:
            self._children = self._client.retry(self._client.get_children,
                                                self._path)
            self._children = list(sorted(self._children))
        if len(self._children) == 0:
            return None
        name = self._children[0]
        path = join(self._path, name)
        try:
            self._client.create(join(path, LOCK), ephemeral=True)
        except (NoNodeError, NodeExistsError):
            self._children.pop(0)  # FIXME: need a watcher
            continue
        self._last = name
        return self._client.get(path)[0]