class ZKState(): def __init__(self, path, timeout=30): super(ZKState, self).__init__() self._zk = KazooClient(hosts=ZK_HOSTS, timeout=timeout) self._zk.start(timeout=timeout) self._path = path self._zk.ensure_path(path) def processed(self): return self._zk.exists(self._path + "/complete") def process_start(self): if self.processed(): return False if self._zk.exists(self._path + "/processing"): return False try: self._zk.create(self._path + "/processing", ephemeral=True) return True except NodeExistsError: # another process wins return False def process_end(self): self._zk.create(self._path + "/complete") self._zk.delete(self._path + "/processing") def process_abort(self): try: self._zk.delete(self._path + "/processing") except NoNodeError: pass def close(self): self._zk.stop() self._zk.close()
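A minimal usage sketch for the ZKState guard above, assuming ZK_HOSTS is defined in the surrounding module and that do_work() is a placeholder for the real processing step; whichever worker wins process_start() runs the job, every other contender skips it:

state = ZKState("/jobs/job-42")          # illustrative path
if not state.processed():
    if state.process_start():            # only one contender gets True
        try:
            do_work()                    # hypothetical workload
            state.process_end()          # creates /complete, removes /processing
        except Exception:
            state.process_abort()        # drop the ephemeral /processing marker
            raise
state.close()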
class Zookeeper(KeyManager):
    def __init__(self, hosts):
        self._hosts = hosts
        self.zk = KazooClient(hosts=hosts)
        self.zk.start()

    def get(self, key):
        result = self.zk.get(key)[0]
        if result == b"":
            # Empty value: treat the node as a "directory" and return its children.
            result = []
            children = self.zk.get_children(key)
            for i in children:
                result.append({'name': i, 'value': self.zk.get(os.path.join(key, i))[0]})
            return result
        else:
            return self.zk.get(key)[0]

    def set(self, key, data):
        try:
            self.zk.set(key, data.encode('utf-8'))
        except NoNodeError:  # kazoo.exceptions.NoNodeError: node missing, so create it
            self.zk.create(key, data.encode('utf-8'))

    def mkdir(self, key):
        self.set(key, "")

    def close(self):
        self.zk.stop()
        self.zk.close()

    @property
    def hosts(self):
        return self._hosts
def zookeeper_coordinator(hosts: Set[str] = {"localhost:2181"}, timeout=300.0) -> Any:
    client = KazooClient(hosts=",".join(hosts), timeout=timeout)
    client.start(timeout=timeout)
    yield _ZookeeperCoordinator(client)
    client.stop()
    client.close()
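The coordinator generator above only cleans up when the body completes normally; a minimal sketch of the same lifecycle wrapped as a context manager with teardown in a finally block (the names here are illustrative, not part of the original code):

from contextlib import contextmanager

from kazoo.client import KazooClient


@contextmanager
def zk_session(hosts="localhost:2181", timeout=30.0):
    client = KazooClient(hosts=hosts, timeout=timeout)
    client.start(timeout=timeout)
    try:
        yield client
    finally:
        client.stop()     # teardown also runs if the with-body raises
        client.close()


# Usage:
# with zk_session("zk1:2181,zk2:2181") as zk:
#     zk.ensure_path("/demo")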
def achieve_consensus(self): """Trigger consensus logic and handle errors.""" log.info('Set up ZK client using host(s): %s', self._hosts) zk = KazooClient(hosts=self._hosts) # Initialize ZK connection state variable, which is shared across # threads. It is updated from a change listener function which is # invoked from within a Kazoo connection management thread, see # http://kazoo.readthedocs.org/en/latest/api/handlers/threading.html. self._connected = False zk.add_listener(self._zk_state_change_listener) zk.start() # Wait for handling thread to update connection status. (As of non- # determinism around GIL context switches there is otherwise no # guarantee that the status is updated within # `_run_consensus_procedure`). while not self._connected: time.sleep(0.01) self._zk = zk try: # This may raise ConnectionLost or various # kazoo.exceptions.* types. return self._run_consensus_procedure() finally: log.info('Shut down ZK client.') try: zk.stop() finally: zk.close()
def readAMHostPort(self): amHost = "" amSecuredPort = "" zk = None try: zk = KazooClient(hosts=self.zk_quorum, read_only=True) zk.start() data, stat = zk.get(self.zk_reg_path) logger.debug("Registry Data: %s" % (data.decode("utf-8"))) sliderRegistry = json.loads(data) amUrl = sliderRegistry["payload"]["internalView"]["endpoints"]["org.apache.slider.agents"]["address"] amHost = amUrl.split("/")[2].split(":")[0] amSecuredPort = amUrl.split(":")[2].split("/")[0] # the port needs to be utf-8 encoded amSecuredPort = amSecuredPort.encode('utf8', 'ignore') except Exception: # log and let empty strings be returned logger.error("Could not connect to zk registry at %s in quorum %s" % (self.zk_reg_path, self.zk_quorum)) pass finally: if not zk == None: zk.stop() zk.close() logger.info("AM Host = %s, AM Secured Port = %s" % (amHost, amSecuredPort)) return amHost, amSecuredPort
def bounce_lock_zookeeper( name: str, system_paasta_config: Optional[SystemPaastaConfig] = None ) -> Iterator: """Acquire a bounce lock in zookeeper for the name given. The name should generally be the service namespace being bounced. This is a contextmanager. Please use it via 'with bounce_lock(name):'. :param name: The lock name to acquire""" if system_paasta_config is None: system_paasta_config = load_system_paasta_config() zk = KazooClient( hosts=system_paasta_config.get_zk_hosts(), timeout=ZK_LOCK_CONNECT_TIMEOUT_S, ) zk.start() lock = zk.Lock(f"{ZK_LOCK_PATH}/{name}") try: lock.acquire(timeout=1) # timeout=0 throws some other strange exception yield except LockTimeout: raise LockHeldException("Service %s is already being bounced!" % name) else: lock.release() finally: zk.stop() zk.close()
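A hedged sketch of how a caller might use the bounce lock above, assuming the function is decorated with @contextlib.contextmanager where it is defined and that do_bounce() is a placeholder for the real bounce logic:

with bounce_lock_zookeeper("example_service.main"):
    do_bounce()   # placeholder; runs only while the ZooKeeper lock is held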
class ZKBase(object):
    """
    The scheduler must implement:
      - callback(KazooState.*): handles the connection states LOST and SUSPENDED
    """

    def __init__(self, conf, scheduler):
        self.conf = conf
        self.state = "STOPPED"
        self.zk = KazooClient(self.conf.processor.zk_url, timeout=self.conf.processor.timeout)
        try:
            self.zk.start()
        except Exception:
            LOG.error("Start zk error: %s" % traceback.format_exc(limit=2))
        self.zk.add_listener(self.listen)
        self.scheduler = scheduler
        self.state = "RUNNING"  # STOPPED
        self.state_changes = False

    def listen(self, state):
        if state == KazooState.LOST or state == KazooState.SUSPENDED:
            LOG.error("Session state change: %s" % state)
            if self.state == "STOPPED":
                return
            self.state_changes = True
            self.scheduler.callback(state)
        elif state == KazooState.CONNECTED:
            LOG.info("Connected to ZK.")
        else:
            LOG.error("Session timeout. Cannot connect to ZK.")

    def reinit(self, state):
        while self.zk.state != KazooState.CONNECTED:
            LOG.info("Restart zk connection until connected.")
            try:
                self.zk.restart()
            except Exception:
                LOG.error("Reinit: %s" % traceback.format_exc(limit=1))
        self.state_changes = False

    def create(self, path, value='', acl=None, ephemeral=False, sequence=False, makepath=False):
        LOG.info("Create: path %s" % path)
        self.zk.create(path=path, value=value.encode(), ephemeral=ephemeral,
                       sequence=sequence, makepath=makepath)

    def get(self, path, watch=None):
        return self.zk.get(path, watch)

    def get_children(self, path, watch=None):
        return self.zk.get_children(path, watch=watch)

    def terminate(self):
        LOG.info("Terminate ZK connection.")
        self.state = "STOPPED"
        self.zk.stop()
        self.zk.close()
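A small sketch of wiring ZKBase to a scheduler, assuming a local ensemble on 127.0.0.1:2181; the Conf and NoopScheduler stubs below are illustrative, not part of the original code:

class NoopScheduler(object):
    def callback(self, state):
        print("ZK session state changed:", state)


class Conf(object):
    class processor(object):
        zk_url = "127.0.0.1:2181"
        timeout = 10.0


base = ZKBase(Conf(), NoopScheduler())
print(base.get_children("/"))
base.terminate()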
class ZookeeperClient(object): def __init__(self, server_list): self._retry = KazooRetry(max_tries=None, max_delay=300, sleep_func=gevent.sleep) self._zk_client = KazooClient( hosts=','.join(server_list), timeout=400, handler=SequentialGeventHandler(), logger=logger, connection_retry=self._retry, command_retry=self._retry) def connect(self): self._zk_client.start() def disconnect(self): self._zk_client.stop() self._zk_client.close() def create_node(self, path, value=None): if value is None: value = uuid.uuid4() try: self._zk_client.create(path, str(value), makepath=True) except NodeExistsError: self._zk_client.set(path, str(value))
def create_from_zookeeper(cls, zkconnect): log.info("Connecting to zookeeper {0}".format(zkconnect)) try: zk = KazooClient(zkconnect) zk.start() except Exception as e: raise ZookeeperException("Cannot connect to Zookeeper: {0}".format(e)) # Get broker list cluster = cls() add_brokers_from_zk(cluster, zk) # Get current partition state log.info("Getting partition list from Zookeeper") for topic in zk.get_children("/brokers/topics"): zdata, zstat = zk.get("/brokers/topics/{0}".format(topic)) add_topic_with_replicas(cluster, topic, json.loads(zdata)) if cluster.num_topics() == 0: raise ZookeeperException("The cluster specified does not have any topics") log.info("Closing connection to zookeeper") zk.stop() zk.close() return cluster
def _get_hiveserver2_info_with_zookeeper(self, host, port, zookeeper_name_space): """Get hiveserver2 URL information from zookeeper.""" from kazoo.client import KazooClient from kazoo.retry import KazooRetry hosts = host.split(',') zk_hosts = ','.join( list(map(lambda x: ':'.join([x, str(port)]), hosts))) conn_retry_policy = KazooRetry(max_tries=-1, delay=0.1, max_delay=0.1) cmd_retry_policy = KazooRetry(max_tries=3, delay=0.3, backoff=1, max_delay=1, ignore_expire=False) zk = KazooClient(hosts=zk_hosts, connection_retry=conn_retry_policy, command_retry=cmd_retry_policy) zk.start() children = zk.get_children('/' + zookeeper_name_space) nodes = self.get_hiveserver2_info(children) zk.stop() zk.close() if len(nodes) == 0: from kazoo.exceptions import ZookeeperError raise ZookeeperError( "Can not find child in zookeeper path({}).".format( zookeeper_name_space)) return nodes
def processTransfer(): try: conn = psycopg2.connect(dbConnectStr) cur = conn.cursor() zk = KazooClient(hosts=zkHost) zk.start() transferq = LockingQueue(zk, '/transfer/') while True: rawCode = transferq.get() proposal = rawCode.decode().strip() transferq.consume() # print(" proposal = {0} ".format(proposal)) ints = datetime.now() inload = os.getloadavg()[0] pro1 = Popen(['/usr/bin/python36', './processproptran.py', proposal], stdin=None, stdout=None) pro1.wait() outts = datetime.now() outload = os.getloadavg()[0] # insert the runtime info into c* cluster = Cluster(cfg.cassCluster) session = cluster.connect(cfg.cassKeyspace) stmt = SimpleStatement("""insert into runstat(id,executable,ints,inload,outts,outload) values (%s, %s, %s, %s, %s, %s)""", consistency_level=ConsistencyLevel.ANY) session.execute(stmt, (uuid.uuid4(), executable, ints, inload, outts, outload)) except psycopg2.Error as err: print("SQLError {0}".format(err)) finally: zk.stop() zk.close() cur.close() conn.close()
def save2ownershipcatalog(pq, verdict, proposal, rawtext, symbol, noteId, quantity, target): zk = KazooClient(hosts=zkHost) zk.start() zkc = zk.Counter("/ownershipId", default=0x700) zkc += 1 ownershipId = zkc.value print("ownershipId={0}".format(ownershipId)) zkc = zk.Counter("/noteId", default=0x700) zkc += 1 rowId = zkc.value print("rowId={0}".format(rowId)) zk.stop() zk.close() sha256 = hashlib.sha256() sha256.update("{0}{1}".format(noteId.strip(),target.strip()).encode('utf-8')) hashcode = sha256.hexdigest() try: conn = psycopg2.connect(dbConnectStr) cur = conn.cursor() cur.execute("""insert into ownership0(id, symbol,"noteId", quantity,owner,updated,"hashCode")values(%s,%s,%s,%s,%s,now(),%s) """,[int(ownershipId),symbol.strip(),noteId.strip(), quantity.strip(), target.strip(), hashcode.strip()]) conn.commit() #save the entry to note_catalog table sha256 = hashlib.sha256() sha256.update("{0}{1}".format(noteId.strip(),target.strip()).encode('utf-8')) #hashcode = sha256.hexdigest() cur.execute("""insert into note_catalog(id, pq , verdict, proposal, note, recipient, hook, stmt, setup, "hashCode")values(%s,%s,%s,%s,%s,%s,%s,%s,now(),%s) """,[int(rowId), pq.strip(), verdict.strip(), proposal.strip(), "{0}||{1}||{2}".format(symbol.strip(),noteId.strip(),quantity.strip()) ,target.strip(),'',rawtext.strip(), hashcode.strip()]) conn.commit() except psycopg2.Error as err: print("SQLError {0}".format(err)) finally: cur.close() conn.close()
class PinotZk(object): def __init__(self, config, logger, fabric): self.config = config self.fabric = fabric self.logger = logger self.zk = None def get_handle(self): host = self.config.get_zk_host(self.fabric) if not self.zk: try: self.zk = KazooClient(hosts=host) self.zk.start() except kazoo.exceptions.KazooException: error = 'Failed connecting to zk {0}'.format(host) self.logger.exception(error) raise PinotException(error) return self.zk def close(self): if self.zk: self.zk.stop() self.zk.close()
def create_from_zookeeper(cls, zkconnect, default_retention=1): log.info("Connecting to zookeeper {0}".format(zkconnect)) try: zk = KazooClient(zkconnect) zk.start() except Exception as e: raise ZookeeperException( "Cannot connect to Zookeeper: {0}".format(e)) # Get broker list cluster = cls(retention=default_retention) add_brokers_from_zk(cluster, zk) # Get current partition state log.info("Getting partition list from Zookeeper") for topic in zk.get_children("/brokers/topics"): zdata, zstat = zk.get("/brokers/topics/{0}".format(topic)) add_topic_with_replicas(cluster, topic, json.loads(zdata)) set_topic_retention(cluster.topics[topic], zk) if cluster.num_topics() == 0: raise ZookeeperException( "The cluster specified does not have any topics") log.info("Closing connection to zookeeper") zk.stop() zk.close() return cluster
def get_values_kafak(self, groupName, topicName): kafka_values = dict() broker = SimpleClient(kafka_conn) zk = KazooClient(hosts=zookeepers_conn, read_only=True) zk.start() logsize = 0 if topicName: logsize = 0 partitions = broker.get_partition_ids_for_topic(topicName) responses = broker.send_offset_fetch_request(groupName, [OffsetFetchRequestPayload(topicName, p) for p in partitions], fail_on_error=True) latest_offset = 0 for res in responses: if topicName != "test": latest_offset += res[2] for partition in partitions: log = "/consumers/%s/offsets/%s/%s" % (groupName, topicName, partition) if zk.exists(log): data, stat = zk.get(log) logsize += int(data) lag = latest_offset - logsize broker.close() zk.stop() zk.close() kafka_values['offset'] = latest_offset kafka_values['logsize'] = logsize kafka_values['lag'] = lag return kafka_values
def test_end_of_watches_session(started_cluster): fake_zk1 = None fake_zk2 = None try: fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk1.start() fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk2.start() fake_zk1.create("/test_end_of_watches_session") dummy_set = 0 def dummy_callback(event): nonlocal dummy_set dummy_set += 1 print(event) for child_node in range(100): fake_zk1.create("/test_end_of_watches_session/" + str(child_node)) fake_zk1.get_children("/test_end_of_watches_session/" + str(child_node), watch=dummy_callback) fake_zk2.get_children("/test_end_of_watches_session/" + str(0), watch=dummy_callback) fake_zk2.get_children("/test_end_of_watches_session/" + str(1), watch=dummy_callback) fake_zk1.stop() fake_zk1.close() for child_node in range(100): fake_zk2.create("/test_end_of_watches_session/" + str(child_node) + "/" + str(child_node), b"somebytes") assert dummy_set == 2 finally: for zk in [fake_zk1, fake_zk2]: stop_zk(zk)
class ZKData(object): def __init__(self): super(ZKData,self).__init__() options={"max_tries":-1,"max_delay":5,"ignore_expire":True} self._zk=KazooClient(hosts=ZK_HOSTS,connection_retry=options) try: self._zk.start(timeout=3600) except: print(traceback.format_exc(), flush=True) def set(self, path, value): value=json.dumps(value).encode('utf-8') try: self._zk.create(path, value, makepath=True) except NodeExistsError: self._zk.set(path,value) def get(self, path): try: value, stat= self._zk.get(path) if not value: return {} return json.loads(value.decode('utf-8')) except Exception as e: return {} def close(self): self._zk.stop() self._zk.close()
class ZKData(object): def __init__(self): super(ZKData, self).__init__() self._zk = KazooClient(hosts=ZK_HOSTS) self._zk.start() def set(self, path, value): value = json.dumps(value).encode('utf-8') if self._zk.exists(path): try: self._zk.set(path, value) return except NoNodeError: pass try: self._zk.create(path, value, makepath=True) except NodeExistsError: pass def get(self, path): try: value, stat = self._zk.get(path) if not value: return {} return json.loads(value.decode('utf-8')) except Exception as e: return {} def close(self): self._zk.stop() self._zk.close()
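A brief usage sketch for the ZKData helper above, assuming ZK_HOSTS points at a reachable ensemble; the path and payload are illustrative:

store = ZKData()
store.set("/pipelines/example/model", {"name": "resnet50", "precision": "FP16"})
print(store.get("/pipelines/example/model"))   # -> {'name': 'resnet50', 'precision': 'FP16'}
store.close()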
def zk_client(three_master_cluster: Cluster) -> KazooClient: """ ZooKeeper client connected to a given DC/OS cluster. """ zk_hostports = ','.join([ '{}:2181'.format(m.public_ip_address) for m in three_master_cluster.masters ]) retry_policy = KazooRetry( max_tries=-1, delay=1, backoff=1, max_delay=600, ignore_expire=True, ) zk_client = KazooClient( hosts=zk_hostports, # Avoid failure due to client session timeout. timeout=40, # Work around https://github.com/python-zk/kazoo/issues/374 connection_retry=retry_policy, command_retry=retry_policy, ) zk_client.start() try: yield zk_client finally: zk_client.stop() zk_client.close()
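The zk_client generator above looks like a pytest fixture; a hedged sketch of a test that consumes it (the /mesos path is an assumption about what the cluster registers):

def test_mesos_masters_registered(zk_client: KazooClient) -> None:
    # Mesos masters conventionally register under /mesos; adjust if the chroot differs.
    assert zk_client.exists("/mesos") is not None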
def zk_server(tmpdir): zk_container_name = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(6)) # TODO(cmaloney): Add a python context manager for dockerized daemons subprocess.check_call([ 'docker', 'run', '-d', '-p', '2181:2181', '-p', '2888:2888', '-p', '3888:3888', '--name', zk_container_name, 'jplock/zookeeper' ]) conn_retry_policy = KazooRetry(max_tries=-1, delay=0.1, max_delay=0.1) cmd_retry_policy = KazooRetry(max_tries=3, delay=0.3, backoff=1, max_delay=1, ignore_expire=False) zk = KazooClient(hosts=zk_hosts, connection_retry=conn_retry_policy, command_retry=cmd_retry_policy) zk.start() children = zk.get_children('/') for child in children: if child == 'zookeeper': continue zk.delete('/' + child, recursive=True) yield zk zk.stop() zk.close() subprocess.check_call(['docker', 'rm', '-f', zk_container_name])
class ZkOpers(object): zk = None rootPath = "/letv/javaContainer/jetty" confOpers = ConfigFileOpers() ''' classdocs ''' def __init__(self): ''' Constructor ''' self.zkaddress, self.zkport = get_zk_address() self.retry = KazooRetry(max_tries=3, delay=0.5) self.zk = KazooClient(hosts=self.zkaddress+':'+str(self.zkport), connection_retry=self.retry) self.zk.start() #self.zk = self.ensureinstance() logging.info("instance zk client (%s:%s)" % (self.zkaddress, self.zkport)) def close(self): try: self.zk.stop() self.zk.close() logging.info("stop the zk client successfully") except Exception, e: logging.error(e)
class ZProducer(object):
    """
    A base Zookeeper producer to be used by other producer classes.

    Args:
        hosts: Comma-separated list of hosts to connect to
               (e.g. 127.0.0.1:2181,127.0.0.1:2182)
        topic: The kafka topic to send messages to
        chroot: The kafka subdirectory to search for brokers
    """
    producer_kls = None

    def __init__(self, hosts, topic, chroot='/', **kwargs):
        if self.producer_kls is None:
            raise NotImplementedError("Producer class needs to be mentioned")

        self.zkclient = KazooClient(hosts=hosts)
        self.zkclient.start()

        # Start the producer instance
        self.client = get_client(self.zkclient, chroot=chroot)
        self.producer = self.producer_kls(self.client, topic, **kwargs)

        # Stop Zookeeper
        self.zkclient.stop()
        self.zkclient.close()
        self.zkclient = None

    def stop(self):
        self.producer.stop()
        self.client.close()
class ZkOpers(object): zk = None rootPath = "/mad3310/docker" ''' classdocs ''' def __init__(self): ''' Constructor ''' self.zkaddress, self.zkport = get_zk_address() if "" != self.zkaddress and "" != self.zkport: self.DEFAULT_RETRY_POLICY = KazooRetry( max_tries=None, max_delay=10000, ) self.zk = KazooClient(hosts=self.zkaddress + ':' + str(self.zkport), connection_retry=self.DEFAULT_RETRY_POLICY, timeout=20) self.zk.add_listener(self.listener) self.zk.start() logging.info("instance zk client (%s:%s)" % (self.zkaddress, self.zkport)) def close(self): try: self.zk.stop() self.zk.close() except Exception, e: logging.error(e)
class ValidatorDetector: def __init__(self): self.zk = KazooClient( hosts="bigdata1:2181,bigdata2:2181,bigdata3:2181") self.validator_children_watcher = ChildrenWatch(client=self.zk, path="/servers", func=self.watcher_func) self.zk.start() def watcher_func(self, children): print("----------------------start-----------------------------") for c in children: data, stat = self.zk.get('/servers/' + c) if stat: print("Children is:" + c + "\n" + "Data is:", str(data, encoding='utf-8')) print("-----------------------end----------------------------") print("\n\n") def create_node(self, hostname): self.zk.create('/servers/server', bytes(hostname, encoding='utf-8'), ephemeral=True, sequence=True, makepath=True) def close_client(self, hostname): self.zk.close()
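A short usage sketch for ValidatorDetector, assuming the bigdata1-3 ensemble named in the snippet is reachable; the registered node disappears automatically when the session ends because it is ephemeral:

import socket
import time

detector = ValidatorDetector()                 # starts the client and the /servers watch
detector.create_node(socket.gethostname())     # /servers/serverNNNNNNNNNN, ephemeral + sequential
time.sleep(30)                                 # keep the session (and the znode) alive briefly
detector.close_client(socket.gethostname())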
class ZooKeeper(object): def __init__(self, hosts="127.0.0.1:2181", prefix="/"): prefix = prefix.rstrip("/") if prefix and not prefix.startswith("/"): raise ValueError("prefix must start with /") self._prefix = prefix self._zk = KazooClient(hosts=hosts, handler=_get_zk_handler()) self._zk.start() def __del__(self): self.close() def _path(self, path): if self._prefix: return "{0}/{1}".format(self._prefix, path) return path def close(self): if not self._zk: return try: self._zk.stop() self._zk.close() except Exception: pass finally: self._zk = None def create(self, path, value="", makepath=True): self._zk.create(self._path(path), value, makepath=makepath) def delete(self, path, recursive=True): self._zk.delete(self._path(path), recursive=recursive) def set(self, path, value, makepath=True): path = self._path(path) try: self._zk.set(path, value) except zkexc.NoNodeError: if not makepath: raise self._zk.create(path, value, makepath=True) def get(self, path, none=True): try: return self._zk.get(self._path(path)) except zkexc.NoNodeError: if none: return None raise def ls(self, path, none=True): try: return self._zk.get_children(self._path(path)) except zkexc.NoNodeError: if none: return None raise
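A usage sketch for the prefix-aware wrapper above, assuming a ZooKeeper server on 127.0.0.1:2181; every path is stored under the chroot-style /myapp prefix:

zk = ZooKeeper(hosts="127.0.0.1:2181", prefix="/myapp")
zk.set("config/feature_flag", b"on")          # lands at /myapp/config/feature_flag
print(zk.get("config/feature_flag"))          # (value, ZnodeStat) tuple, or None if missing
print(zk.ls("config"))                        # children of /myapp/config
zk.close()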
def user(proj=None):
    session = request.environ.get('beaker.session')
    if not session or not session.has_key('trinity_user'):
        redirect('/', code=302)
    result = {}
    result['user'] = session['trinity_user']
    user = request.query.opereduser
    user = user if user else result['user']
    result['opereduser'] = user
    result['my_roles'] = user_map[result['user']][1]
    result['roles'] = ',' + user_map[user][1].lstrip('(').rstrip(')') + ','
    result['projs'] = ',' + user_map[user][2].lstrip('(').rstrip(')') + ','
    zk = KazooClient(hosts['dev'])
    projects = []
    userName = request.forms.get('userName')
    password = request.forms.get('password')
    oldpassword = request.forms.get('oldpassword')
    newpassword = request.forms.get('newpassword')
    opereduser = request.forms.get('opereduser')
    rolelist = request.forms.get('rolelist')
    projectlist = request.forms.get('projectlist')
    try:
        zk.start()
        projects = getProjects(zk)
        result['projects'] = projects
        if proj == 'modifyPassword' and opereduser:
            if result['user'] == 'admin' and opereduser != 'admin':
                modifyPassword(opereduser, newpassword)
                user_map[opereduser][0] = sha1(newpassword).hexdigest()
                redirect('/user', code=302)
            else:
                if user_map[user][0] == sha1(oldpassword).hexdigest():
                    modifyPassword(user, newpassword)
                    user_map[user][0] = sha1(newpassword).hexdigest()
                    redirect('/user', code=302)
                else:
                    result['warn'] = 'The old password is incorrect!'
        if proj == 'addUser':
            if result['user'] == 'admin' and userName:
                addUser(userName, password)
                user_info = [sha1(password).hexdigest(), '()', '()']
                user_map[userName] = user_info
                redirect('/user', code=302)
            else:
                result['warn'] = 'No permission to add users!'
        if proj == 'modifyAuthority':
            if result['user'] == 'admin' and opereduser:
                modifyAuthority(opereduser, rolelist, projectlist)
                user_map[opereduser][1] = '(' + rolelist + ')'
                user_map[opereduser][2] = '(' + projectlist + ')'
                redirect('/user?opereduser=' + opereduser, code=302)
            else:
                result['warn'] = 'No permission to modify projects!'
    finally:
        zk.stop()
        zk.close()
    if result['user'] == 'admin':
        result['userlist'] = user_map.keys()
    return result
class ZkHelper(object): def __init__(self, address='', port=''): assert address and port self.zk_address = address self.zk_port = port self.retry = KazooRetry(max_delay=10000, max_tries=None) self.zk = KazooClient(hosts='%s:%s' % (self.zk_address, self.zk_port), connection_retry=self.retry, timeout=20) self.zk.add_listener(self._listener) self.zk.start() logging.info("instance zk client start (%s:%s)" % (self.zk_address, self.zk_port)) @staticmethod def _listener(state): if state == KazooState.LOST: logging.info( "zk connect lost, stop this connection and then start new one!" ) elif state == KazooState.SUSPENDED: logging.info( "zk connect suspended, stop this connection and then start new one!" ) def write(self, path, data): self.zk.ensure_path(path) self.retry(self.zk.set, path, data) logging.info("write data:%s to path:%s" % (data, path)) def ensure_path(self, path): self.zk.ensure_path(path) def get_lock(self, path): return self.zk.Lock(path, threading.currentThread()) def read(self, path): if self.zk.exists(path): data = self.retry(self.zk.get, path) logging.info("read data:%s from path:%s" % (data, path)) return data[0] logging.info("path:%s not exist" % path) def get_children_list(self, path): if self.zk.exists(path): data = self.retry(self.zk.get_children, path) logging.info("get children:%s from path:%s" % (data, path)) return data logging.info("path:%s not exist" % path) def exists(self, path): return self.zk.exists(path) def get_lock(self, path): lock = self.retry(self.zk.Lock, path, threading.current_thread()) return lock def close(self): self.zk.close()
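A compact usage sketch for ZkHelper, assuming a ZooKeeper server on 127.0.0.1:2181; get_lock returns a kazoo Lock recipe, so it can be used as a context manager:

helper = ZkHelper(address="127.0.0.1", port="2181")
helper.write("/demo/config", b"v1")
with helper.get_lock("/demo/lock"):            # blocks until the lock is acquired
    print(helper.read("/demo/config"))         # -> b'v1'
helper.close()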
class Callback:
    def __init__(self, zk_hosts, zk_root):
        self.zk = KazooClient(zk_hosts)
        self.root = zk_root
        self.event = threading.Event()
        self.tasks = {}

    def get_task(self, task_id):
        node = '/{}/tasks/{}'.format(self.root, task_id)
        data, _ = self.zk.get(node)
        task = json.loads(data.decode())
        targets = {}
        for target in self.zk.get_children('{}/targets'.format(node)):
            path = '{}/targets/{}'.format(node, target)
            status, _ = self.zk.get(path)
            targets[target] = status.decode()
        task['targets'] = targets
        return task

    def delete(self, task_id):
        callback_node = '/{}/callback/{}'.format(self.root, task_id)
        task_node = '/{}/tasks/{}'.format(self.root, task_id)
        tx = self.zk.transaction()
        tx.delete(callback_node)
        tx.delete(task_node)
        tx.commit()

    def run(self, task_id):
        task = self.get_task(task_id)
        try:
            requests.post(task['callback'], json=task)
            self.delete(task_id)
        except Exception as e:
            logging.error(e)

    def watch_tasks(self, tasks):
        for task_id in set(tasks).difference(self.tasks):
            self.run(task_id)
        self.tasks = tasks
        return not self.event.is_set()

    def watch(self):
        ChildrenWatch(self.zk, '/{}/callback'.format(self.root), self.watch_tasks)

    def compensate(self):
        while not self.event.is_set():
            for task in self.zk.get_children('/{}/callback'.format(self.root)):
                self.run(task)
            self.event.wait(10)

    def start(self):
        self.zk.start()
        self.watch()
        self.compensate()

    def shutdown(self):
        self.event.set()
        self.zk.stop()
        self.zk.close()
def test_end_of_session(started_cluster): fake_zk1 = None fake_zk2 = None genuine_zk1 = None genuine_zk2 = None try: fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk1.start() fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk2.start() genuine_zk1 = cluster.get_kazoo_client('zoo1') genuine_zk1.start() genuine_zk2 = cluster.get_kazoo_client('zoo1') genuine_zk2.start() fake_zk1.create("/test_end_of_session") genuine_zk1.create("/test_end_of_session") fake_ephemeral_event = None def fake_ephemeral_callback(event): print("Fake watch triggered") nonlocal fake_ephemeral_event fake_ephemeral_event = event genuine_ephemeral_event = None def genuine_ephemeral_callback(event): print("Genuine watch triggered") nonlocal genuine_ephemeral_event genuine_ephemeral_event = event assert fake_zk2.exists("/test_end_of_session") is not None assert genuine_zk2.exists("/test_end_of_session") is not None fake_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) genuine_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) assert fake_zk2.exists("/test_end_of_session/ephemeral_node", watch=fake_ephemeral_callback) is not None assert genuine_zk2.exists("/test_end_of_session/ephemeral_node", watch=genuine_ephemeral_callback) is not None print("Stopping genuine zk") genuine_zk1.stop() print("Closing genuine zk") genuine_zk1.close() print("Stopping fake zk") fake_zk1.stop() print("Closing fake zk") fake_zk1.close() assert fake_zk2.exists("/test_end_of_session/ephemeral_node") is None assert genuine_zk2.exists("/test_end_of_session/ephemeral_node") is None assert fake_ephemeral_event == genuine_ephemeral_event finally: for zk in [fake_zk1, fake_zk2, genuine_zk1, genuine_zk2]: stop_zk(zk)
def zk_client(hosts: str = ','.join(ConfigParams().zookeepernodes)): zk = KazooClient(hosts) zk.start() try: yield zk finally: zk.stop() zk.close()
def get_num_znodes(host): temp = {} zk = KazooClient(hosts=host, read_only=True) zk.start() temp["gmas"] = len(zk.get_children('/home/gmas')) temp["amas"] = len(zk.get_children('/home/amas')) zk.stop() zk.close() return temp
def _zk_client(self): zk = KazooClient(hosts=self._hosts) zk.start() try: yield zk finally: zk.stop() zk.close()
class ZooKeeperTestMixin(object): zk_hosts = None _zk_hosts_internal = None zk_base_path = None proxy = None def setup_zookeeper(self, base_path_prefix="/int_tests", use_proxy=False): zk_hosts = os.environ.get("ZK_HOSTS") if not zk_hosts: raise unittest.SkipTest("export ZK_HOSTS env to run ZooKeeper integration tests") if use_proxy: hosts_list = zk_hosts.split(",") if len(hosts_list) == 1: self.proxy = SocatProxy(zk_hosts) self.proxy.start() self.zk_hosts = self.proxy.address else: proxies = [SocatProxy(host) for host in hosts_list] self.proxy = MultiProxy(proxies) self.proxy.start() self.zk_hosts = ",".join(proxy.address for proxy in proxies) self._zk_hosts_internal = zk_hosts else: self.zk_hosts = self._zk_hosts_internal = zk_hosts self.zk_base_path = base_path_prefix + uuid.uuid4().hex if os.environ.get('EPU_USE_GEVENT'): from kazoo.handlers.gevent import SequentialGeventHandler handler = SequentialGeventHandler() self.use_gevent = True else: handler = None self.use_gevent = False self.kazoo = KazooClient(self._zk_hosts_internal + self.zk_base_path, handler=handler) self.kazoo.start() def teardown_zookeeper(self): if self.kazoo: try: self.kazoo.delete("/", recursive=True) self.kazoo.stop() self.kazoo.close() except Exception: log.exception("Problem tearing down ZooKeeper") if self.proxy: self.proxy.stop() cleanup_zookeeper = teardown_zookeeper
class ZkUtil(object): def __init__(self): self.zk = KazooClient(hosts=CONFIG["ZK_HOST"]) try: self.zk.start() except KazooTimeoutError as e: e.__traceback__ @property def client(self): return self.zk def exists(self, path: str): if self.zk.client_state == KeeperState.CONNECTED: return self.zk.exists(path) else: return None def get_value(self, path: str): if self.zk.client_state == KeeperState.CONNECTED: return self.zk.get(path)[0].decode() else: return None def create(self, path: str, value): if isinstance(value, int): value = str(value) if self.zk.client_state == KeeperState.CONNECTED: if self.zk.exists(path): self.zk.delete(path, recursive=True) self.zk.create(path=path, value=value.encode(), makepath=True) def create_ephemeral(self, path: str, value): if isinstance(value, int): value = str(value) if self.zk.client_state == KeeperState.CONNECTED: if self.zk.exists(path): self.zk.delete(path, recursive=True) self.zk.create(path=path, value=value.encode(), ephemeral=True, makepath=True) def delete(self, path): self.zk.delete(path, recursive=True) def counter(self, path): if self.zk.client_state == KeeperState.CONNECTED: if self.zk.exists(path): self.zk.delete(path, recursive=True) return self.zk.Counter(path) else: return 0 def stop(self): try: self.zk.close() except Exception as e: e.__traceback__
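A short usage sketch for ZkUtil, assuming CONFIG["ZK_HOST"] in the surrounding module points at a reachable ensemble:

util = ZkUtil()
util.create("/demo/leader", "worker-1")     # replaces any existing node at that path
print(util.get_value("/demo/leader"))       # -> 'worker-1'
util.delete("/demo")
util.stop()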
def processSymbol(): try: conn = psycopg2.connect(dbConnectStr) cur = conn.cursor() zk = KazooClient(hosts=zkHost) zk.start() symbolq = LockingQueue(zk, cfg.symbol) while True: rawCode = symbolq.get() ints = datetime.now() inload = os.getloadavg()[0] symbol = rawCode.decode().split('||')[0] globalId = rawCode.decode().split('||')[1] symbolq.consume() alias = '' while not alias: print("loop for the alias of {0}".format(globalId)) cur.execute(""" select alias from player0 where "globalId" = %s """, [globalId]) res = cur.fetchone() conn.commit() if res: alias = res[0] print("process symbol:{0} alias:{1} globalId:{2}".format(symbol, alias, globalId)) lock0 = zk.Lock(symbol, 'jg') with lock0: # the operation cmd = "cd /{4}/;{3}/openssl-1.0.2o/apps/openssl genrsa 2048| {3}/openssl-1.0.2o/apps/openssl asn1parse|/{4}/parseoutput.pl; /usr/bin/perl /{4}/makeissuer.pl '{0}' '{1}' '{2}'".format(alias, symbol, globalId, baseDir, workshopInstance) f = os.popen(cmd) while True: en = f.readline() if en == '': break cur.execute(""" update symbol_redo set progress= 0, setup=now() where symbol=%s """, [symbol]) conn.commit() outts = datetime.now() outload = os.getloadavg()[0] # insert the runtime info into c* cluster = Cluster(cfg.cassCluster) session = cluster.connect(cfg.cassKeyspace) stmt = SimpleStatement("""insert into runstat(id,executable,ints,inload,outts,outload) values (%s, %s, %s, %s, %s, %s)""", consistency_level=ConsistencyLevel.ANY) session.execute(stmt, (uuid.uuid4(), executable, ints, inload, outts, outload)) except psycopg2.Error as err: print("SQLError {0}".format(err)) finally: zk.stop() zk.close() cur.close() conn.close()
def processdig(self,pq,step0): #print "pq = {0}\n step0 = {1}".format(pq, step0) cl = Cluster(['192.168.0.64']) se = cl.connect('md') r = se.execute("select d, textid from player0 where pq= %s",[pq]) d = r[0][0] src = r[0][1] #The transaction source cmd = "/home/u/md/step1 {0} {1} {2}".format(pq, d, step0) f = os.popen(cmd) dig = '' while True: en = f.readline() if(en == ''): break dig += en # print "step1 = {0}".format(dig) cmd = "/home/u/md/step2 {0} {1}".format(pq,dig) f = os.popen(cmd) note = '' while True: en = f.readline() if(en == ''): break; note += en note = note.strip() #transfer it to literal text print "step2 = {0}".format(note) st = self.encodefromhex(note) print "note = {0}".format(st) target = st.split('->')[1][:-2] #the version is appended at the end of the target version = target.split('@@')[1]; target = target.split('@@')[0]; print "target = {0}, version = {1}\n".format(target, version) noteid = st.split('->')[0][2:] reply = "{0} does not own the note:{1}".format(src, noteid) #lock the noteid, and perform the check + update zk = KazooClient(hosts='192.168.0.64:2181') zk.start() lock0 = zk.Lock(noteid, "jg") print "put the lock @{0}".format(noteid) with lock0: if(self.verify(src, noteid, version ) == True): self.save2db(pq, step0, dig, noteid, src, target, version) reply = '^^'+pq+'@@'+dig.strip()+'$$' zk.stop() zk.close() print reply count = len(reply) co2 = "%8d" % count self.request.send(co2) self.request.send(reply)
def get_clusterstate_json(self): logging.basicConfig() zk = KazooClient(hosts=self.zk_server_port, read_only=True) zk.start() data, stat = zk.get("/clusterstate.json") self.clusterstate_json = json.loads(data.decode("utf-8")) zk.stop() zk.close()
def _zk_client( self): # TODO: replace with openeogeotrellis.utils.zk_client() zk = KazooClient(hosts=self._hosts) zk.start() try: yield zk finally: zk.stop() zk.close()
def _clean_up_zookeeper_autoscaling(context): """If max_instances was set for autoscaling, clean up zookeeper""" client = KazooClient(hosts='%s/mesos-testcluster' % get_service_connection_string('zookeeper'), read_only=True) client.start() try: client.delete('/autoscaling', recursive=True) except NoNodeError: pass client.stop() client.close()
def inform_zookeeper(topic): zk = KazooClient(hosts="127.0.0.1:2182") zk.start() publish_logger.debug("nodes contained in zookeeper root directory: " + str(zk.get_children("/"))) topic_name = "/" + topic if topic not in zk.get_children("/"): zk.create(topic_name, b'127.0.0.1:2182') zk.stop() zk.close()
def _clean_up_zookeeper_autoscaling(context): """If max_instances was set for autoscaling, clean up zookeeper""" if 'max_instances' in context: client = KazooClient(hosts='%s/mesos-testcluster' % get_service_connection_string('zookeeper'), read_only=True) client.start() try: client.delete('/autoscaling', recursive=True) except NoNodeError: pass client.stop() client.close()
def _receive(self): server, port, buf_que = self._createThriftServer() kazoo_client = KazooClient(self.zk_address) kazoo_client.start() path = '%s/%s:%d' % (self.zk_path, socket.gethostname(), port) kazoo_client.create(path, ephemeral=True, makepath=True) while not server._stop: try: message = buf_que.pop() yield message except: time.sleep(0.1) server.close() kazoo_client.close()
def read(self): log.info('Set up ZK client using host(s): %s', self._hosts) zk = KazooClient(hosts=self._hosts) zk.start() self._zk = zk try: # This may raise various kazoo.exceptions.* types. data, stat = self._readloop() finally: log.info('Shut down ZK client.') try: zk.stop() finally: zk.close() log.info('Foreign payload stat: %s', stat) return data
class DMSInventoryManager(object): def __init__(self): config = ServiceContext().getConfigService() self.zk_address = config.get("Inventory","zk_address") self.root_path = config.get("Inventory","zk_root_path") self.zk_client = KazooClient(hosts=self.zk_address) def start(self): self.zk_client.start() def stop(self): self.zk_client.stop() self.zk_client.close() def getservice(self,accountId): """ :param accountId: :return: """ services = [] service_path = os.path.join(self.root_path,accountId,"services") children = self.zk_client.get_children(service_path) for child in children: services.append(child) return services def getinstancebyservice(self,accountId,service): parent_path = os.path.join(self.root_path,accountId,"services",service,"instances") nodes = [] children = self.zk_client.get_children(parent_path) for child in children: nodepath = os.path.join(parent_path,child) print nodepath data,stats = self.zk_client.get(nodepath) map = json.loads(data) ret = {} ret["vmType"] = service ret["accountId"] = accountId ret["stackId"] = map.get("id","") ret["vmManagementIP"] = map.get("manageip",None) ret["vmPublicIP"] = map.get("publicip",None) ret["vmServiceIP"] = map.get("serviceip",None) ret["eventName"] = "CREATE_VM" nodes.append(ret) return nodes
def manager(proj=None, env='dev'):
    session = request.environ.get('beaker.session')
    if not session or not session.has_key('trinity_user'):
        redirect('/', code=302)
    result = {}
    result['user'] = session['trinity_user']
    result['roles'] = user_map[result['user']][1]
    result['user_projects'] = user_map[result['user']][2]
    if not hosts.has_key(env):
        keys = sorted(hosts.keys())
        env = keys[0]
    zk = KazooClient(hosts[env])
    projects = []
    key = request.forms.get('configkey')
    value = request.forms.get('configvalue')
    comment = request.forms.get('configcomment')
    action = request.forms.get('action')
    try:
        zk.start()
        projects = getProjects(zk)
        result['projects'] = sorted(projects, key=lambda a: a.split('-')[0])
        result['current'] = proj if proj else projects[0]
        result['env'] = env
        result['hosts'] = hosts
        if action:
            zkOp = zkOperator(zk, result['current'], env, action)
            log.info('[%s:%s] %s is %s key: %s ===> %s -- %s' % (env, result['current'], result['user'], action, key, value, comment))
            zkOp(key, value, comment)
            if env not in result['roles'] or result['current'] not in result['user_projects']:
                result['warn'] = 'Sorry, you do not have permission to operate in the ' + env + ' environment!'
                return result
        zkOp = zkOperator(zk, result['current'], env, 'get')
        result['config'] = zkOp()
        if action == 'delete' or action == 'add':
            redirect('/manager/' + result['current'] + '/' + result['env'], code=302)
    finally:
        zk.stop()
        zk.close()
    return result
def wrapper(): try: conn = psycopg2.connect(dbConnectStr) cur = conn.cursor() zk = KazooClient(hosts=zkHost) zk.start() propTranq = LockingQueue(zk, cfg.proposeTran) while True: rawC = propTranq.get() ints = datetime.now() inload = os.getloadavg()[0] alias = rawC.decode().split('&&')[0] rawCode = rawC.decode().split('&&')[1] lastsig = rawC.decode().split('&&')[2] entryId = rawC.decode().split('&&')[3] globalId = rawC.decode().split('&&')[4] pro1 = Popen(['/usr/bin/python36','/{0}/payer{1}.py'.format(workshopInstance, alias), rawCode, lastsig, globalId], stdin=None, stdout=None) pro1.wait() cur.execute("""update propose_transfer set progress = 0 , setup = now() where id = %s """, [entryId]) conn.commit() propTranq.consume() outts = datetime.now() outload = os.getloadavg()[0] # insert the runtime info into c* cluster = Cluster(cfg.cassCluster) session = cluster.connect(cfg.cassKeyspace) stmt = SimpleStatement("""insert into runstat(id,executable,ints,inload,outts,outload) values (%s, %s, %s, %s, %s, %s)""", consistency_level=ConsistencyLevel.ANY) session.execute(stmt, (uuid.uuid4(), executable, ints, inload, outts, outload)) except psycopg2.Error as err: print("SQLError {0}".format(err)) finally: zk.stop() zk.close() cur.close() conn.close()
def create_from_zookeeper(cls, zkconnect): log.info("Connecting to zookeeper {0}".format(zkconnect)) try: zk = KazooClient(zkconnect) zk.start() except Exception as e: raise ZookeeperException("Cannot connect to Zookeeper: {0}".format(e)) # Get broker list cluster = cls() for b in zk.get_children("/brokers/ids"): broker_data, bstat = zk.get("/brokers/ids/{0}".format(b)) cluster.add_broker(Broker.create_from_json(int(b), broker_data)) if cluster.num_brokers() == 0: raise ZookeeperException("The cluster specified does not have any brokers") # Get current partition state log.info("Getting partition list from Zookeeper") for topic in zk.get_children("/brokers/topics"): zdata, zstat = zk.get("/brokers/topics/{0}".format(topic)) zj = json.loads(zdata) newtopic = Topic(topic, len(zj['partitions'])) for partition in zj['partitions']: for i, replica in enumerate(zj['partitions'][partition]): if replica not in cluster.brokers: # Hit a replica that's not in the ID list (which means it's dead) # We'll add it, but trying to get sizes will fail as we don't have a hostname cluster.add_broker(Broker(replica, None)) newtopic.partitions[int(partition)].add_replica(cluster.brokers[replica], i) cluster.add_topic(newtopic) if cluster.num_topics() == 0: raise ZookeeperException("The cluster specified does not have any topics") log.info("Closing connection to zookeeper") zk.stop() zk.close() return cluster
def save2db(self, pq, step0, dig, note, src, target, ver): cl = Cluster(['192.168.0.64']) se = cl.connect('md') zk = KazooClient(hosts='192.168.0.64:2181') zk.start() zkc = zk.Counter("/globalid", default=0x700) zkc += 1 entryId = zkc.value zk.stop() zk.close() se.execute("insert into logentry0(id, pq, step0, dig, note, setup, dest, ver) values(%s, %s, %s, %s, %s, dateof(now()), %s, %s)", [int(entryId), pq.strip(), step0.strip(), dig.strip(), note.strip(), target.strip(), int(ver)]) #update the ownership if src != 'centralbank': se.execute("update ownership0 set owner = %s , ver = %s,updated= dateof(now()) where note = %s", [target.strip(), int(ver), note]) else: zk = KazooClient(hosts='192.168.0.64:2181') zk.start() zkc = zk.Counter("/globalid", default=0x700) zkc += 1 id2 = zkc.value zk.stop() zk.close() se.execute("insert into ownership0(id,note,owner,updated, ver) values(%s,%s,%s,dateof(now()), 0 )", [int(id2),note,target])
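The /globalid pattern above leans on kazoo's Counter recipe; a standalone sketch of just that piece, assuming a ZooKeeper server on 127.0.0.1:2181 (the path and default come from the snippet):

from kazoo.client import KazooClient

zk = KazooClient(hosts="127.0.0.1:2181")
zk.start()
counter = zk.Counter("/globalid", default=0x700)
counter += 1                      # atomic increment backed by a versioned set()
entry_id = counter.value
print("next id:", entry_id)
zk.stop()
zk.close()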
def save2db(self, pq, step0, dig, note, src, target): #print "pq = {0} step0 = {1}".format(pq, step0) cl = Cluster(['XXXX']) se = cl.connect('nv') zk = KazooClient(hosts='XXXX:2181') zk.start() zkc = zk.Counter("/globalid", default=0x8000) zkc += 1 entryId = zkc.value zk.stop() zk.close() se.execute("insert into logentry0(id,pq,step0,dig,note,valid,setup,dest) values(%s, %s, %s, %s, %s, 1, dateof(now()),%s)", [int(entryId), pq.strip(), step0.strip(), dig.strip(), note.strip(),target.strip()]) #update the ownership if src != 'centralbank': se.execute("update ownership0 set owner = %s , updated= dateof(now()) where note = %s", [target, note]) else: zk = KazooClient(hosts='XXXX:2181') zk.start() zkc = zk.Counter("/globalid", default=0x8000) zkc += 1 id2 = zkc.value zk.stop() zk.close() se.execute("insert into ownership0(id,note,owner,updated) values(%s,%s,%s,dateof(now()))", [int(id2),note,target])
def report(): zk_quorums = os.getenv('MONITORED_ZOOKEEPER_QUORUMS') if zk_quorums is None: raise RuntimeError('MONITORED_ZOOKEEPER_QUORUMS not found') for zk_quorum in (x for x in zk_quorums.split('|') if x): zk = KazooClient(hosts=zk_quorum) zk.start() for metric in ELAPSED_SECONDS_METRICS: metric_path = STATUS_ROOT + metric if zk.exists(metric_path) is None: continue value, _ = zk.get(metric_path) time_ = int(time.time()) elapsed = max(0, time_ - int(value) / 1000) tags = { 'zkquorum': zk_quorum.replace(',', '_') } print format_tsd_key(metric + '.elapsedSeconds', elapsed, time_, tags) zk.stop() zk.close()
class TestServiceDiscovery(unittest.TestCase): def setUp(self): self.maxDiff = None logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s[%(lineno)d] %(threadName)s %(message)s", level=logging.WARN) self.log = logging.getLogger() self.basePath = "/discovery_test_%x" % int(time.time()) self.log.info("Using base path: %s" % self.basePath) self.client = KazooClient(hosts="127.0.0.1:2181") self.client.start() self._clean() self.discovery = ServiceDiscovery(self.client, self.basePath) def _clean(self): try: self.client.delete(self.basePath,recursive=True) except NoNodeError: pass def tearDown(self): self.discovery.close() self._clean() self.client.stop() self.client.close() def test_paths(self): svc1 = ServiceInstance.builder().id("instance1").name("service1").build() svc2 = ServiceInstance.builder().id("instance2").name("service1").build() svc3 = ServiceInstance.builder().id("foo1").name("foo").build() self.assertEquals(self.basePath + "/service1/instance1", self.discovery.pathForInstance(svc1.getName(),svc1.getId())) self.assertEquals(self.basePath + "/service1/instance2", self.discovery.pathForInstance(svc2.getName(),svc2.getId())) self.assertEquals(self.basePath + "/foo/foo1", self.discovery.pathForInstance(svc3.getName(),svc3.getId())) def test_reg_and_dereg(self): svc1 = ServiceInstance.builder().id("instance1").name("service1").build() svc2 = ServiceInstance.builder().id("instance2").name("service1").build() svc3 = ServiceInstance.builder().id("foo1").name("foo").build() self.discovery.registerService(svc1) self.discovery.registerService(svc2) self.discovery.registerService(svc3) self.assertTrue(self.client.exists(self.discovery.pathForInstance(svc1.getName(),svc1.getId()))) self.assertTrue(self.client.exists(self.discovery.pathForInstance(svc2.getName(),svc2.getId()))) self.assertTrue(self.client.exists(self.discovery.pathForInstance(svc3.getName(),svc3.getId()))) self.discovery.unregisterService(svc1) self.discovery.unregisterService(svc2) self.discovery.unregisterService(svc3) self.assertFalse(self.client.exists(self.discovery.pathForInstance(svc1.getName(),svc1.getId()))) self.assertFalse(self.client.exists(self.discovery.pathForInstance(svc2.getName(),svc2.getId()))) self.assertFalse(self.client.exists(self.discovery.pathForInstance(svc3.getName(),svc3.getId()))) def test_query(self): svc1 = ServiceInstance.builder().id("instance1").name("service1").build() svc2 = ServiceInstance.builder().id("instance2").name("service1").build() svc3 = ServiceInstance.builder().id("foo1").name("foo").build() self.discovery.registerService(svc1) self.discovery.registerService(svc2) self.discovery.registerService(svc3) self.assertEquals(sorted(["foo", "service1"]), sorted(self.discovery.queryForNames())) instances = self.discovery.queryForInstances("service1") self.assertEquals(2, len(instances)) self.assertEquals(sorted([svc1, svc2]), sorted(instances)) # make sure unregister works self.discovery.unregisterService(svc2) instances = self.discovery.queryForInstances("service1") self.assertEquals(1, len(instances)) self.assertEquals(sorted([svc1]), sorted(instances)) instance = self.discovery.queryForInstance("service1","instance1") self.assertTrue(instance) self.assertEquals(svc1, instance)
class ZookeeperClient(object): def __init__(self, hosts=None, read_only=True): self.hosts = hosts if hosts else ENSEMBLE.get() self.read_only = read_only hdfs = cluster.get_hdfs() if hdfs is None: raise ZookeeperConfigurationException('No [hdfs] configured in hue.ini.') if hdfs.security_enabled: self.sasl_server_principal = PRINCIPAL_NAME.get() else: self.sasl_server_principal = None self.zk = KazooClient(hosts=self.hosts, read_only=self.read_only, sasl_server_principal=self.sasl_server_principal) def start(self): """Start the zookeeper session.""" self.zk.start() def stop(self): """Stop the zookeeper session, but leaves the socket open.""" self.zk.stop() def close(self): """Closes a stopped zookeeper socket.""" self.zk.close() def get_children_data(self, namespace): children = self.zk.get_children(namespace) children_data = [] for node in children: data, stat = self.zk.get("%s/%s" % (namespace, node)) children_data.append(data) return children_data def path_exists(self, namespace): return self.zk.exists(namespace) is not None def set(self, path, value, version=-1): return self.zk.set(path, value, version) def copy_path(self, namespace, filepath): if self.read_only: raise ReadOnlyClientException('Cannot execute copy_path when read_only is set to True.') self.zk.ensure_path(namespace) for dir, subdirs, files in os.walk(filepath): path = dir.replace(filepath, '').strip('/') if path: node_path = '%s/%s' % (namespace, path) self.zk.create(path=node_path, value='', makepath=True) for filename in files: node_path = '%s/%s/%s' % (namespace, path, filename) with open(os.path.join(dir, filename), 'r') as f: file_content = f.read() self.zk.create(path=node_path, value=file_content, makepath=True) def delete_path(self, namespace): if self.read_only: raise ReadOnlyClientException('Cannot execute delete_path when read_only is set to True.') self.zk.delete(namespace, recursive=True) def __enter__(self): """Start a zookeeper session and return a `with` context.""" self.zk.start() return self def __exit__(self, exc_type, exc_value, traceback): """Stops and closes zookeeper session at the end of the `with` context.""" try: self.stop() finally: self.close()
class ZooKeeper(object): # Constants used by the REST API: LIVE_NODES_ZKNODE = '/live_nodes' ALIASES = '/aliases.json' CLUSTER_STATE = '/clusterstate.json' SHARDS = 'shards' REPLICAS = 'replicas' STATE = 'state' ACTIVE = 'active' LEADER = 'leader' BASE_URL = 'base_url' TRUE = 'true' FALSE = 'false' COLLECTION = 'collection' def __init__(self, zkServerAddress, zkClientTimeout=15, zkClientConnectTimeout=15): if KazooClient is None: logging.error('ZooKeeper requires the `kazoo` library to be installed') raise RuntimeError self.collections = {} self.liveNodes = {} self.aliases = {} self.state = None self.zk = KazooClient(zkServerAddress, read_only=True) self.zk.start() def connectionListener(state): if state == KazooState.LOST: self.state = state elif state == KazooState.SUSPENDED: self.state = state self.zk.add_listener(connectionListener) @self.zk.DataWatch(ZooKeeper.CLUSTER_STATE) def watchClusterState(data, *args, **kwargs): if not data: LOG.warning("No cluster state available: no collections defined?") else: self.collections = json.loads(data.decode('utf-8')) LOG.info('Updated collections: %s', self.collections) @self.zk.ChildrenWatch(ZooKeeper.LIVE_NODES_ZKNODE) def watchLiveNodes(children): self.liveNodes = children LOG.info("Updated live nodes: %s", children) @self.zk.DataWatch(ZooKeeper.ALIASES) def watchAliases(data, stat): if data: json_data = json.loads(data.decode('utf-8')) if ZooKeeper.COLLECTION in json_data: self.aliases = json_data[ZooKeeper.COLLECTION] else: LOG.warning('Expected to find %s in alias update %s', ZooKeeper.COLLECTION, json_data.keys()) else: self.aliases = None LOG.info("Updated aliases: %s", self.aliases) def __del__(self): # Avoid leaking connection handles in Kazoo's atexit handler: self.zk.stop() self.zk.close() def getHosts(self, collname, only_leader=False, seen_aliases=None): if self.aliases and collname in self.aliases: return self.getAliasHosts(collname, only_leader, seen_aliases) hosts = [] if collname not in self.collections: raise SolrError("Unknown collection: %s", collname) collection = self.collections[collname] shards = collection[ZooKeeper.SHARDS] for shardname in shards.keys(): shard = shards[shardname] if shard[ZooKeeper.STATE] == ZooKeeper.ACTIVE: replicas = shard[ZooKeeper.REPLICAS] for replicaname in replicas.keys(): replica = replicas[replicaname] if replica[ZooKeeper.STATE] == ZooKeeper.ACTIVE: if not only_leader or (replica.get(ZooKeeper.LEADER, None) == ZooKeeper.TRUE): base_url = replica[ZooKeeper.BASE_URL] if base_url not in hosts: hosts.append(base_url) return hosts def getAliasHosts(self, collname, only_leader, seen_aliases): if seen_aliases: if collname in seen_aliases: LOG.warn("%s in circular alias definition - ignored", collname) return [] else: seen_aliases = [] seen_aliases.append(collname) collections = self.aliases[collname].split(",") hosts = [] for collection in collections: for host in self.getHosts(collection, only_leader, seen_aliases): if host not in hosts: hosts.append(host) return hosts def getRandomURL(self, collname): return random.choice(self.getHosts(collname, only_leader=False)) + "/" + collname def getLeaderURL(self, collname): return random.choice(self.getHosts(collname, only_leader=True)) + "/" + collname
class ZkClient():
    # Java: private static Logger LOG
    LOG = get_logger(__name__)

    # Java: final static int (values in milliseconds, as in the Java client)
    DEFAULT_CONNECTION_TIMEOUT = 60 * 1000
    DEFAULT_SESSION_TIMEOUT = 30 * 1000

    # NOTE: the original Java ZkClient exposes several constructor overloads
    # (IZkConnection vs. a zkServers string, combined with ZkSerializer /
    # PathBasedZkSerializer variants) and wraps every ZooKeeper call in
    # retryUntilConnected() with nanosecond timing logged at DEBUG level.
    # Those overloads and the retry/timing wrappers are not ported here.
    # DEFAULT_ZK_SERIALIZER = ChainedPathZkSerializer.builder(ZNRecordStreamingSerializer()).serialize(propertyStorePath, ByteArraySerializer()).build()  # TODO: more serializers?

    def __init__(self, zkServers, sessionTimeout=DEFAULT_SESSION_TIMEOUT,
                 connectionTimeout=DEFAULT_CONNECTION_TIMEOUT,
                 zkSerializer=BasicZkSerializer):
        # The Java timeouts are in milliseconds; kazoo expects seconds.
        self._connection = KazooClient(hosts=zkServers, timeout=sessionTimeout / 1000.0)
        self._zkSerializer = zkSerializer
        self._connection.start(connectionTimeout / 1000.0)
        self.LOG.info("create a new zkclient. " + repr(traceback.extract_stack()))

    def setZkSerializer(self, zkSerializer):
        """
        Returns void

        Parameters:
            zkSerializer: PathBasedZkSerializer
            (The Java overload that takes a plain ZkSerializer wraps it in
            BasicZkSerializer before assigning.)
        """
        self._zkSerializer = zkSerializer

    def getConnection(self):
        """
        Returns IZkConnection
        """
        return self._connection

    def close(self):
        """
        Returns void
        @Override

        Throws:
            ZkInterruptedException
        """
        self.LOG.info("closing a zkclient. zookeeper: " + repr(self._connection)
                      + ", callStack: " + repr(traceback.extract_stack()))
        if self._connection:
            # kazoo requires stop() before close()
            self._connection.stop()
            self._connection.close()

    def getStat(self, path):
        """
        Returns Stat

        Parameters:
            path: String
        """
        stat = self._connection.exists(path)
        return stat

    def exists(self, path, watch=None):
        """
        Returns boolean

        Parameters:
            path: String
            watch: boolean
        @Override
        Java modifiers: protected

        In Java, `watch` is a boolean; in kazoo it is a callback function,
        so the flag is ignored here.
        """
        stat = self._connection.exists(path)
        return stat

    def getChildren(self, path, watch=None):
        """
        Returns List<String>

        Parameters:
            path: String
            watch: boolean
        @Override
        Java modifiers: protected
        """
        return self._connection.get_children(path, watch)

    def deserialize(self, data, path):
        """
        Returns T

        Parameters:
            data: byte[]
            path: String
        # Annotation: @SuppressWarnings("unchecked")
        Parameterized: <T extends Object>
        """
        if data is None:
            return None
        return self._zkSerializer.deserialize(data, path)

    def copyStat(self, src, dest):
        # kazoo's ZnodeStat is a namedtuple; copy each field onto the
        # (mutable) HelixZNodeStat instance.
        for fieldName in src._fields:
            setattr(dest, fieldName, getattr(src, fieldName))

    def readData(self, *args):
        if len(args) == 3 and isinstance(args[1], HelixZNodeStat) and isinstance(args[2], bool):
            return self.readDataAndStat(*args)
        elif len(args) == 2 and isinstance(args[1], bool):
            # readData(path, nullIfNoExist)
            return self.readDataAndStat(args[0], HelixZNodeStat(), args[1])
        elif len(args) >= 2 and isinstance(args[1], HelixZNodeStat):
            return self.readDataStatInternal(*args)
        else:
            raise IllegalArgumentException("Wrong args: %s" % (args,))

    def readDataStatInternal(self, path, stat, watch=None):
        """
        Returns T

        Parameters:
            path: String
            stat: Stat
            watch: boolean
        @Override
        # Annotation: @SuppressWarnings("unchecked")
        Java modifiers: protected
        Parameterized: <T extends Object>
        """
        data, statRet = self._connection.get(path, watch)
        self.copyStat(statRet, stat)  # copy over the stats
        return self.deserialize(data, path)

    def readDataAndStat(self, path, stat, returnNullIfPathNotExists):
        """
        Returns T

        Parameters:
            path: String
            stat: Stat
            returnNullIfPathNotExists: boolean
        # Annotation: @SuppressWarnings("unchecked")
        Parameterized: <T extends Object>
        """
        data = None
        try:
            data = self.readDataStatInternal(path, stat)
        except NoNodeException:
            if not returnNullIfPathNotExists:
                raise
        return data
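A small sketch of driving the ZkClient port above. It assumes get_logger, BasicZkSerializer, and HelixZNodeStat from the same package are importable and that a quorum is reachable; the host string and paths are placeholders. readData() is only noted in a comment because it needs a concrete serializer instance.

# Hypothetical usage of ZkClient; "localhost:2181" and "/helix-demo" are placeholders.
zkclient = ZkClient("localhost:2181")

# The underlying kazoo client is reachable through getConnection():
zkclient.getConnection().ensure_path("/helix-demo")

print("exists  :", bool(zkclient.exists("/helix-demo")))
print("stat    :", zkclient.getStat("/helix-demo"))
print("children:", zkclient.getChildren("/helix-demo"))

# zkclient.readData("/helix-demo", HelixZNodeStat(), True) would additionally
# deserialize the payload, provided a serializer instance was passed to __init__.

zkclient.close()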
class KazooCommandProxy():
    def __init__(self, module):
        self.module = module
        self.zk = KazooClient(module.params['hosts'])

    def absent(self):
        return self._absent(self.module.params['name'])

    def exists(self, znode):
        return self.zk.exists(znode)

    def list(self):
        children = self.zk.get_children(self.module.params['name'])
        return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
                      'znode': self.module.params['name']}

    def present(self):
        return self._present(self.module.params['name'], self.module.params['value'])

    def get(self):
        return self._get(self.module.params['name'])

    def shutdown(self):
        self.zk.stop()
        self.zk.close()

    def start(self):
        self.zk.start()

    def wait(self):
        return self._wait(self.module.params['name'], self.module.params['timeout'])

    def _absent(self, znode):
        if self.exists(znode):
            self.zk.delete(znode, recursive=self.module.params['recursive'])
            return True, {'changed': True, 'msg': 'The znode was deleted.'}
        else:
            return True, {'changed': False, 'msg': 'The znode does not exist.'}

    def _get(self, path):
        if self.exists(path):
            value, zstat = self.zk.get(path)
            stat_dict = {}
            for i in dir(zstat):
                if not i.startswith('_'):
                    attr = getattr(zstat, i)
                    if isinstance(attr, (int, str)):
                        stat_dict[i] = attr
            result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
                            'stat': stat_dict}
        else:
            result = False, {'msg': 'The requested node does not exist.'}

        return result

    def _present(self, path, value):
        if self.exists(path):
            (current_value, zstat) = self.zk.get(path)
            # zk.get() returns bytes, so compare bytes to bytes:
            if to_bytes(value) != current_value:
                self.zk.set(path, to_bytes(value))
                return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
                              'value': value}
            else:
                return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path,
                              'value': value}
        else:
            self.zk.create(path, to_bytes(value), makepath=True)
            return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path,
                          'value': value}

    def _wait(self, path, timeout, interval=5):
        lim = time.time() + timeout

        while time.time() < lim:
            if self.exists(path):
                return True, {'msg': 'The node appeared before the configured timeout.',
                              'znode': path, 'timeout': timeout}
            else:
                time.sleep(interval)

        return False, {'msg': 'The node did not appear before the operation timed out.',
                       'timeout': timeout, 'znode': path}
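A sketch of exercising KazooCommandProxy outside of Ansible. The FakeModule stand-in, host, znode path, and value below are all hypothetical; the class itself still relies on the KazooClient, to_bytes, and time imports from its original module.

# Hypothetical driver: a minimal object that supplies the `params` dict the proxy expects.
class FakeModule:
    def __init__(self, params):
        self.params = params

module = FakeModule({
    'hosts': 'localhost:2181',     # placeholder quorum
    'name': '/ansible-demo/flag',  # placeholder znode
    'value': 'enabled',
    'recursive': True,
    'timeout': 30,
})

proxy = KazooCommandProxy(module)
proxy.start()
print(proxy.present())   # creates or idempotently updates the znode
print(proxy.get())       # value plus a dict of ZnodeStat fields
print(proxy.absent())    # deletes it again
proxy.shutdown()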
#!/usr/bin/env python
# coding=utf-8
from kazoo.client import KazooClient
from kazoo.exceptions import KazooException

if __name__ == '__main__':
    zkHosts = input("please input zookeeper hosts info:")
    serviceName = input("please input service name:")
    hostInfo = input("please input auth ip:port:")

    zk = KazooClient(hosts=zkHosts)
    try:
        zk.start()
        # check path info:
        #
    except KazooException as e:
        print(e)
    finally:
        zk.stop()
        zk.close()
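The script above leaves its "check path info" step unwritten. Purely as an illustration of one way such a check could look (the path layout and matching rule are assumptions, not taken from the script), a helper might list a service znode's children and look for the entered ip:port:

# Hypothetical sketch: report whether host_info appears among the children of a
# service znode. service_path layout is an assumption for illustration only.
from kazoo.client import KazooClient

def check_provider(zk_hosts, service_path, host_info):
    zk = KazooClient(hosts=zk_hosts)
    zk.start()
    try:
        if not zk.exists(service_path):
            print("no such service path:", service_path)
            return False
        children = zk.get_children(service_path)
        found = any(host_info in child for child in children)
        print("registered nodes:", children)
        print("found" if found else "not found", host_info)
        return found
    finally:
        zk.stop()
        zk.close()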