Example #1
def main_loop():
    logging.basicConfig()

    zk = KazooClient(hosts=zk_connect_string)
    zk.start()

    # make sure the root folders for the sendgraph and the schedules exist
    zk.ensure_path(metrics_zk_path)
    zk.ensure_path(schedule_zk_path)

    for topology in zk.get_children(metrics_zk_path):
        topology_metrics_zk_path = metrics_zk_path + "/" + topology
        print("registering watcher schedule for " + topology_metrics_zk_path)

        # register a data watch for each
        # bind the loop variable as a default argument so each watcher
        # sees its own topology rather than the loop's final value
        def watchFunc(data, stat, event, topology=topology):
            if event is not None and event.type == EventType.CHANGED:
                print("new sendgraph data for {0} at {1}".format(topology, byteArrayToInt(data)))
                schedule(zk, topology)
            return True  # returning False would disable the watch

        # install data watch
        DataWatch(zk, topology_metrics_zk_path, func=watchFunc)

        # if there is some data already, schedule immediately
        if len(zk.get_children(topology_metrics_zk_path)):
            print("existing sendgraph data for {0}".format(topology))
            schedule(zk, topology)
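
For comparison, a minimal self-contained sketch of the same pattern using kazoo's DataWatch recipe as a decorator; the ensemble address and znode path are placeholders:

from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')  # placeholder ensemble
zk.start()
zk.ensure_path('/metrics/topology-1')     # placeholder znode

@zk.DataWatch('/metrics/topology-1')
def on_change(data, stat, event):
    # invoked once at registration, then on every change to the znode
    if event is not None and stat is not None:
        print('znode changed, now at version {0}'.format(stat.version))
    return True  # returning False unregisters the watch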
Example #2
def cluster_state(pathVariable):
    try:
        zk_host = zk_map[pathVariable]
        zk = KazooClient(hosts=zk_host, read_only=True)
        zk.start()
        if pathVariable.find('kafka') > 0:
            nodes = zk.get_children('/brokers/ids')
            brokers = ""
            for id in nodes:
                data, stat = zk.get('/brokers/ids/'+id)
                jdata = json.loads(data)
                brokers += jdata['host']+"\n"
            response = ('There are ' + str(len(nodes)) +
                        ' brokers running\nids: ' +
                        ','.join(nodes) + '\nbrokers: ' +
                        brokers + '\nZK: ' + zk_host +
                        '\nThe cluster looks healthy. ')
        else:
            data, stat = zk.get('/hbase/master')
            start = data.find('bach-')
            end = data.find('.bloomberg')
            hmaster = data[start:end]
            data = zk.get_children('/hbase/rs')
            rs = ""
            for node in data:
                rs += node + "\n"
            response = ("It's a hadoop cluster\n" +
                        'hmaster: ' + hmaster +
                        '\nRegionServers: ' + rs +
                        '\nZK: ' + zk_host +
                        '\nThe cluster looks healthy.')
        # close the connection before responding
        zk.stop()
        return response, 200, {'Content-Type': 'text/plain; charset=utf-8'}
    except Exception:
        return 'Cluster seems down'
Example #3
    def init_codis_info(self):
        if self.has_init():
            return

        # start zookeeper client
        zk_client = KazooClient(hosts=self.zk_addr)
        zk_client.start()

        # get codis server information
        zk_servers_dir = "/zk/codis/db_%s/servers" % self.product_name
        for zk_server in zk_client.get_children(zk_servers_dir):
            zk_server_path = '/'.join((zk_servers_dir, zk_server))
            for server in zk_client.get_children(zk_server_path):
                server_path = '/'.join((zk_server_path, server))
                data, stat = zk_client.get(server_path)
                server_info = json.loads(data)
                group_id = server_info.get('group_id')
                server_type = server_info.get('type')
                server_addr = server_info.get('addr')
                self.add_codis_server(group_id, server_type, server_addr)

        # get codis proxy information
        zk_proxy_dir = "/zk/codis/db_%s/proxy" % self.product_name
        for zk_proxy in zk_client.get_children(zk_proxy_dir):
            zk_proxy_path = '/'.join((zk_proxy_dir, zk_proxy))
            data, stat = zk_client.get(zk_proxy_path)
            proxy_info = json.loads(data)
            self.add_proxy(proxy_info['id'], proxy_info['addr'], proxy_info['debug_var_addr'], proxy_info['state'])

        # the client is local to this method, so close it once we are done
        zk_client.stop()

        self.redis_client.init_connection(self.get_group_info(), self.get_proxy_info())
        self.init_done()
Example #4
def test__(ip="10.233.94.51", port="2181"):
    try:
        zk = KazooClient(hosts=ip + ":" + str(port), read_only=True)
        zk.start()
        print(zk.get_children("/"))
    except Exception as e:
        print(e)
Example #5
def getAllServiceProviderConsumer(ip="10.233.94.51",
                                  port="2181",
                                  pathFilter=None,
                                  absolutePath=None):
    try:
        zk = KazooClient(hosts=ip + ":" + str(port), read_only=True)
        zk.start()
        if absolutePath:
            result = zk.get_children(absolutePath)
            return result, None, None
        else:
            service_list = zk.get_children("/dubbo")

        provider_ip_dict = {}
        consumer_address_dict = {}
        service_filter_dict = {}
        for service in service_list:
            # keep only services that match pathFilter; skip the rest
            if pathFilter is not None and pathFilter != "" and \
                    service.lower().find(pathFilter.lower()) == -1:
                continue

            # service matches the filter
            service_filter_dict[service] = None

            try:
                provider_address_list = zk.get_children("/dubbo/" + service +
                                                        "/providers")
                for provider_address in provider_address_list:
                    # record provider ip, port and service type
                    key = (provider_address[:provider_address.find(service)]
                           .replace("%3A%2F%2F", " ")
                           .replace("%2F", " ")
                           .replace("%3A", " "))
                    provider_ip_dict[key] = None
            except Exception:
                logging.info(traceback.format_exc())

            try:
                consumer_address_list = zk.get_children("/dubbo/" + service +
                                                        "/consumers")
                for consumer_address in consumer_address_list:
                    # record the whole consumer address; slice off the service
                    # name first, then clean up the URL-encoded separators
                    key = (consumer_address[:consumer_address.find(service)]
                           .replace("%3A%2F%2F", " ")
                           .replace("%2F", " ")
                           .replace("%3A", " "))
                    consumer_address_dict[key] = None
            except Exception:
                logging.info(traceback.format_exc())

        # mirror the three-tuple shape returned by the absolutePath branch
        return service_filter_dict, provider_ip_dict, consumer_address_dict
    except Exception:
        logging.info(traceback.format_exc())
        return None, None, None
Example #6
def _get_topics(cluster):
	""" Method to get the topic list of a given cluster """
	topic_list = []
	error = 0
	try:
		zk = KazooClient(hosts=cluster['zk_host_ports'])
		zk.start()
		topics = zk.get_children(cluster['topics_path'])
	except NoNodeError:
		error = 2
		return topic_list, error
	except Exception:
		error = 1
		return topic_list, error
	else:
		for topic in topics:
			t = {'id': topic}
			topic_path = cluster['topics_path'] + "/" + topic
			data, stat = zk.get(topic_path)
			d = json.loads(data)
			t['topic_partitions_data'] = d['partitions']
			partitions_path = topic_path + "/partitions"

			try:
				partitions = zk.get_children(partitions_path)
				tpp = {}
				p = []
				for partition in partitions:
					tps = {}
					p.append(partition.encode('ascii'))
					partition_path = partitions_path + "/" + partition + "/state"
					data, stat = zk.get(partition_path)
					d = json.loads(data)
					tps['isr'] = d['isr']
					tps['leader'] = d['leader']
					tpp[partition.encode('ascii')] = tps

				t['partitions'] = p
				t['topic_partitions_states'] = tpp
				topic_list.append(t)
			except NoNodeError:
				zk.stop()  # close the connection before the early return
				return [], 2
			except Exception:
				zk.stop()
				return [], 1

	zk.stop()
	return topic_list, error
Example #7
def get_arcus_service_list(addr, ip):
	zk = KazooClient(addr)
	zk.start()
	children = zk.get_children('/arcus/cache_server_mapping/')

	ret = []
	for child in children:
		# match against the original ip argument; do not overwrite it in the loop
		if child.startswith(ip):
			service = zk.get_children('/arcus/cache_server_mapping/' + child)
			child_ip, port = child.split(':')
			ret.append((child_ip, port, service[0]))
	return ret
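
A usage sketch; the ensemble address and node IP are placeholders, and the children of /arcus/cache_server_mapping are assumed to be "ip:port" znodes as above:

services = get_arcus_service_list('zk.example.com:2181', '10.0.0.5')
for node_ip, port, service_code in services:
	print(node_ip, port, service_code)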
Example #8
def reset(bpath, new_val):
    zk = KazooClient(hosts=args.zkhost)
    zk.start()
    if zk.exists(bpath):  # /offsets
        for child in zk.get_children(bpath):  # offsets/topic
            c = '{0}/{1}'.format(bpath, child)
            print('Topic: {0}'.format(c))
            for c2 in zk.get_children(c):  # offsets/topic/partition
                c2 = '{0}/{1}'.format(c, c2)
                print('Set {0} to {1}'.format(c2, new_val))
                zk.set(c2, new_val)
    else:
        print('Path <{0}> does not exist'.format(bpath))
    zk.stop()
Example #9
def cleanup(args):
    now = dt.utcnow()
    server = '{server}:{port}'.format(server=args.server, port=args.port)
    logging.info('Connecting to {}'.format(server))
    zk = KazooClient(hosts=server)
    zk.start()

    for path in args.zk_paths:
        zk_path = '{}/{}'.format(args.zk_root_path, path)
        nodes = zk.get_children(zk_path)
        logging.info("Found {} nodes under {}".format(len(nodes), zk_path))

        deleted = 0
        for node in nodes:
            node_path = '{}/{}'.format(zk_path, node)
            data, stat = zk.get(node_path)
            last_modified = dt.fromtimestamp(stat.mtime/1000.0)
            if ((now - last_modified).days > args.age) or (args.inclusive and (now - last_modified).days >= args.age):
                if not args.dry_run:
                    # Kazoo does not support recursive async deletes
                    if stat.children_count == 0:
                        res = zk.delete_async(node_path)
                    else:
                        zk.delete(node_path, recursive=True)
                deleted += 1

        logging.info("Deleted {} nodes".format(deleted))

    zk.stop()
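
A minimal invocation sketch, assuming only the argparse fields the function actually reads (server, port, zk_root_path, zk_paths, age, inclusive, dry_run); all values are placeholders:

from argparse import Namespace

cleanup(Namespace(server='zk1.example.com', port=2181,
                  zk_root_path='/locks', zk_paths=['jobs', 'tasks'],
                  age=30, inclusive=False, dry_run=True))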
Example #10
class Zookeeper(KeyManager):
    def __init__(self, hosts):
        self._hosts = hosts
        self.zk = KazooClient(hosts=hosts)
        self.zk.start()

    def get(self, key):
        result = self.zk.get(key)[0]
        if not result:  # an empty payload marks a "directory" node
            result = []
            children = self.zk.get_children(key)
            for i in children:
                result.append({'name': i, 'value': self.zk.get(os.path.join(key, i))[0]})
            return result
        else:
            return result

    def set(self, key, data):
        try:
            self.zk.set(key, data.encode('utf-8'))
        except NoNodeError:
            # the znode does not exist yet; create it instead
            self.zk.create(key, data.encode('utf-8'))

    def mkdir(self, key):
        self.set(key, "")

    def close(self):
        self.zk.stop()
        self.zk.close()

    @property
    def hosts(self):
        return self._hosts
Example #11
def random_nodes(number_of_nodes_to_return, exclude=None, nodes=None):
    """ Selects a group of nodes from the pool of registered nodes

    Arguments:
        number_of_nodes_to_return: The total number of nodes to be returned
        to the caller of the function
        exclude (optional): A list of nodes that will be excluded from the
        results
        nodes (optional): A list of nodes to process, if None then the
        specified zookeeper will be contacted and the registered brokers used
    :return:
    """
    ids = []  # default, in case /brokers/ids does not exist
    if not nodes:
        zk = KazooClient(hosts=zookeeper_connection_string, read_only=True)
        zk.start()
        try:
            if zk.exists('/brokers/ids'):
                ids = zk.get_children('/brokers/ids')
        finally:
            zk.stop()
    else:
        ids = nodes

    if exclude:
        ids = [x for x in ids if x not in exclude]
    return random.sample(ids, number_of_nodes_to_return)
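
A usage sketch, assuming the module-level zookeeper_connection_string points at an ensemble with brokers registered under /brokers/ids:

# pick two registered brokers, never broker '3'
chosen = random_nodes(2, exclude=['3'])

# or sample from an explicit candidate list without contacting ZooKeeper
chosen = random_nodes(1, nodes=['1', '2', '3'])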
Example #12
    def create_from_zookeeper(cls, zkconnect):
        log.info("Connecting to zookeeper {0}".format(zkconnect))
        try:
            zk = KazooClient(zkconnect)
            zk.start()
        except Exception as e:
            raise ZookeeperException("Cannot connect to Zookeeper: {0}".format(e))

        # Get broker list
        cluster = cls()
        add_brokers_from_zk(cluster, zk)

        # Get current partition state
        log.info("Getting partition list from Zookeeper")
        for topic in zk.get_children("/brokers/topics"):
            zdata, zstat = zk.get("/brokers/topics/{0}".format(topic))
            add_topic_with_replicas(cluster, topic, json.loads(zdata))

        if cluster.num_topics() == 0:
            raise ZookeeperException("The cluster specified does not have any topics")

        log.info("Closing connection to zookeeper")
        zk.stop()
        zk.close()

        return cluster
Example #13
def get_children_data(ensemble, namespace, read_only=True):
  hdfs = cluster.get_hdfs()
  if hdfs is None:
    raise PopupException(_('No [hdfs] configured in hue.ini.'))

  if hdfs.security_enabled:
    sasl_server_principal = PRINCIPAL_NAME.get()
  else:
    sasl_server_principal = None

  zk = KazooClient(hosts=ensemble, read_only=read_only, sasl_server_principal=sasl_server_principal)

  zk.start()

  children_data = []

  children = zk.get_children(namespace)

  for node in children:
    data, stat = zk.get("%s/%s" % (namespace, node))
    children_data.append(data)

  zk.stop()

  return children_data
Example #14
def get_scn_from_zookeeper():
	try:
		print("Starting the process")
		zk = KazooClient(hosts='10.0.57.146:2181,10.0.57.145:2181,10.0.77.195:2181')
		print("Connection established")
		zk.start()
		sum_of_scn_num = 0
		if zk.exists("/fk_kafka_cluster1"):
			children_list = zk.get_children("/fk_kafka_cluster1/PROPERTYSTORE/")
			sorted_list = sorted(children_list)
			partion_num_scn_num_dict = {}
			for children in sorted_list:
				recv_data,stat = zk.get("/fk_kafka_cluster1/PROPERTYSTORE/"+str(children))
				data_dict = ast.literal_eval(recv_data)
				partition_num =  data_dict["id"]
				fields = ast.literal_eval(data_dict['simpleFields']['c'])
				scn_num = fields["windowScn"]
				sum_of_scn_num += scn_num
				partion_num_scn_num_dict[partition_num] = scn_num
			print "Data fetching from Zookeeper complete"
			avg_scn_num = (sum_of_scn_num/len(children_list))
			sorted_dict = sorted(partion_num_scn_num_dict.items(),key=operator.itemgetter(1))
			return avg_scn_num,sorted_dict 
		else:
			print("Node does not exist")
	except Exception as e:
		print("Exception occured!",str(e))
Example #15
class ZookeeperSession(object):

    def __init__(self, locations, name_prefix, root_prefix='/frontera'):
        self._zk = KazooClient(hosts=locations)
        self._zk.add_listener(self.zookeeper_listener)
        self._zk.start()
        self.root_prefix = root_prefix
        self.znode_path = self._zk.create("%s/%s" % (self.root_prefix, name_prefix),
                                          ephemeral=True,
                                          sequence=True,
                                          makepath=True)

    def zookeeper_listener(self, state):
        if state == KazooState.LOST:
            # Register somewhere that the session was lost
            pass
        elif state == KazooState.SUSPENDED:
            # Handle being disconnected from Zookeeper
            pass
        else:
            # Handle being connected/reconnected to Zookeeper
            pass

    def set(self, value):
        self._zk.set(self.znode_path, value)

    def get_workers(self, prefix='', exclude_prefix=''):
        for znode_name in self._zk.get_children(self.root_prefix):
            if prefix and not znode_name.startswith(prefix):
                continue
            if exclude_prefix and znode_name.startswith(exclude_prefix):
                continue
            location, _ = self._zk.get(self.root_prefix+"/"+znode_name)
            yield location
Example #16
class GetInfo:
    def __init__(self):
        self.all_info = {}
        #self.SERVER_IP_AND_PORT = "127.0.0.1:2181"
        self.SERVER_IP_AND_PORT = "172.18.229.251:2181"
        self.zk = None

    def start_zk(self):
        self.zk = KazooClient(hosts=self.SERVER_IP_AND_PORT)
        self.zk.start()

    def getInfo(self):
        children = self.zk.get_children("/monitorData")
        for child in children:
            data, stat = self.zk.get("/monitorData/" + str(child))
            # related znodes in this project: /monitorDataJustOneTime and
            # /monitorDataProcessInfo hold one-shot and per-process data
            self.all_info[child] = json.loads(data.decode("utf-8"))
            for key in self.all_info[child]:
                print(key)
                print(self.all_info[child][key])
        return self.all_info
Example #17
class ZooWrap():
    def __init__(self):
        self.zk = KazooClient(hosts="%s,%s" % (
            str(cc.conf['zookeeper']['host']),
            str(cc.conf['zookeeper']['port'])))
        self.zk.start()
        self.root = cc.conf['zookeeper']['rootpath']
        self.log = logging.getLogger('L.ZOOKEEPER')
        self.zk.ensure_path('/%s/sleeping' % (self.root))
        self.whoami = cc.conf['whoami']

    def get_sleeping(self):
        return self.zk.get_children('/%s/sleeping' % (self.root))

    def sleep(self):
        try:
            self.zk.create('/%s/sleeping/%s' % (self.root, self.whoami))
            self.log.info('Sleeping correctly')
        except NodeExistsError:
            self.log.error('Node already sleeping... seems weird')

    def wake(self):
        try:
            self.zk.delete('/%s/sleeping/%s' % (self.root, self.whoami))
        except NoNodeError:
            self.log.error('Node was not sleeping... seems weird')
Example #18
def get_legacy_uids_from_zk(zk: KazooClient) -> List:
    """
    Loads users from legacy datastore

    https://github.com/dcos/dcos-oauth/blob/ac186bf48f21166c3bb935fdc1922bbace75b6a4/dcos-oauth/users.go
    """
    return zk.get_children(ZK_USERS_PATH)
Example #19
File: util.py Project: ederst/mesos
def zookeeper_resolve_leader(addresses, path):
    """
    Resolve the leader using a znode path. ZooKeeper imposes a total
    order on the elements of the queue, guaranteeing that the
    oldest element of the queue is the first one. We can
    thus return the first address we get from ZooKeeper.
    """
    hosts = ",".join(addresses)

    try:
        zk = KazooClient(hosts=hosts)
        zk.start()
    except Exception as exception:
        raise CLIException("Unable to initialize Zookeeper Client: {error}"
                           .format(error=exception))

    try:
        children = zk.get_children(path)
    except Exception as exception:
        raise CLIException("Unable to get children of {zk_path}: {error}"
                           .format(zk_path=path, error=exception))

    masters = sorted(
        # 'json.info' is the prefix for master nodes.
        child for child in children if child.startswith("json.info")
    )

    address = ""
    for master in masters:
        try:
            node_path = "{path}/{node}".format(path=path, node=master)
            json_data, _ = zk.get(node_path)
        except Exception as exception:
            raise CLIException("Unable to get the value of '{node}': {error}"
                               .format(node=node_path, error=exception))

        try:
            data = json.loads(json_data)
        except Exception as exception:
            raise CLIException("Could not load JSON from '{data}': {error}"
                               .format(data=data, error=str(exception)))

        if ("address" in data and "ip" in data["address"] and
                "port" in data["address"]):
            address = "{ip}:{port}".format(ip=data["address"]["ip"],
                                           port=data["address"]["port"])
            break

    try:
        zk.stop()
    except Exception as exception:
        raise CLIException("Unable to stop Zookeeper Client: {error}"
                           .format(error=exception))

    if not address:
        raise CLIException("Unable to resolve the leading"
                           " master using ZooKeeper")
    return address
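
A usage sketch; the ensemble addresses are placeholders and "/mesos" is the conventional znode path Mesos masters register under:

leader = zookeeper_resolve_leader(["10.0.0.1:2181", "10.0.0.2:2181"], "/mesos")
print("leading master at {0}".format(leader))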
Example #20
class DMSInventoryManager(object):
    def __init__(self):
        config = ServiceContext().getConfigService()
        self.zk_address = config.get("Inventory","zk_address")
        self.root_path = config.get("Inventory","zk_root_path")
        self.zk_client = KazooClient(hosts=self.zk_address)

    def start(self):
        self.zk_client.start()

    def stop(self):
        self.zk_client.stop()
        self.zk_client.close()

    def getservice(self,accountId):
        """
        :param accountId:
        :return:
        """
        services = []
        service_path = os.path.join(self.root_path,accountId,"services")
        children = self.zk_client.get_children(service_path)
        for child in children:
            services.append(child)
        return services

    def getinstancebyservice(self,accountId,service):
        parent_path = os.path.join(self.root_path,accountId,"services",service,"instances")
        nodes = []
        children = self.zk_client.get_children(parent_path)
        for child in children:
            nodepath = os.path.join(parent_path, child)
            print(nodepath)
            data, stats = self.zk_client.get(nodepath)
            info = json.loads(data)  # avoid shadowing the builtin "map"
            ret = {}
            ret["vmType"] = service
            ret["accountId"] = accountId
            ret["stackId"] = info.get("id", "")
            ret["vmManagementIP"] = info.get("manageip", None)
            ret["vmPublicIP"] = info.get("publicip", None)
            ret["vmServiceIP"] = info.get("serviceip", None)
            ret["eventName"] = "CREATE_VM"
            nodes.append(ret)
        return nodes
Example #21
class zk_util():

    def __init__(self, host_file):
        host_list = []
        with open(host_file) as z:
            for line in z:
                host_list.append(line.strip())

        host_string = ','.join(host_list)

        self.client = KazooClient(hosts=host_string)
        self.client.start()

    def get_compute_node(self):
        return self.get_nodes('compute_nodes')

    def get_redis_endpoint(self):
        return self.get_nodes('redis_endpoints')

    def get_redis_primary(self):
        # the fetched child is currently unused; the primary endpoint is hardcoded
        node = self.client.get_children('/%s' % 'redis_endpoints')[0]
        return 'cloudmatrixredis-001.c5szdq.0001.use1.cache.amazonaws.com'

    def get_nodes(self, node_type):
        nodes = self.client.get_children('/%s' % node_type)
        return random.choice(nodes)
Example #22
def query_servers_from_zk(zkaddrs, cluster, namespace):
    conn = KazooClient(hosts=zkaddrs, read_only=True)
    conn.start()

    path = "/slacker/cluster/%s/namespaces/%s" % (cluster, namespace)
    children = conn.get_children(path)
    children = [c for c in children if not c.startswith("_")]
    conn.stop()
    return children
Example #23
    def _get_metadata(self, topics=None):
        """Get fresh cluster metadata from a broker."""
        # Works either on existing brokers or seed_hosts list
        # random.shuffle shuffles in place and returns None, so shuffle a copy
        brokers = [b for b in self.brokers.values() if b.connected]
        random.shuffle(brokers)
        if brokers:
            for broker in brokers:
                response = broker.request_metadata(topics)
                if response is not None:
                    return response
        else:  # try seed hosts
            metadata = None
            broker_connects = [
                [broker_str.split(":")[0], broker_str.split(":")[1].split("/")[0]]
                for broker_str in self._seed_hosts.split(',')]
            metadata = self._request_metadata(broker_connects, topics)
            if metadata is not None:
                return metadata

            # try treating seed_hosts as a zookeeper host list
            zookeeper = KazooClient(self._seed_hosts,
                                    timeout=self._socket_timeout_ms / 1000)
            try:
                # This math is necessary due to a nested timeout in KazooClient.
                # KazooClient will attempt to retry its connections only until the
                # start() timeout is reached. Each of those retries will timeout as
                # indicated by the KazooClient kwarg. We do a number of timeouts of
                # self._socket_timeout_ms equal to the number of hosts. This provides
                # the same retrying behavior that pykafka uses above when treating this
                # host string as a list of kafka brokers.
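                # e.g. with three seed hosts and socket_timeout_ms = 30000 this
                # calls zookeeper.start(timeout=(3 * 30000) / 1000), i.e. 90 seconds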
                zookeeper.start(
                    timeout=(len(broker_connects) * self._socket_timeout_ms) / 1000)
            except Exception as e:
                log.error('Unable to connect to ZooKeeper instance %s', self._seed_hosts)
                log.exception(e)
            else:
                try:
                    # get a list of connect strings from zookeeper
                    brokers_path = "/brokers/ids/"
                    broker_ids = zookeeper.get_children(brokers_path)
                    broker_connects = []
                    for broker_id in broker_ids:
                        broker_json, _ = zookeeper.get("{}{}".format(brokers_path, broker_id))
                        broker_info = json.loads(broker_json.decode("utf-8"))
                        broker_connects.append((broker_info['host'], broker_info['port']))
                    zookeeper.stop()
                except Exception as e:
                    log.error('Unable to fetch broker info from ZooKeeper')
                    log.exception(e)

                metadata = self._request_metadata(broker_connects, topics)
                if metadata is not None:
                    self._zookeeper_connect = self._seed_hosts
                    return metadata

        # Couldn't connect anywhere. Raise an error.
        raise RuntimeError(
            'Unable to connect to a broker to fetch metadata. See logs.')
Example #24
def _get_topology():
	""" Method to get the entire Kafka clusters (defined in hue.ini) topology """
	topology = CLUSTERS.get()
	clusters = []
	error = 0
	error_brokers = 0
	error_consumer_groups = 0

	for c in topology:
		cluster = get_cluster_or_404(c)
		try:
			zk = KazooClient(hosts=CLUSTERS[c].ZK_HOST_PORTS.get())
			zk.start()
			brokers, error_brokers = _get_brokers(zk,cluster['id'])
			consumer_groups, error_consumer_groups = _get_consumer_groups(zk,cluster['id'])
			consumer_groups_status = {} 

			for consumer_group in consumer_groups:
				# 0 = offline, (not 0) =  online
				consumers_path = CLUSTERS[c].CONSUMERS_PATH.get() + "/" + consumer_group + "/ids"
				try:
					consumers = zk.get_children(consumers_path)
				except NoNodeError:
					consumer_groups_status[consumer_group]=0
				else:
					consumer_groups_status[consumer_group]=len(consumers)
			
			c = {'cluster':cluster,
				'brokers':brokers,
				'consumer_groups':consumer_groups,
				'consumer_groups_status':consumer_groups_status,
				'error_brokers':error_brokers,
				'error_consumer_groups':error_consumer_groups,
				'error':0}
			
			zk.stop()

		except NoNodeError:
			c = {'cluster':cluster,
				'brokers':[],
				'consumer_groups':[],
				'consumer_groups_status':[],
				'error_brokers':error_brokers,
				'error_consumer_groups':error_consumer_groups,
				'error':2}
		except Exception:
			c = {'cluster':cluster,
				'brokers':[],
				'consumer_groups':[],
				'consumer_groups_status':[],
				'error_brokers':error_brokers,
				'error_consumer_groups':error_consumer_groups,
				'error':1}

		clusters.append(c)		
	return clusters
Example #25
    def __init__(self):
        self.url = Conf().getSolrUrl()
        self.collection = Conf().getSolrCollection()
        zk = KazooClient(hosts=self.url, read_only=True)
        zk.start()
        self.urls = []
        for node in zk.get_children("/live_nodes"):
            self.urls.append(node.replace('_solr', ''))
        zk.stop()
        self.conn = SolrConnection(self.urls, version="5.4.0", webappdir='solr')
Example #26
def do_zookeeper_read(addr, path):
	print(path)
	zk = KazooClient(addr)
	zk.start()
	data, stat = zk.get(path)
	print('node info:', data)
	print('node stat:', stat)

	children = zk.get_children(path)
	print('node children:', children)
Example #27
def test_zk_conn():
    project = utils.get_test_project()

    zk_ip = project.cluster.head.ip
    zk = KazooClient(hosts='%s:2181' % zk_ip)
    zk.start()
    assert zk

    children = zk.get_children('/')
    assert 'zookeeper' in children
Example #28
class CockRoach(object):
    def __init__(self, zkHost, stale_max_days=30, assume_yes=False, preview=False):
        self.ConsumerGroups = []
        self.zk_client = KazooClient(hosts=zkHost)
        self.zk_client.start()
        self.stale_max_days = stale_max_days
        self.assume_yes = assume_yes
        self.preview = preview
        if self.zk_client.exists("/consumers"):
            for cg_name in self.zk_client.get_children("/consumers"):
                self.ConsumerGroups.append(ConsumerGroup(cg_name, self.zk_client))

    def get_stale_cgroups(self, display):
        """
           get_stale_cgroups returns ConsumerGroups
           that were not used for stale_max_days
        """
        ret = []
        for consumergroup in self.ConsumerGroups:
            delta = datetime.now() - consumergroup.last_seen().mtime
            if delta.days > self.stale_max_days:
                if display:
                    print("Stale: %s" % consumergroup)
                ret.append(consumergroup)
        return ret

    def delete_stale_cgroups(self):
        """Delete consumer groups that are considered stale."""
        stale_cgroups = self.get_stale_cgroups(display=False)
        for stale_cg in stale_cgroups:
            print(stale_cg)
            if self.assume_yes is False:
                confirm = input("Delete?")
            else:
                confirm = "Y"

            if confirm == "Y":
                self.delete_cgroup(stale_cg)

    def delete_cgroup(self, consumergroup):
        """Deletes a consumer group."""
        print("Deleting %s" % consumergroup.gid)
        if self.preview is False:
            self.zk_client.delete("/consumers/%s" % consumergroup.gid, version=-1, recursive=True)
            print("executed")
        else:
            print("pass")

    def __str__(self):
        ret = ""
        for consumer in self.ConsumerGroups:
            ret += "%s" % consumer
        return ret
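
A hedged usage sketch; the ensemble address is a placeholder, and preview=True prints what would be deleted without touching ZooKeeper:

roach = CockRoach("zk.example.com:2181", stale_max_days=60,
                  assume_yes=True, preview=True)
roach.get_stale_cgroups(display=True)
roach.delete_stale_cgroups()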
Example #29
    def __init__(self, path='/'):
        self.path = path
        zk_client = KazooClient(hosts=ZOOKEEPER_SERVERS, read_only=True, timeout=TIMEOUT)
        try:
            zk_client.start()
            self.data, stat = zk_client.get(path)
            self.stat = _convert_stat(stat)
            self.children = zk_client.get_children(path) or []
            self.acls = _convert_acls(zk_client.get_acls(path)[0])
        finally:
            zk_client.stop()
Example #30
def get_devices_list():
	pi_list = []
	zk = KazooClient(hosts=ZK_URI)
	zk.start()
	subs = zk.get_children(PI_PATH)
	for sub in subs:
		data = zk.get(JOIN(PI_PATH, sub))
		# zk.get returns a (payload, stat) tuple; check the payload itself
		if data is not None and len(data[0]) > 0:
			pi_list.append(json.loads(data[0]))
	zk.stop()
	return pi_list
Example #31
#!/usr/bin/python
import logging
from time import sleep
from kazoo.client import KazooClient,ChildrenWatch

# print log to console
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)

zk = KazooClient('127.0.0.1:2181')
zk.start()

def children_callback(children):
    print('****', children)

children = zk.get_children('/zookeeper', children_callback)

zk.create('/zookeeper/goodboy_2')
#zk.delete('/zookeeper/goodboy')

while True: 
    #children = zk.get_children('/zookeeper', children_callback)
    sleep(1)
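
The import at the top pulls in ChildrenWatch, but the script never uses it. For reference, a sketch of the equivalent recipe-based watch, reusing the zk client above; unlike the one-shot native watch registered via get_children, the recipe re-arms itself on every change:

@zk.ChildrenWatch('/zookeeper')
def watch_children(children):
    print('children now:', children)
    # return False here to stop watching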
Example #32
def get_query_server_config(name='beeswax', server=None, cluster=None):
  LOG.debug("Query cluster %s: %s" % (name, cluster))

  cluster_config = get_cluster_config(cluster)
  if name == "llap":
    activeEndpoint = cache.get('llap')
    if activeEndpoint is None:
      if HIVE_DISCOVERY_LLAP.get():
        LOG.debug("Checking zookeeper for Hive Server Interactive endpoint")
        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
        zk.start()
        if HIVE_DISCOVERY_LLAP_HA.get():
          znode = "{0}/instances".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
          if zk.exists(znode):
            hiveservers = zk.get_children(znode)
            for server in hiveservers:
              llap_servers = json.loads(zk.get("{0}/{1}".format(znode, server))[0])["internal"][0]
              if llap_servers["api"] == "activeEndpoint":
                cache.set("llap", json.dumps({"host": llap_servers["addresses"][0]["host"], "port": llap_servers["addresses"][0]["port"]}), CACHE_TIMEOUT.get())
          else:
            LOG.error("LLAP Endpoint not found, reverting to HiveServer2")
            cache.set("llap", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}), CACHE_TIMEOUT.get())
        else:
          znode = "{0}".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
          LOG.debug("Setting up LLAP with the following node {0}".format(znode))
          if zk.exists(znode):
            hiveservers = zk.get_children(znode)
            for server in hiveservers:
              cache.set("llap", json.dumps({"host": server.split(';')[0].split('=')[1].split(":")[0], "port": server.split(';')[0].split('=')[1].split(":")[1]}))
        zk.stop()
      else:
        LOG.debug("Zookeeper Discovery not enabled, reverting to config values")
        cache.set("llap", json.dumps({"host": LLAP_SERVER_HOST.get(), "port": LLAP_SERVER_PORT.get()}), CACHE_TIMEOUT.get())
    activeEndpoint = json.loads(cache.get("llap"))
  elif name != 'hms' and name != 'impala':
    activeEndpoint = cache.get("hiveserver2")
    if activeEndpoint is None:
      if HIVE_DISCOVERY_HS2.get():
        zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
        zk.start()
        znode = HIVE_DISCOVERY_HIVESERVER2_ZNODE.get()
        LOG.info("Setting up Hive with the following node {0}".format(znode))
        if zk.exists(znode):
          hiveservers = zk.get_children(znode)
          server_to_use = 0 # if CONF.HIVE_SPREAD.get() randint(0, len(hiveservers)-1) else 0
          cache.set("hiveserver2", json.dumps({"host": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[0], "port": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[1]}))
        else:
          cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
        zk.stop()
      else:
        cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": hive_site.hiveserver2_thrift_http_port()}))
    activeEndpoint = json.loads(cache.get("hiveserver2"))

  if name == 'impala':
    from impala.dbms import get_query_server_config as impala_query_server_config
    query_server = impala_query_server_config(cluster_config=cluster_config)
  elif name == 'hms':
    kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
    query_server = {
        'server_name': 'hms',
        'server_host': HIVE_METASTORE_HOST.get() if not cluster_config else cluster_config.get('server_host'),
        'server_port': HIVE_METASTORE_PORT.get(),
        'principal': kerberos_principal,
        'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
        'auth_username': AUTH_USERNAME.get(),
        'auth_password': AUTH_PASSWORD.get()
    }
  else:
    kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
    query_server = {
        'server_name': 'beeswax',
        'server_host': activeEndpoint["host"],
        'server_port': LLAP_SERVER_PORT.get() if name == 'llap' else HIVE_SERVER_PORT.get(),
        'principal': kerberos_principal,
        'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
            'protocol': 'https' if hiveserver2_use_ssl() else 'http',
            'host': activeEndpoint["host"],
            'port': activeEndpoint["port"],
            'end_point': hive_site.hiveserver2_thrift_http_path()
          },
        'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
        'auth_username': AUTH_USERNAME.get(),
        'auth_password': AUTH_PASSWORD.get()
      }
  if name == 'sparksql': # Spark SQL is almost the same as Hive
    from spark.conf import SQL_SERVER_HOST as SPARK_SERVER_HOST, SQL_SERVER_PORT as SPARK_SERVER_PORT

    query_server.update({
        'server_name': 'sparksql',
        'server_host': SPARK_SERVER_HOST.get(),
        'server_port': SPARK_SERVER_PORT.get()
    })

  debug_query_server = query_server.copy()
  debug_query_server['auth_password_used'] = bool(debug_query_server.pop('auth_password'))
  LOG.debug("Query Server: %s" % debug_query_server)

  return query_server
Example #33
class KafkaConnection():
    _topics_path = 'brokers/topics'
    _brokers_path = 'brokers/ids'
    _topics_config_path = 'config/topics'

    def __init__(self,
                 hosts=None,
                 zk_path=None,
                 load_state=False,
                 kazoo_client=None):
        """
        @param hosts         : list, zk host:port entries (default ['localhost:2181'])
        @param zk_path       : str, root of where kafka info is stored
        @param load_state    : bool, by default avoid loading all state data on initializing due to all the ZK calls required
        @param kazoo_client  : KazooClient, use as KazooClient instead of creating new connection object
        """
        # avoid a mutable default argument
        if hosts is None:
            hosts = ['localhost:2181']
        self._hosts = hosts  # always set, so __repr__ also works with kazoo_client

        if kazoo_client:
            self._client = kazoo_client
            if zk_path:
                self._client.chroot = zk_path
        else:
            self._client = KazooClient(hosts=','.join(hosts))
            self._client.chroot = zk_path

        self._topics = {}
        self._brokers = {}
        self._last_state_update = None

        self._client.start()

        # Load up metadata
        self._load_topics()
        self._load_partitions()
        self._load_brokers()

        if load_state:
            self.update()

    def __getitem__(self, topic):
        return self._topics[topic]

    def __repr__(self):
        return '<{0} topics={1}, zk_hosts={2}>'.format(
            self.__class__.__name__, str(self._topics.keys()), self._hosts)

    @property
    def topics(self):
        return self._topics

    @property
    def brokers(self):
        return self._brokers

    @property
    def connected(self):
        return self._client.connected

    def _load_brokers(self):
        brokers = self._client.get_children(self._brokers_path)

        for broker in brokers:
            broker_data = json.loads(
                self._client.get(os.path.join(self._brokers_path, broker))[0])
            broker_obj = KafkaBroker(broker, broker_data['host'],
                                     broker_data['port'],
                                     broker_data['jmx_port'],
                                     int(broker_data['timestamp']))
            self._brokers[int(broker)] = broker_obj

    def _load_topics(self):
        topics = self._client.get_children(self._topics_path)

        for topic in topics:
            top_obj = KafkaTopic(topic, client=self._client)
            self._topics[topic] = top_obj

    def _load_partitions(self):
        for topic in self._topics:
            logger.debug('Loading partition data for {0}'.format(topic))
            replicas = json.loads(
                self._client.get(os.path.join(self._topics_path,
                                              topic))[0])['partitions']
            replicas = dict((int(k), v) for k, v in replicas.items())

            for partition, val in replicas.items():
                part_obj = KafkaPartition(partition,
                                          topic,
                                          replicas=val,
                                          client=self._client)
                self._topics[topic].add_partition(part_obj)

    def disconnect(self):
        self._client.stop()

    def update(self, topic=None):
        """
        Update partitions with latest metadata
        """
        if topic:
            self._topics[topic].update()
        else:
            self._load_brokers()
            self._load_topics()
            self._load_partitions()

            for t in self._topics.values():
                t.update()

        self._last_state_update = datetime.now()
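
A hedged usage sketch; the ensemble host and topic name are placeholders, and KafkaBroker/KafkaTopic/KafkaPartition are assumed to come from the same module:

conn = KafkaConnection(hosts=['zk1.example.com:2181'], zk_path='/kafka')
print(list(conn.topics))       # topic names
print(conn.brokers)            # {broker_id: KafkaBroker}
conn.update('my-topic')        # refresh metadata for one (hypothetical) topic
conn.disconnect()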
Example #34
class ZooKeeper(AbstractDCS):

    def __init__(self, config):
        super(ZooKeeper, self).__init__(config)

        hosts = config.get('hosts', [])
        if isinstance(hosts, list):
            hosts = ','.join(hosts)

        self._client = KazooClient(hosts, handler=PatroniSequentialThreadingHandler(config['retry_timeout']),
                                   timeout=config['ttl'], connection_retry=KazooRetry(max_delay=1, max_tries=-1,
                                   sleep_func=time.sleep), command_retry=KazooRetry(deadline=config['retry_timeout'],
                                   max_delay=1, max_tries=-1, sleep_func=time.sleep))
        self._client.add_listener(self.session_listener)

        self._fetch_cluster = True
        self._fetch_optime = True

        self._orig_kazoo_connect = self._client._connection._connect
        self._client._connection._connect = self._kazoo_connect

        self._client.start()

    def _kazoo_connect(self, *args):
        """Kazoo is using Ping's to determine health of connection to zookeeper. If there is no
        response on Ping after Ping interval (1/2 from read_timeout) it will consider current
        connection dead and try to connect to another node. Without this "magic" it was taking
        up to 2/3 from session timeout (ttl) to figure out that connection was dead and we had
        only small time for reconnect and retry.

        This method is needed to return different value of read_timeout, which is not calculated
        from negotiated session timeout but from value of `loop_wait`. And it is 2 sec smaller
        than loop_wait, because we can spend up to 2 seconds when calling `touch_member()` and
        `write_leader_optime()` methods, which also may hang..."""

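        # e.g. with loop_wait = 10 the connection is re-established with a read
        # timeout of (10 - 2) * 1000 = 8000 ms, independent of the session timeout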
        ret = self._orig_kazoo_connect(*args)
        return max(self.loop_wait - 2, 2)*1000, ret[1]

    def session_listener(self, state):
        if state in [KazooState.SUSPENDED, KazooState.LOST]:
            self.cluster_watcher(None)

    def optime_watcher(self, event):
        self._fetch_optime = True
        self.event.set()

    def cluster_watcher(self, event):
        self._fetch_cluster = True
        self.optime_watcher(event)

    def reload_config(self, config):
        self.set_retry_timeout(config['retry_timeout'])

        loop_wait = config['loop_wait']

        loop_wait_changed = self._loop_wait != loop_wait
        self._loop_wait = loop_wait
        self._client.handler.set_connect_timeout(loop_wait)

        # We need to reestablish connection to zookeeper if we want to change
        # read_timeout (and Ping interval respectively), because read_timeout
        # is calculated in `_kazoo_connect` method. If we are changing ttl at
        # the same time, set_ttl method will reestablish connection and return
        # `!True`, otherwise we will close existing connection and let kazoo
        # open the new one.
        if not self.set_ttl(int(config['ttl'] * 1000)) and loop_wait_changed:
            self._client._connection._socket.close()

    def set_ttl(self, ttl):
        """It is not possible to change ttl (session_timeout) in zookeeper without
        destroying old session and creating the new one. This method returns `!True`
        if session_timeout has been changed (`restart()` has been called)."""
        if self._client._session_timeout != ttl:
            self._client._session_timeout = ttl
            self._client.restart()
            return True

    @property
    def ttl(self):
        return self._client._session_timeout

    def set_retry_timeout(self, retry_timeout):
        retry = self._client.retry if isinstance(self._client.retry, KazooRetry) else self._client._retry
        retry.deadline = retry_timeout

    def get_node(self, key, watch=None):
        try:
            ret = self._client.get(key, watch)
            return (ret[0].decode('utf-8'), ret[1])
        except NoNodeError:
            return None

    def get_leader_optime(self, leader):
        watch = self.optime_watcher if not leader or leader.name != self._name else None
        optime = self.get_node(self.leader_optime_path, watch)
        self._fetch_optime = False
        return optime and int(optime[0]) or 0

    @staticmethod
    def member(name, value, znode):
        return Member.from_node(znode.version, name, znode.ephemeralOwner, value)

    def get_children(self, key, watch=None):
        try:
            return self._client.get_children(key, watch)
        except NoNodeError:
            return []

    def load_members(self, sync_standby):
        members = []
        for member in self.get_children(self.members_path, self.cluster_watcher):
            watch = member == sync_standby and self.cluster_watcher or None
            data = self.get_node(self.members_path + member, watch)
            if data is not None:
                members.append(self.member(member, *data))
        return members

    def _inner_load_cluster(self):
        self._fetch_cluster = False
        self.event.clear()
        nodes = set(self.get_children(self.client_path(''), self.cluster_watcher))
        if not nodes:
            self._fetch_cluster = True

        # get initialize flag
        initialize = (self.get_node(self.initialize_path) or [None])[0] if self._INITIALIZE in nodes else None

        # get global dynamic configuration
        config = self.get_node(self.config_path, watch=self.cluster_watcher) if self._CONFIG in nodes else None
        config = config and ClusterConfig.from_node(config[1].version, config[0], config[1].mzxid)

        # get timeline history
        history = self.get_node(self.history_path, watch=self.cluster_watcher) if self._HISTORY in nodes else None
        history = history and TimelineHistory.from_node(history[1].mzxid, history[0])

        # get synchronization state
        sync = self.get_node(self.sync_path, watch=self.cluster_watcher) if self._SYNC in nodes else None
        sync = SyncState.from_node(sync and sync[1].version, sync and sync[0])

        # get list of members
        sync_standby = sync.leader == self._name and sync.sync_standby or None
        members = self.load_members(sync_standby) if self._MEMBERS[:-1] in nodes else []

        # get leader
        leader = self.get_node(self.leader_path) if self._LEADER in nodes else None
        if leader:
            client_id = self._client.client_id
            if not self._ctl and leader[0] == self._name and client_id is not None \
                    and client_id[0] != leader[1].ephemeralOwner:
                logger.info('I am leader but not owner of the session. Removing leader node')
                self._client.delete(self.leader_path)
                leader = None

            if leader:
                member = Member(-1, leader[0], None, {})
                member = ([m for m in members if m.name == leader[0]] or [member])[0]
                leader = Leader(leader[1].version, leader[1].ephemeralOwner, member)
                self._fetch_cluster = member.index == -1

        # get last leader operation
        last_leader_operation = self._OPTIME in nodes and self.get_leader_optime(leader)

        # failover key
        failover = self.get_node(self.failover_path, watch=self.cluster_watcher) if self._FAILOVER in nodes else None
        failover = failover and Failover.from_node(failover[1].version, failover[0])

        return Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history)

    def _load_cluster(self):
        cluster = self.cluster
        if self._fetch_cluster or cluster is None:
            try:
                cluster = self._client.retry(self._inner_load_cluster)
            except Exception:
                logger.exception('get_cluster')
                self.cluster_watcher(None)
                raise ZooKeeperError('ZooKeeper is not responding properly')
        # Optime ZNode was updated or doesn't exist and we are not leader
        elif (self._fetch_optime and not self._fetch_cluster or not cluster.last_leader_operation) and\
                not (cluster.leader and cluster.leader.name == self._name):
            try:
                optime = self.get_leader_optime(cluster.leader)
                cluster = Cluster(cluster.initialize, cluster.config, cluster.leader, optime,
                                  cluster.members, cluster.failover, cluster.sync, cluster.history)
            except Exception:
                pass
        return cluster

    def _bypass_caches(self):
        self._fetch_cluster = True

    def _create(self, path, value, retry=False, ephemeral=False):
        try:
            if retry:
                self._client.retry(self._client.create, path, value, makepath=True, ephemeral=ephemeral)
            else:
                self._client.create_async(path, value, makepath=True, ephemeral=ephemeral).get(timeout=1)
            return True
        except Exception:
            logger.exception('Failed to create %s', path)
        return False

    def attempt_to_acquire_leader(self, permanent=False):
        ret = self._create(self.leader_path, self._name.encode('utf-8'), retry=True, ephemeral=not permanent)
        if not ret:
            logger.info('Could not take out TTL lock')
        return ret

    def _set_or_create(self, key, value, index=None, retry=False, do_not_create_empty=False):
        value = value.encode('utf-8')
        try:
            if retry:
                self._client.retry(self._client.set, key, value, version=index or -1)
            else:
                self._client.set_async(key, value, version=index or -1).get(timeout=1)
            return True
        except NoNodeError:
            if do_not_create_empty and not value:
                return True
            elif index is None:
                return self._create(key, value, retry)
            else:
                return False
        except Exception:
            logger.exception('Failed to update %s', key)
        return False

    def set_failover_value(self, value, index=None):
        return self._set_or_create(self.failover_path, value, index)

    def set_config_value(self, value, index=None):
        return self._set_or_create(self.config_path, value, index, retry=True)

    def initialize(self, create_new=True, sysid=""):
        sysid = sysid.encode('utf-8')
        return self._create(self.initialize_path, sysid, retry=True) if create_new \
            else self._client.retry(self._client.set, self.initialize_path, sysid)

    def touch_member(self, data, permanent=False):
        cluster = self.cluster
        member = cluster and cluster.get_member(self._name, fallback_to_leader=False)
        encoded_data = json.dumps(data, separators=(',', ':')).encode('utf-8')
        if member and (self._client.client_id is not None and member.session != self._client.client_id[0] or
                       not (deep_compare(member.data.get('tags', {}), data.get('tags', {})) and
                            member.data.get('version') == data.get('version') and
                            member.data.get('checkpoint_after_promote') == data.get('checkpoint_after_promote'))):
            try:
                self._client.delete_async(self.member_path).get(timeout=1)
            except NoNodeError:
                pass
            except Exception:
                return False
            member = None

        if member:
            if deep_compare(data, member.data):
                return True
        else:
            try:
                self._client.create_async(self.member_path, encoded_data, makepath=True,
                                          ephemeral=not permanent).get(timeout=1)
                return True
            except Exception as e:
                if not isinstance(e, NodeExistsError):
                    logger.exception('touch_member')
                    return False
        try:
            self._client.set_async(self.member_path, encoded_data).get(timeout=1)
            return True
        except Exception:
            logger.exception('touch_member')

        return False

    def take_leader(self):
        return self.attempt_to_acquire_leader()

    def _write_leader_optime(self, last_operation):
        return self._set_or_create(self.leader_optime_path, last_operation)

    def _update_leader(self):
        return True

    def _delete_leader(self):
        self._client.restart()
        return True

    def _cancel_initialization(self):
        node = self.get_node(self.initialize_path)
        if node:
            self._client.delete(self.initialize_path, version=node[1].version)

    def cancel_initialization(self):
        try:
            self._client.retry(self._cancel_initialization)
        except Exception:
            logger.exception("Unable to delete initialize key")

    def delete_cluster(self):
        try:
            return self._client.retry(self._client.delete, self.client_path(''), recursive=True)
        except NoNodeError:
            return True

    def set_history_value(self, value):
        return self._set_or_create(self.history_path, value)

    def set_sync_state_value(self, value, index=None):
        return self._set_or_create(self.sync_path, value, index, retry=True, do_not_create_empty=True)

    def delete_sync_state(self, index=None):
        return self.set_sync_state_value("{}", index)

    def watch(self, leader_index, timeout):
        if super(ZooKeeper, self).watch(leader_index, timeout) and not self._fetch_optime:
            self._fetch_cluster = True
        return self._fetch_cluster