Example #1
import json
import logging
#logger = logging.getLogger(__name__)

#zk = KazooClient(hosts='127.0.0.1:5001')
#zk = KazooClient(hosts='127.0.0.1:5001', read_only=True)
#zk.stop()
#zk.start()

#logging.basicConfig()
from kazoo.client import KazooClient, KazooState
from kazoo.retry import KazooRetry
_retry = KazooRetry(max_tries=1000, delay=0.5, backoff=2)
zk = KazooClient(hosts="127.0.0.1:2181",
                 logger=logging,
                 read_only=True,
                 timeout=30,
                 connection_retry=_retry)
zk.start()


def my_listener(state):
    if state == KazooState.LOST:
        print("Register somewhere that the session was lost")
    elif state == KazooState.SUSPENDED:
        print("Handle being disconnected from Zookeeper")
    else:
        print("Handle being connected/reconnected to Zookeeper")


moduleNames = ['interface']
Example #2
    def spoorer(self):  # connect to Kafka and fetch the topic list
        kafka_client = None
        try:
            kafka_client = SimpleClient(self.kafka_hosts, timeout=self.timeout)
        except Exception as e:
            print("Error, cannot connect kafka broker.")
            sys.exit(1)
        else:
            kafka_topics = kafka_client.topics
        finally:
            if kafka_client is not None:
                kafka_client.close()

        # connect to ZooKeeper and read the current consumer offsets
        try:
            zookeeper_client = KazooClient(hosts=self.zookeeper_hosts, read_only=True, timeout=self.timeout)
            zookeeper_client.start()
        except Exception as e:
            print("Error, cannot connect zookeeper server.")
            sys.exit(1)

        try:
            groups = [str(g) for g in zookeeper_client.get_children(self.zookeeper_url + 'consumers')]
        except NoNodeError as e:
            print("Error, invalid zookeeper url.")
            zookeeper_client.stop()
            sys.exit(2)
        else:
            for group in groups:
                if 'offsets' not in zookeeper_client.get_children(self.zookeeper_url + 'consumers/%s' % group):
                    continue
                topic_path = 'consumers/%s/offsets' % group
                topics = [str(t) for t in zookeeper_client.get_children(self.zookeeper_url + topic_path)]
                if len(topics) == 0:
                    continue

                for topic in topics:
                    if topic not in self.white_topic_group.keys():
                        continue
                    elif group not in self.white_topic_group[topic].replace(' ', '').split(','):
                        continue
                    partition_path = 'consumers/%s/offsets/%s' % (group, topic)
                    partitions = [int(p) for p in zookeeper_client.get_children(self.zookeeper_url + partition_path)]
                    for partition in partitions:
                        base_path = 'consumers/%s/%s/%s/%s' % (group, '%s', topic, partition)
                        owner_path, offset_path = base_path % 'owners', base_path % 'offsets'
                        offset = zookeeper_client.get(self.zookeeper_url + offset_path)[0]

                        try:
                            owner = zookeeper_client.get(self.zookeeper_url + owner_path)[0]
                        except NoNodeError as e:
                            owner = 'null'
                        # store the consumption progress in the metric dict
                        metric = {'datetime': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                                  'topic': topic, 'group': group, 'partition': int(partition),
                                  'logsize': None, 'offset': int(offset), 'lag': None, 'owner': owner}
                        self.result.append(metric)
        finally:
            zookeeper_client.stop()
        # get the logsize of each partition (modified here; differs from the original source)
        try:
            client = SimpleClient(self.kafka_hosts)
        except Exception as e:
            print("Error, cannot connect kafka broker.")
            sys.exit(1)
        else:
            for kafka_topic in kafka_topics:
                self.kafka_logsize[kafka_topic] = {}
                partitions = client.topic_partitions[kafka_topic]
                offset_requests = [OffsetRequestPayload(kafka_topic, p, -1, 1) for p in partitions.keys()]
                offsets_responses = client.send_offset_request(offset_requests)
                for r in offsets_responses:
                    self.kafka_logsize[kafka_topic][r.partition] = r.offsets[0]

        # logsize minus the current offset gives the lag
        f1 = open(self.log_file, 'a+')
        f2 = open(self.log_day_file, 'a+')
        for metric in self.result:
            logsize = self.kafka_logsize[metric['topic']][metric['partition']]
            metric['logsize'] = int(logsize)
            metric['lag'] = int(logsize) - int(metric['offset'])
            f1.write(json.dumps(metric, sort_keys=True) + '\n')
            f1.flush()
            f2.write(json.dumps(metric, sort_keys=True) + '\n')
            f2.flush()
        f1.close()
        f2.close()
        client.close()
        return ''
Example #3
from kazoo.client import KazooClient

zk = KazooClient('zk:2181')
zk.start()
zk.delete('/lymph', recursive=True)
zk.stop()
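
# Added note: zk.delete() raises kazoo.exceptions.NoNodeError when the path
# is absent; a tolerant variant of the call above:
# from kazoo.exceptions import NoNodeError
# try:
#     zk.delete('/lymph', recursive=True)
# except NoNodeError:
#     pass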
Example #4
        'bootstrap.servers': BOOTSTRAP,
        'schema.registry.url': SCHEMAREGISTRY
    }
    record_schema = avro.load(AVROLOADPATH)
    producer = AvroProducer(conf, default_value_schema=record_schema)

    try:
        producer.produce(topic=KAFKATOPIC, value=mce)
        producer.poll(0)
        sys.stdout.write('\n%s has been successfully produced!\n' % mce)
    except ValueError as e:
        sys.stdout.write('Message serialization failed %s' % e)
    producer.flush()


zk = KazooClient(ZOOKEEPER)
zk.start()
client = CachedSchemaRegistryClient(SCHEMAREGISTRY)

topics = zk.get_children("/brokers/topics")

for dataset_name in topics:
    if dataset_name.startswith('_'):
        continue
    topic = dataset_name + '-value'
    schema_id, schema, schema_version = client.get_latest_schema(topic)
    if schema_id is None:
        print(f"Skipping topic without schema: {topic}")
        continue

    print(topic)
Example #5
 def get_kazoo_client(self, zoo_instance_name):
     zk = KazooClient(hosts=self.get_instance_ip(zoo_instance_name))
     zk.start()
     return zk
Example #6
import logging
import socket

import flask

app = flask.Flask(__name__)

app.debug = True
app.logger.setLevel(logging.DEBUG)

host_name = socket.gethostname()

LOCAL_FLAG = False

if LOCAL_FLAG:
    invdata = ["http://*****:*****"]  # credentials elided in the source


@app.route('/testaq', methods=['GET'])
Example #7
#!/bin/blue-python3.8
# This script runs as a cron job many times.
# It triggers the puppet SSL certificate regeneration, waits for certificate revocation
# on puppet server side and removes the local certificate.
#
# The second script (puppetserver-ensemble.py) runs on puppet servers
# and removes the node certificate from Puppet by request in Zookeeper.
import os
import sys
import re
import socket
import argparse
import logging
from subprocess import check_output, check_call
from kazoo.client import KazooClient

ZK_PATH_BASE = "/puppetserver/ensemble"
ZK_PATH_REMOVE_REQUESTS = f"{ZK_PATH_BASE}/remove_requests"
ZK_PATH_REMOVE_REQUESTS_PROCESSED = f"{ZK_PATH_BASE}/remove_requests_processed"
HOSTNAME = socket.gethostname()
CERT_PATH = f"/etc/puppetlabs/puppet/ssl/certs/{HOSTNAME}.pem"

logger = logging.getLogger()


def read_bookings_env_var(var):
    with open("/etc/sysconfig/bookings.puppet") as f:
        lines = f.readlines()
    for line in lines:
        if line.startswith(f"{var}="):
            return line[len(f"{var}="):]
    return None


def get_certificate_alt_names(certificate_path):
    # The function returns the list of alternative names in puppet certificate
    # /bin/openssl x509 -in /etc/puppetlabs/puppet/ssl/certs/$(uname -n).pem -text | grep -A 1 'Subject Alternative Name'
    command = f"/bin/openssl x509 -in {certificate_path} -text".split()
    certificate_output = check_output(command, universal_newlines=True)
    if not certificate_output:
        logger.info(f"Cannot get the certificate details from {certificate_path}.")
        return []
    lines_with_dns = [line for line in certificate_output.split('\n') if 'DNS:' in line]
    if not lines_with_dns:
        return []
    alt_names = re.findall(r"[\w.-]+\.prod\.booking\.com", lines_with_dns[0])
    if HOSTNAME in alt_names:
        alt_names.remove(HOSTNAME)
    return alt_names


def remove_file(file):
    if os.path.exists(file):
        os.remove(file)
        logging.debug(f"Removed file {file}")


def main():
    parser = argparse.ArgumentParser(description="Regenerate puppet certificate")
    parser.add_argument(
        "--debug",
        action="store_const",
        const=True,
        default=False,
        help="More verbose logging",
    )
    args = parser.parse_args()
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(
        level=level,
        format="[{levelname:7s}| {asctime} | {module:10s}] {message}",
        style="{",
        stream=sys.stdout,
    )
    if not os.path.exists(CERT_PATH):
        logging.info(f"Certificate does not exist at {CERT_PATH}. Exiting.")
        return
    zk = KazooClient(hosts=read_bookings_env_var("ZOOKEEPER_CLUSTER"))
    zk.start()
    alt_names = get_certificate_alt_names(CERT_PATH)
    if alt_names:
        logging.info(f"Certificate {CERT_PATH} has alt names {alt_names}. Restarting nginx service.")
        # Cleanup request processing
        if zk.exists(f"{ZK_PATH_REMOVE_REQUESTS_PROCESSED}/{HOSTNAME}"):
            zk.delete(f"{ZK_PATH_REMOVE_REQUESTS_PROCESSED}/{HOSTNAME}")
        check_call("/bin/systemctl restart nginx".split())
        return
    if zk.exists(f"{ZK_PATH_REMOVE_REQUESTS_PROCESSED}/{HOSTNAME}"):
        logging.info(f"Request to remove certificate is processed: {ZK_PATH_REMOVE_REQUESTS_PROCESSED}/{HOSTNAME} exists. Clean up the local certificate.")
        # cleanup existing certificate
        remove_file(f"/etc/puppetlabs/puppet/ssl/certs/{HOSTNAME}.pem")
        remove_file(f"/etc/puppetlabs/puppet/ssl/private_keys/{HOSTNAME}.pem")
        remove_file(f"/etc/puppetlabs/puppet/ssl/public_keys/{HOSTNAME}.pem")
        logging.info("Restarting puppet daemon")
        check_call("/bin/systemctl restart puppet".split())
    else:
        # if the request to clean the certificate on puppet server side is not processed, make a cleanup request
        logging.info("Request the certificate cleanup.")
        zk.ensure_path(f"{ZK_PATH_REMOVE_REQUESTS}/{HOSTNAME}")


if __name__ == '__main__':
    main()
Example #8
                      help='zookeeper node update')
    parser.add_option('',
                      '--copy',
                      dest='copy',
                      default='',
                      help='zookeeper copy addr:port/new_path')
    parser.add_option('',
                      '--read_tree',
                      dest='read_tree',
                      default=False,
                      help='zookeeper node read tree',
                      action='store_true')

    (options, args) = parser.parse_args()

    zk = KazooClient(options.address)
    zk.start()

    if options.read:
        do_zookeeper_read(zk, options.node)
    elif options.create != '':
        do_zookeeper_create(zk, options.node, options.create)
    elif options.delete:
        do_zookeeper_delete(zk, options.node)
    elif options.update != '':
        do_zookeeper_update(zk, options.node, options.update)
    elif options.copy != '':
        dest_addr, dest_path = options.copy.split('/', 1)
        zk_dest = KazooClient(dest_addr)
        zk_dest.start()
        do_zookeeper_copy(zk, options.node, zk_dest, '/' + dest_path)
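
# Hypothetical sketch of one of the helpers dispatched above (their real
# definitions sit outside this fragment):
# def do_zookeeper_read(zk, node):
#     data, stat = zk.get(node)
#     print(data.decode(), "version", stat.version)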
Example #9
import logging
import sys
from kazoo.client import KazooClient

logging.basicConfig(level=logging.DEBUG)

if len(sys.argv) < 2:
    print("Not enough input parameters")
    exit(1)

#Create a KazooClient object and establish a connection
zk = KazooClient(hosts=sys.argv[1], read_only=True, logger=logging)
zk.start()

path = "/myApp"

#Store the data
if zk.exists(path):
    zk.delete(path, recursive=True)

zk.ensure_path(path)
zk.create(path + "/broker_addr", b"172.16.2.34")
zk.create(path + "/broker_port", b"5672")
zk.create(path + "/broker_user", b"root")
zk.create(path + "/broker_psw", b"root")
zk.create(path + "/db_addr", b"172.16.2.57")
zk.create(path + "/db_user", b"root")
zk.create(path + "/db_psw", b"newpassword")
zk.create(path + "/db_name", b"mails")

exit(0)
Example #10
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.zk = KazooClient()
        self.zkTimer = QTimer(self)
        self.zkTimer.setInterval(100)
        self.zkTimer.timeout.connect(self.zkTimeout)
        self.zkStartThread = threading.Thread(target=self.zkConnect)
        self.msgBox = QMessageBox(QMessageBox.NoIcon, "Connection",
                                  "Connecting...", QMessageBox.Cancel, self)
        self.treeWidget.itemClicked.connect(self.itemClicked)
        self.treeWidget.itemDoubleClicked.connect(self.itemOpen)
        self.tabWidget.tabCloseRequested.connect(self.closeTab)
        self.actionConnect.triggered.connect(self.msgBox.show)
        self.actionConnect.triggered.connect(self.zkStartThread.start)
        self.actionConnect.triggered.connect(self.zkTimer.start)
        self.actionDisconnect.triggered.connect(self.zkDisconnect)
        self.actionACLVersion.triggered.connect(self.aclVersion)
        self.actionCreated.triggered.connect(self.created)
        self.actionChildrenCount.triggered.connect(self.childrenCount)
        self.actionDataLength.triggered.connect(self.dataLength)
        self.actionLastModified.triggered.connect(self.lastModified)
        self.actionLastModifiedTransactionId.triggered.connect(
            self.lastModifiedTransactionId)
        self.actionOwnerSessionId.triggered.connect(self.ownerSessionId)
        self.actionVersion.triggered.connect(self.version)
        self.actionCreationTransactionId.triggered.connect(
            self.creationTransactionId)
        self.actionChangeServerAddress.triggered.connect(
            self.changeServerAddress)
        self.msgBox.rejected.connect(self.zkTimer.stop)
        self.msgBox.rejected.connect(self.msgBox.hide)
        self.msgBox.rejected.connect(self.zkDisconnect)
        self.mainWriteGui.connect(self.slotMainWriteGui)
        self.log.setCenterOnScroll(True)
        self.dialog = SelectorDialog(self)

        class PlainTextWidgetHandler:
            def __init__(self, logToWriteGui):
                self.logToWriteGui = logToWriteGui

            def write(self, text):
                self.logToWriteGui(text)

            def flush(self):
                pass

        logging.basicConfig(format='%(asctime)s.%(msecs)d: %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG,
                            handlers=[
                                logging.StreamHandler(
                                    PlainTextWidgetHandler(
                                        self.logToWriteGui)),
                                logging.StreamHandler(sys.stderr)
                            ])

        self.treeWidget.setColumnCount(1)
        self.treeWidget.sortByColumn(0, Qt.AscendingOrder)

        layout = self.msgBox.layout()
        progress = QProgressBar()
        progress.setMaximum(0)
        progress.setMinimum(0)
        layout.addWidget(progress, layout.rowCount() - 2, 1, 1, layout.columnCount())

        self.actionConnect.setEnabled(False)
        if os.path.exists("config.txt"):
            with open("config.txt", "r") as f:
                prelines = [string.strip() for string in f.readlines()]
                lines = [prelines[0]]
                for i in range(1, len(prelines)):
                    if prelines[i - 1] != prelines[i] and prelines[i] not in lines:
                        lines.append(prelines[i])
                lines = list(filter(None, lines))
                self.dialog.comboBox.addItems(lines)
                self.actionConnect.setEnabled(True)
Example #11
 def __init__(self, servers, timeout):
     self.connected = False
     print("Connecting to %s" % (servers))
     self.zk_client = KazooClient(hosts=servers)
     self.zk_client.start(timeout=timeout)
Example #12
from flask import Flask, jsonify, request, redirect, session, flash, render_template, url_for, make_response
import json 
import mysql.connector, random, string, jwt, datetime
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, 
    BadSignature, SignatureExpired)
from functools import wraps 
import dateparser as dp
import sys
import logging
from kazoo.client import KazooClient
     
app = Flask(__name__) # create an app instance
logging.basicConfig(level=logging.DEBUG)   

zk = KazooClient(hosts='172.16.1.11:2181')
zk.start()

# Ensure a path, create if necessary
zk.ensure_path("/boardgames")

# Create a node with data
zk.create("/boardgames/auth", b"Something3")      
            
app.secret_key = 'thisismysecretdonottouchit'
#app.secret_key = ''.join(random.choice(string.ascii_uppercase + string.digits)
#    for x in range(32)) #a random string of 32 characters 


def connect_to_db():
    #authentication db connection
    mydb = mysql.connector.connect(
Example #13
 def __init__(self, host, root="/"):
     self.zk = KazooClient(hosts=host)  # use the host argument rather than a hard-coded address
     self.zk.start()
     self.root = root
Example #14
        sys.exit(1)
else:
    log.error("The JAVA_HOME environment variable must be set")
    sys.exit(1)

# Validate we got a good ZK connect string
zkparts = args.zookeeper.split('/')
if len(zkparts) != 2:
    log.error(
        'You must specify a full Zookeeper path (i.e. zoo1.example.com:2181/kafka-cluster)'
    )
    sys.exit(1)

log.info("Connecting to zookeeper {0}".format(zkparts[0]))
try:
    zk = KazooClient(zkparts[0])
    zk.start()
except KazooException as e:
    log.error("Cannot connect to Zookeeper: {0}".format(e))
    sys.exit(1)

# Get broker list
cluster = Cluster()
for b in zk.get_children("/{0}/brokers/ids".format(zkparts[1])):
    bdata, bstat = zk.get("/{0}/brokers/ids/{1}".format(zkparts[1], b))
    bj = json.loads(bdata)
    cluster.brokers[int(b)] = Broker(int(b), bj['host'])

# Get current partition state
log.info("Getting partition list from Zookeeper")
for topic in zk.get_children("/{0}/brokers/topics".format(zkparts[1])):
Example #15
def zookeeper_resolve_leader(addresses, path):
    """
    Resolve the leader using a znode path. ZooKeeper imposes a total
    order on the elements of the queue, guaranteeing that the
    oldest element of the queue is the first one. We can
    thus return the first address we get from ZooKeeper.
    """
    hosts = ",".join(addresses)

    try:
        zk = KazooClient(hosts=hosts)
        zk.start()
    except Exception as exception:
        raise CLIException(
            "Unable to initialize Zookeeper Client: {error}".format(
                error=exception))

    try:
        children = zk.get_children(path)
    except Exception as exception:
        raise CLIException(
            "Unable to get children of {zk_path}: {error}".format(
                zk_path=path, error=exception))

    masters = sorted(
        # 'json.info' is the prefix for master nodes.
        child for child in children if child.startswith("json.info"))

    address = ""
    for master in masters:
        try:
            node_path = "{path}/{node}".format(path=path, node=master)
            json_data, _ = zk.get(node_path)
        except Exception as exception:
            raise CLIException(
                "Unable to get the value of '{node}': {error}".format(
                    node=node_path, error=exception))

        try:
            data = json.loads(json_data)
        except Exception as exception:
            raise CLIException(
                "Could not load JSON from '{data}': {error}".format(
                    data=data, error=str(exception)))

        if ("address" in data and "ip" in data["address"]
                and "port" in data["address"]):
            address = "{ip}:{port}".format(ip=data["address"]["ip"],
                                           port=data["address"]["port"])
            break

    try:
        zk.stop()
    except Exception as exception:
        raise CLIException(
            "Unable to stop Zookeeper Client: {error}".format(error=exception))

    if not address:
        raise CLIException("Unable to resolve the leading"
                           " master using ZooKeeper")
    return address
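
# Added usage sketch (hosts and znode path are hypothetical):
# leader = zookeeper_resolve_leader(
#     ["zk1.example.com:2181", "zk2.example.com:2181"], "/mesos")
# print("Leading master:", leader)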
Example #16
 def __init__(self):
     self.zk_client = KazooClient(hosts=Config().getZookeeperAddress())
     self.zk_client.start()
Example #17
            Reason = "Failed to connect to MongoDB"
        elif not RedisOK:
            logging.debug("Failed to connect to RedisDB")
            Reason = "Failed to connect to RedisDB"
        else:
            logging.debug("Failed to connect to ZooKeeper")
            Reason = "Failed to connect to ZooKeeper"

        jresp = json.dumps({"status": "fail", "reason": Reason})
        resp = Response(jresp, status=500, mimetype='application/json')
        return resp


if __name__ == '__main__':
    try:
        zk = KazooClient(hosts=config.ZOOKEEPER_HOST, timeout=5,
                         connection_retry=KazooRetry(max_tries=3))  # KazooClient has no max_retries kwarg; KazooRetry (from kazoo.retry) is the supported mechanism
        zk.start()
        try:
            if zk.exists("/databases/mongodb"):
                mongodata = zk.get("/databases/mongodb")
                mongodata = json.loads(mongodata[0])
                mongourl = mongodata["endpoints"]["url"]
                mongousername = mongodata["endpoints"]["username"]
                mongopassword = mongodata["endpoints"]["password"]
                logging.debug("Fetched mongodb config from zookeeper")
            else:
                mongourl = config.MONGODB_HOST
                mongousername = config.MONGODB_USERNAME
                mongopassword = config.MONGODB_PWD
        except Exception:
            logging.debug(
Example #18
from kazoo.client import KazooClient
from time import sleep
import json
import threading
import random
import os
import sys
import scrapy  # needed for scrapy.Spider below
reload(sys)  # Python 2 only: allows setdefaultencoding below
sys.setdefaultencoding('utf-8')

topK = 2
task_dir = '/task/ctoblog/'
work_co = 0
working_set = set()
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
zk = KazooClient(hosts=hosts_list)


class SpiderCtoblogSpider(scrapy.Spider):
    name = 'spider_ctoblog'
    allowed_domains = ['51cto.com']
    start_urls = ['http://blog.51cto.com/expert']

    def parse(self, response):

        zk.start()
        znode_path = zk.create("/pid/ctoblog/node-", ephemeral=True, sequence=True)
        myid = znode_path[-10:]
        mytask_dir = task_dir + "node-" + myid
        try:
            zk.create('/task/ctoblog')
Example #19
def get_fake_zk(nodename, timeout=30.0):
    _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) +
                                    ":9181",
                                    timeout=timeout)
    _fake_zk_instance.start()
    return _fake_zk_instance
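
# Added usage sketch: `cluster` is assumed to be a test fixture exposing
# get_instance_ip(); the returned object is an ordinary KazooClient.
# zk1 = get_fake_zk("node1")
# zk1.create("/test", b"data")
# zk1.stop()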
Example #20
    def read_config_file(self, config_file=None):
        """
        Read configuration file and initialize object. If config file is None, it will use default value
        :param config_file: path to configuration file
        :return:
        """
        # Stop storage service
        self.stop_storage_service()

        config = configparser.ConfigParser()
        if config_file is not None:
            config.read(os.path.realpath(config_file))

        # Main configuration
        self.__id = config.get("OPV", "id", fallback="ID")
        self.__path = config.get("OPV",
                                 "path",
                                 fallback="directory_manager_storage")
        self.__path = os.path.realpath(os.path.expanduser(self.__path))
        self.__host = config.get("OPV",
                                 "host",
                                 fallback=socket.gethostbyname(
                                     socket.gethostname()))
        uid_generator_type = config.get("OPV", "uid_type",
                                        fallback="basic").upper()

        # FTP configuration
        ftp_host = config.get("FTP", "host", fallback="0.0.0.0")
        ftp_port = config.getint("FTP", "port", fallback=2121)
        ftp_logfile = config.get("FTP",
                                 "logfile",
                                 fallback="opv_directory_manager_ftp.log")

        # HTTP configuration
        http_host = config.get("HTTP", "host", fallback="0.0.0.0")
        http_port = config.getint("HTTP", "port", fallback=5050)
        http_logfile = config.get("HTTP",
                                  "logfile",
                                  fallback="opv_directory_manager_http.log")

        # Id
        if uid_generator_type in ["ZOOKEEPER", "ZK"]:
            zk_hosts = config.get("ZOOKEEPER",
                                  "hosts",
                                  fallback="127.0.0.1:2181")
            zk_path = config.get("ZOOKEEPER",
                                 "path",
                                 fallback="/DirectoryManager/increment")
            print(zk_hosts)
            zk = KazooClient(zk_hosts)
            zk.start()
            self.__uid_generator = ZkIDGenerator(zk,
                                                 path=zk_path,
                                                 prefix=self.__id)
        else:
            self.__uid_generator = BasicIDGenerator(prefix=self.__id)

        # Storage
        self.__storage = LocalStorage(self.__path)

        # FTP
        ftp_storage_service = FTP(self.__path,
                                  host=self.__host,
                                  listen_host=ftp_host,
                                  listen_port=ftp_port,
                                  logfile=ftp_logfile)

        # HTTP
        http_storage_service = HTTP(self.__path,
                                    host=self.__host,
                                    listen_host=http_host,
                                    listen_port=http_port,
                                    logfile=http_logfile)

        # Local
        local_storage_service = LocalStorageService(self.__path)

        # Storage service
        self.__storage_service_manager = StorageServiceManager(
            "ftp", ftp_storage_service)
        self.__storage_service_manager.addURI("file", local_storage_service)
        self.__storage_service_manager.addURI("http", http_storage_service)
Example #21
            values_list.append(values)

        i = 0
        print("Records retrieved were:")
        for key in keys:
            print(key, values_list[i])
            i += 1

    elif command == 'quit' or command == 'exit':
        exit(0)

    else:
        print('ERROR: Command not found.')

# zookeeper initialisation
zk = KazooClient(hosts='192.168.43.228:2181')  # ZK server ip
# zk = KazooClient(hosts='127.0.0.1:2181')  # ZK server ip (local)
zk.start()
zk.add_listener(my_listener)

# main
master_addr = onrequest()  # returns the address of the master node, -1 on error
print('Master server addr: ', master_addr)
while True:
    command_string = input().rstrip()
    input_list = command_string.split(' ')

    input_handler(input_list)
    print("\n\n")
Example #22
 def __init__(self, module):
     self.module = module
     self.zk = KazooClient(module.params['hosts'])
Example #23
 def __init__(self):
     self.zk = KazooClient(hosts='10.0.0.2:2181')
     self.zk.start()
Example #24
https://kazoo.readthedocs.io/en/latest/basic_usage.html#listening-for-connection-events
"""

logging.basicConfig()


# Can also use the @DataWatch and @ChildrenWatch decorators for the same
def demo_func(event):
    # Create a node with data
    zk.create("/producer/node_2", b"new demo producer node")
    print(event)
    children = zk.get_children("/producer")
    print("There are %s children with names %s" % (len(children), children))


zk = KazooClient(hosts='zoo:2181')
zk.start()
# Deleting all existing nodes (just so the demo starts from a clean state)
if zk.exists("/producer"):
    zk.delete("/producer", recursive=True)

# Ensure a path, create if necessary
zk.ensure_path("/producer")

# Create a node with data
if zk.exists("/producer/node_1"):
    print("Node already exists")
else:
    zk.create("/producer/node_1", b"demo producer node")

# Print the version of a node and its data
data, stat = zk.get("/producer/node_1")
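
# Added sketch of the decorator forms mentioned above; kazoo keeps these
# watches registered across events (unlike plain watch= callbacks).
@zk.DataWatch("/producer/node_1")
def watch_node(data, stat):
    if stat is not None:
        print("node_1 is at version %s with data %s" % (stat.version, data))

@zk.ChildrenWatch("/producer")
def watch_children(children):
    print("children of /producer: %s" % children)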
Example #25
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/12/23 23:04
# @Author  : [email protected]
# @File    : pyzk
# @Software: Pycharm
from kazoo.client import KazooClient
from datetime import datetime
import time
# zk = KazooClient(hosts="192.168.0.224:2171")
zk = KazooClient(
    hosts="127.0.0.1:2181",
    auth_data=[("digest", "wgq:0808")],
    #  sasl_options = {
    #     'mechanism': 'DIGEST-MD5',
    #     'username': '******',
    #     'password': '******'
    # }
)
zk.start()


def test():
    zk.create("/wgq", "mycontent".encode(), makepath=True)
    print(zk.get("/wgq", watch=pp))


def pp(event):
    print("event go", datetime.now())
Example #26
def get_query_server_config(name='beeswax', connector=None):
  if connector and has_connectors(): # TODO: Give empty connector when no connector in use
    query_server = get_query_server_config_via_connector(connector)
  else:
    LOG.debug("Query cluster %s" % name)
    if name == "llap":
      activeEndpoint = cache.get('llap')
      if activeEndpoint is None:
        if HIVE_DISCOVERY_LLAP.get():
          LOG.debug("Checking zookeeper for Hive Server Interactive endpoint")
          zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
          zk.start()
          if HIVE_DISCOVERY_LLAP_HA.get():
            znode = "{0}/instances".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
            LOG.debug("Setting up LLAP with the following node {0}".format(znode))
            if zk.exists(znode):
              hiveservers = zk.get_children(znode)
              for server in hiveservers:
                llap_servers= json.loads(zk.get("{0}/{1}".format(znode, server))[0])["internal"][0]
                if llap_servers["api"] == "activeEndpoint":
                  cache.set("llap", json.dumps({"host": llap_servers["addresses"][0]["host"], "port": llap_servers["addresses"][0]["port"]}), CACHE_TIMEOUT.get())
            else:
              LOG.error("LLAP Endpoint not found, reverting to HiveServer2")
              cache.set("llap", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": HIVE_HTTP_THRIFT_PORT.get()}), CACHE_TIMEOUT.get())
          else:
            znode = "{0}".format(HIVE_DISCOVERY_LLAP_ZNODE.get())
            LOG.debug("Setting up LLAP with the following node {0}".format(znode))
            if zk.exists(znode):
              hiveservers = zk.get_children(znode)
              for server in hiveservers:
                cache.set("llap", json.dumps({"host": server.split(';')[0].split('=')[1].split(":")[0], "port": server.split(';')[0].split('=')[1].split(":")[1]}))
          zk.stop()
        else:
          LOG.debug("Zookeeper Discovery not enabled, reverting to config values")
          cache.set("llap", json.dumps({"host": LLAP_SERVER_HOST.get(), "port": LLAP_SERVER_THRIFT_PORT.get()}), CACHE_TIMEOUT.get())
      activeEndpoint = json.loads(cache.get("llap"))
    elif name != 'hms' and name != 'impala':
      activeEndpoint = cache.get("hiveserver2")
      if activeEndpoint is None:
        if HIVE_DISCOVERY_HS2.get():
          zk = KazooClient(hosts=libzookeeper_conf.ENSEMBLE.get(), read_only=True)
          zk.start()
          znode = HIVE_DISCOVERY_HIVESERVER2_ZNODE.get()
          LOG.info("Setting up Hive with the following node {0}".format(znode))
          if zk.exists(znode):
            hiveservers = zk.get_children(znode)
            server_to_use = 0 # if CONF.HIVE_SPREAD.get() randint(0, len(hiveservers)-1) else 0
            cache.set("hiveserver2", json.dumps({"host": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[0], "port": hiveservers[server_to_use].split(";")[0].split("=")[1].split(":")[1]}))
          else:
            cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": HIVE_HTTP_THRIFT_PORT.get()}))
          zk.stop()
        else:
          cache.set("hiveserver2", json.dumps({"host": HIVE_SERVER_HOST.get(), "port": HIVE_HTTP_THRIFT_PORT.get()}))
      activeEndpoint = json.loads(cache.get("hiveserver2"))

    if name == 'impala':
      from impala.dbms import get_query_server_config as impala_query_server_config
      query_server = impala_query_server_config()
    elif name == 'hms':
      kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
      query_server = {
          'server_name': 'hms',
          'server_host': HIVE_METASTORE_HOST.get() if not cluster_config else cluster_config.get('server_host'),
          'server_port': HIVE_METASTORE_PORT.get(),
          'principal': kerberos_principal,
          'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
          'auth_username': AUTH_USERNAME.get(),
          'auth_password': AUTH_PASSWORD.get(),
          'use_sasl': HIVE_USE_SASL.get()
      }
    else:
      kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
      query_server = {
          'server_name': 'beeswax',
          'server_host': activeEndpoint["host"],
          'server_port': LLAP_SERVER_PORT.get() if name == 'llap' else HIVE_SERVER_PORT.get(),
          'principal': kerberos_principal,
          'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
              'protocol': 'https' if hiveserver2_use_ssl() else 'http',
              'host': activeEndpoint["host"],
              'port': activeEndpoint["port"],
              'end_point': hive_site.hiveserver2_thrift_http_path()
            },
          'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
          'auth_username': AUTH_USERNAME.get(),
          'auth_password': AUTH_PASSWORD.get(),
          'use_sasl': HIVE_USE_SASL.get(),
          'close_sessions': CLOSE_SESSIONS.get(),
          'has_session_pool': has_session_pool(),
          'max_number_of_sessions': MAX_NUMBER_OF_SESSIONS.get()
        }

    if name == 'sparksql': # Extends Hive as very similar
      from spark.conf import SQL_SERVER_HOST as SPARK_SERVER_HOST, SQL_SERVER_PORT as SPARK_SERVER_PORT, USE_SASL as SPARK_USE_SASL

      query_server.update({
          'server_name': 'sparksql',
          'server_host': SPARK_SERVER_HOST.get(),
          'server_port': SPARK_SERVER_PORT.get(),
          'use_sasl': SPARK_USE_SASL.get()
      })

  if not query_server.get('dialect'):
    query_server['dialect'] = query_server['server_name']

  debug_query_server = query_server.copy()
  debug_query_server['auth_password_used'] = bool(debug_query_server.pop('auth_password', None))
  LOG.debug("Query Server: %s" % debug_query_server)

  return query_server
Example #27
from kazoo.client import KazooClient, KazooState
from socket import *
import sys
import json
import os
import ast
import time
zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
print "Hi! I am server responsible for keys beginning with 'a' and going on till 'o'"
print "I can be the master if the master server dies!" 
string=""
while 1 :
	set1=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o']
	alphadict1={"@": "atr" , "?":"qm" , "#":"hash" ,"$":"dollar" , "%":"perc"}
	alphadict={"a": "apple" , "b":"ball" , "c":"cat" ,"d":"dog" , "m":"mango"}
	store=[]	
	from socket import *
	serverPort = 12345
	serverSocket = socket(AF_INET,SOCK_STREAM) 
	serverSocket.bind(('',serverPort)) 
	serverSocket.listen(1)
	temp_var1=[]
	port_list=[]
	name_list=[]
	port_list1=[]
	name_list1=[]
	lis_mixed=[]
	list_mixed=[]
	change=" "
	bk_dict={}
Example #28
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@author: Great God
'''
import sys
sys.path.append("..")
from lib.SendRoute import SendRoute
from kazoo.client import KazooClient
from lib.get_conf import GetConf
from zk_handle.zkHandler import zkHander
from contextlib import closing
import getopt

zk = KazooClient(hosts=GetConf().GetZKHosts())
zk.start()

class White():
        '''Manual operation: run setwhite first, then operate on the master; run setmaster when done'''
    def __init__(self):
        pass
    def SetWhite(self,groupname):
        path = GetConf().GetWhitePath()
        return zkHander().Create(path=path+'/'+groupname,value='',seq=False)
    def SetMaster(self,groupname,host,onlywatch=None):
        if onlywatch:
            value = [groupname,host.replace('.','-'),'white','onlywatch']
        else:
            value = [groupname, host.replace('.', '-'), 'white']
        path = GetConf().GetTaskPath()
        zkHander().DeleteWhite(groupname)
Example #29
    def check(self, instance):
        consumer_groups = self.read_config(instance,
                                           'consumer_groups',
                                           cast=self._validate_consumer_groups)
        zk_connect_str = self.read_config(instance, 'zk_connect_str')
        kafka_host_ports = self.read_config(instance, 'kafka_connect_str')

        # Construct the Zookeeper path pattern
        zk_prefix = instance.get('zk_prefix', '')
        zk_path_tmpl = zk_prefix + '/consumers/%s/offsets/%s/%s'

        # Connect to Zookeeper
        zk_conn = KazooClient(zk_connect_str, timeout=self.zk_timeout)
        zk_conn.start()

        try:
            # Query Zookeeper for consumer offsets
            consumer_offsets = {}
            topics = defaultdict(set)
            for consumer_group, topic_partitions in consumer_groups.items():
                for topic, partitions in topic_partitions.items():
                    # Remember the topic partitions that we've seen so that we
                    # can look up their broker offsets later
                    topics[topic].update(set(partitions))
                    for partition in partitions:
                        zk_path = zk_path_tmpl % (consumer_group, topic,
                                                  partition)
                        try:
                            consumer_offset = int(zk_conn.get(zk_path)[0])
                            key = (consumer_group, topic, partition)
                            consumer_offsets[key] = consumer_offset
                        except NoNodeError:
                            self.log.warn('No zookeeper node at %s' % zk_path)
                        except Exception:
                            self.log.exception(
                                'Could not read consumer offset from %s' %
                                zk_path)
        finally:
            try:
                zk_conn.stop()
                zk_conn.close()
            except Exception:
                self.log.exception('Error cleaning up Zookeeper connection')

        # Connect to Kafka
        kafka_conn = KafkaClient(kafka_host_ports, timeout=self.kafka_timeout)

        try:
            # Query Kafka for the broker offsets
            broker_offsets = {}
            for topic, partitions in topics.items():
                offset_responses = kafka_conn.send_offset_request(
                    [OffsetRequest(topic, p, -1, 1) for p in partitions])

                for resp in offset_responses:
                    broker_offsets[(resp.topic,
                                    resp.partition)] = resp.offsets[0]
        finally:
            try:
                kafka_conn.close()
            except Exception:
                self.log.exception('Error cleaning up Kafka connection')

        # Report the broker data
        for (topic, partition), broker_offset in broker_offsets.items():
            broker_tags = ['topic:%s' % topic, 'partition:%s' % partition]
            self.gauge('kafka.broker_offset', broker_offset, tags=broker_tags)

        # Report the consumer offsets and lag
        for (consumer_group, topic,
             partition), consumer_offset in consumer_offsets.items():

            # Get the broker offset
            broker_offset = broker_offsets.get((topic, partition))

            # Report the consumer offset and lag
            tags = [
                'topic:%s' % topic,
                'partition:%s' % partition,
                'consumer_group:%s' % consumer_group
            ]
            self.gauge('kafka.consumer_offset', consumer_offset, tags=tags)
            self.gauge('kafka.consumer_lag',
                       broker_offset - consumer_offset,
                       tags=tags)
Example #30
    def _run(self):
        while True:
            self._logger.error("Analytics Discovery zk start")
            self._zk = KazooClient(hosts=self._zkservers)
            self._zk.add_listener(self._zk_listen)
            try:
                self._zk.start()
                while self._conn_state != ConnectionStatus.UP:
                    gevent.sleep(1)
                break
            except Exception as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                self._zk.remove_listener(self._zk_listen)
                try:
                    self._zk.stop()
                    self._zk.close()
                except Exception as ex:
                    template = "Exception {0} in AnalyticsDiscovery zk stop/close. Args:\n{1!r}"
                    messag = template.format(type(ex).__name__, ex.args)
                    self._logger.error("%s : traceback %s for %s" % \
                        (messag, traceback.format_exc(), self._svc_name))
                finally:
                    self._zk = None
                gevent.sleep(1)

        try:
            # Update connection info
            self._sandesh_connection_info_update(status='UP', message='')
            self._reconnect = False
            # Done connecting to ZooKeeper

            for wk in self._watchers.keys():
                self._zk.ensure_path(self._basepath + "/" + wk)
                self._wchildren[wk] = {}
                self._zk.ChildrenWatch(self._basepath + "/" + wk,
                                       partial(self._zk_watcher, wk))

            # Trigger the initial publish
            self._reconnect = True

            while True:
                try:
                    if not self._reconnect:
                        pending_list = list(self._pendingcb)
                        self._pendingcb = set()
                        for wk in pending_list:
                            if self._watchers[wk]:
                                self._watchers[wk](\
                                        sorted(self._wchildren[wk].values()))

                    # If a reconnect happens during processing, don't lose it
                    while self._reconnect:
                        self._logger.error("Analytics Discovery %s reconnect" \
                                % self._svc_name)
                        self._reconnect = False
                        self._pendingcb = set()
                        self.publish(self._pubinfo)

                        for wk in self._watchers.keys():
                            self._zk.ensure_path(self._basepath + "/" + wk)
                            children = self._zk.get_children(self._basepath +
                                                             "/" + wk)

                            old_children = set(self._wchildren[wk].keys())
                            new_children = set(children)

                            # Remove contents for the children who are gone
                            # (DO NOT remove the watch)
                            for elem in old_children - new_children:
                                del self._wchildren[wk][elem]

                            # Overwrite existing children, or create new ones
                            for elem in new_children:
                                # Create a watch for new children
                                if elem not in self._wchildren[wk]:
                                    self._zk.DataWatch(self._basepath + "/" + \
                                            wk + "/" + elem,
                                            partial(self._zk_datawatch, wk, elem))

                                data_str, _ = self._zk.get(\
                                        self._basepath + "/" + wk + "/" + elem)
                                data_dict = json.loads(data_str)
                                self._wchildren[wk][elem] = \
                                        OrderedDict(sorted(data_dict.items()))

                                self._logger.error(\
                                    "Analytics Discovery %s ChildData : child %s, data %s, event %s" % \
                                    (wk, elem, self._wchildren[wk][elem], "GET"))
                            if self._watchers[wk]:
                                self._watchers[wk](sorted(
                                    self._wchildren[wk].values()))

                    gevent.sleep(self._freq)
                except gevent.GreenletExit:
                    self._logger.error("Exiting AnalyticsDiscovery for %s" % \
                            self._svc_name)
                    self._zk.remove_listener(self._zk_listen)
                    gevent.sleep(1)
                    try:
                        self._zk.stop()
                    except Exception:
                        self._logger.error("Stopping kazooclient failed")
                    else:
                        self._logger.error("Stopping kazooclient successful")
                    try:
                        self._zk.close()
                    except Exception:
                        self._logger.error("Closing kazooclient failed")
                    else:
                        self._logger.error("Closing kazooclient successful")
                    break

                except Exception as ex:
                    template = "Exception {0} in AnalyticsDiscovery reconnect. Args:\n{1!r}"
                    messag = template.format(type(ex).__name__, ex.args)
                    self._logger.error("%s : traceback %s for %s info %s" % \
                        (messag, traceback.format_exc(), self._svc_name, str(self._pubinfo)))
                    self._reconnect = True

        except Exception as ex:
            template = "Exception {0} in AnalyticsDiscovery run. Args:\n{1!r}"
            messag = template.format(type(ex).__name__, ex.args)
            self._logger.error("%s : traceback %s for %s info %s" % \
                    (messag, traceback.format_exc(), self._svc_name, str(self._pubinfo)))
            raise SystemExit