Example no. 1
 def readAMHostPort(self):
   amHost = ""
   amSecuredPort = ""
   zk = None
   try:
     zk = KazooClient(hosts=self.zk_quorum, read_only=True)
     zk.start()
     data, stat = zk.get(self.zk_reg_path)
     logger.debug("Registry Data: %s" % (data.decode("utf-8")))
     sliderRegistry = json.loads(data)
     amUrl = sliderRegistry["payload"]["internalView"]["endpoints"]["org.apache.slider.agents"]["address"]
     amHost = amUrl.split("/")[2].split(":")[0]
     amSecuredPort = amUrl.split(":")[2].split("/")[0]
     # the port needs to be utf-8 encoded 
     amSecuredPort = amSecuredPort.encode('utf8', 'ignore')
   except Exception:
     # log and let empty strings be returned
     logger.error("Could not connect to zk registry at %s in quorum %s" % 
                  (self.zk_reg_path, self.zk_quorum))
   finally:
     if zk is not None:
       zk.stop()
       zk.close()
   logger.info("AM Host = %s, AM Secured Port = %s" % (amHost, amSecuredPort))
   return amHost, amSecuredPort
Example no. 2
def cleanup(args):
    now = dt.utcnow()
    server = '{server}:{port}'.format(server=args.server, port=args.port)
    logging.info('Connecting to {}'.format(server))
    zk = KazooClient(hosts=server)
    zk.start()

    for path in args.zk_paths:
        zk_path = '{}/{}'.format(args.zk_root_path, path)
        nodes = zk.get_children(zk_path)
        logging.info("Found {} nodes under {}".format(len(nodes), zk_path))

        deleted = 0
        for node in nodes:
            node_path = '{}/{}'.format(zk_path, node)
            data, stat = zk.get(node_path)
            last_modified = dt.fromtimestamp(stat.mtime/1000.0)
            if ((now - last_modified).days > args.age) or (args.inclusive and (now - last_modified).days >= args.age):
                if not args.dry_run:
                    # Kazoo does not support recursive async deletes
                    if stat.children_count == 0:
                        res = zk.delete_async(node_path)
                    else:
                        zk.delete(node_path, recursive=True)
                deleted += 1

        logging.info("Deleted {} nodes".format(deleted))

    zk.stop()
Example no. 3
class Zookeeper(KeyManager):
    def __init__(self, hosts):
        self._hosts = hosts
        self.zk = KazooClient(hosts=hosts)
        self.zk.start()

    def get(self, key):
        result = self.zk.get(key)[0]
        if not result:
            # Empty data marks a "directory" node: return its children instead.
            children = self.zk.get_children(key)
            return [{'name': i, 'value': self.zk.get(os.path.join(key, i))[0]}
                    for i in children]
        return result

    def set(self, key, data):
        try:
            self.zk.set(key, data.encode('utf-8'))
        except NoNodeError:
            # The node does not exist yet: create it instead.
            self.zk.create(key, data.encode('utf-8'))

    def mkdir(self, key):
        self.set(key, "")

    def close(self):
        self.zk.stop()
        self.zk.close()

    @property
    def hosts(self):
        return self._hosts
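A minimal usage sketch for the key manager above; the host string and the /config paths are hypothetical:

store = Zookeeper(hosts='127.0.0.1:2181')
store.mkdir('/config')                       # "directory" node with empty data
store.set('/config/db_url', 'postgres://db.local/app')
print(store.get('/config/db_url'))           # raw value of a leaf node
print(store.get('/config'))                  # [{'name': ..., 'value': ...}] for children
store.close()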
Example no. 4
class ZKTestBase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        utdocker.pull_image(zk_tag)

    def setUp(self):

        utdocker.create_network()
        utdocker.start_container(
            zk_name,
            zk_tag,
            env={
                "ZOO_MY_ID": 1,
                "ZOO_SERVERS": "server.1=0.0.0.0:2888:3888",
            },
            port_bindings={2181: 21811}
        )

        self.zk = KazooClient('127.0.0.1:21811')
        self.zk.start()

        self.zkauthed, _ = zkutil.kazoo_client_ext(
            {'hosts': '127.0.0.1:21811', 'auth': ('digest', 'xp', '123'),
             'acl': (('xp', '123', 'cdrwa'), ('foo', 'bar', 'rw'))})

        dd('start zk-test in docker')

    def tearDown(self):

        self.zk.stop()
        self.zkauthed.stop()
        utdocker.remove_container(zk_name)
Example no. 5
def main():
    parser = argparse.ArgumentParser(description = DESCRIPTION)

    parser.add_argument("hosts", metavar = "<zookeeper-endpoint>", type = str,
        nargs = "+", help = "Zookeeper node endpoints to connect to")
    parser.add_argument("--timeout", dest = "timeout", action = "store", type = int,
        default = 30, help = "Zookeeper connection timeout")

    option = parser.parse_args()

    logging.debug("Using %s as a Zookeeper connection string" % option.hosts)

    client = KazooClient(hosts = ",".join(option.hosts))

    try:
        client.start(timeout = option.timeout)
    except TimeoutError as e:
        logging.error("Timed out while connecting to Zookeeper")
        return 1

    status = bootstrap(client, str(uuid.uuid4()))

    # If the client is not stopped, it will hang forever maintaining the connection.
    client.stop()

    return status
Example no. 6
def get_children_data(ensemble, namespace, read_only=True):
  hdfs = cluster.get_hdfs()
  if hdfs is None:
    raise PopupException(_('No [hdfs] configured in hue.ini.'))

  if hdfs.security_enabled:
    sasl_server_principal = PRINCIPAL_NAME.get()
  else:
    sasl_server_principal = None

  zk = KazooClient(hosts=ensemble, read_only=read_only, sasl_server_principal=sasl_server_principal)

  zk.start()

  children_data = []

  children = zk.get_children(namespace)

  for node in children:
    data, stat = zk.get("%s/%s" % (namespace, node))
    children_data.append(data)

  zk.stop()

  return children_data
Example no. 7
    def run(self):
        zk = KazooClient(hosts='%s:%d' % (self.options.host, self.options.port),
                         read_only=True, timeout=3)

        try:
            zk.start()

            options = vars(self.options)
            options.update({
                'system.hostname': socket.gethostname()
            })

            if self.options.regex:
                content, stats = zk.get(self.options.file)

                options['stats'] = stats

                # zk.get() returns bytes under Python 3, so decode before matching
                m = re.search(self.options.regex, content.decode('utf-8'), re.MULTILINE | re.DOTALL)

                if m:
                    options.update(m.groupdict())

                    self.ok(self.options.message.format(**options))
                else:
                    self.critical(self.options.message.format(**options))
            elif zk.exists(self.options.file):
                self.ok(self.options.message.format(**options))
            else:
                self.critical(self.options.message.format(**options))
        except Exception as ex:
            self.critical(ex)
        finally:
            zk.stop()
Example no. 8
def processTransfer():
    conn = cur = zk = None
    try:
        conn = psycopg2.connect(dbConnectStr)
        cur = conn.cursor()
        zk = KazooClient(hosts=zkHost)
        zk.start()
        transferq = LockingQueue(zk, '/transfer/')
        while True:
            rawCode = transferq.get()
            proposal = rawCode.decode().strip()
            transferq.consume()

            # print(" proposal = {0} ".format(proposal))
            ints = datetime.now()
            inload = os.getloadavg()[0]
            pro1 = Popen(['/usr/bin/python36', './processproptran.py', proposal], stdin=None, stdout=None)
            pro1.wait()

            outts = datetime.now()
            outload = os.getloadavg()[0]
            # insert the runtime info into c*
            cluster = Cluster(cfg.cassCluster)
            session = cluster.connect(cfg.cassKeyspace)
            stmt = SimpleStatement("""insert into runstat(id,executable,ints,inload,outts,outload)
            values (%s, %s, %s, %s, %s, %s)""", consistency_level=ConsistencyLevel.ANY)
            session.execute(stmt, (uuid.uuid4(), executable, ints, inload, outts, outload))
    except psycopg2.Error as err:
        print("SQLError {0}".format(err))
    finally:
        # Guard against NameError when the connection setup itself failed.
        if zk is not None:
            zk.stop()
            zk.close()
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
Example no. 9
def zookeeper_swarm(zk_server_list, path='/swarm'):
    path = path + '/docker/swarm/leader'
    zk = KazooClient(hosts=zk_server_list)
    zk.start()
    master, stat = zk.get(path)
    zk.stop()
    return master.decode('utf-8')
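A brief usage sketch with a hypothetical server list:

leader = zookeeper_swarm('zk1:2181,zk2:2181,zk3:2181')
print('current swarm leader: %s' % leader)

Note that zk.stop() is skipped if zk.get() raises; wrapping the get in try/finally would make the cleanup robust.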
Example no. 10
def save2ownershipcatalog(pq, verdict, proposal, rawtext, symbol, noteId, quantity, target):
    zk = KazooClient(hosts=zkHost)
    zk.start()
    zkc = zk.Counter("/ownershipId", default=0x700)
    zkc += 1
    ownershipId = zkc.value
    print("ownershipId={0}".format(ownershipId))
    zkc = zk.Counter("/noteId", default=0x700)
    zkc += 1
    rowId = zkc.value
    print("rowId={0}".format(rowId))
    
    zk.stop()
    zk.close()
    sha256 = hashlib.sha256()
    sha256.update("{0}{1}".format(noteId.strip(),target.strip()).encode('utf-8'))
    hashcode = sha256.hexdigest()
    conn = cur = None
    try:
        conn = psycopg2.connect(dbConnectStr)
        cur = conn.cursor()
        cur.execute("""insert into ownership0(id, symbol,"noteId", quantity,owner,updated,"hashCode")values(%s,%s,%s,%s,%s,now(),%s)
        """,[int(ownershipId),symbol.strip(),noteId.strip(), quantity.strip(), target.strip(), hashcode.strip()])
        conn.commit()
        #save the entry to note_catalog table
        sha256 = hashlib.sha256()
        sha256.update("{0}{1}".format(noteId.strip(),target.strip()).encode('utf-8'))
        #hashcode = sha256.hexdigest()
        cur.execute("""insert into note_catalog(id, pq , verdict, proposal, note, recipient, hook, stmt, setup, "hashCode")values(%s,%s,%s,%s,%s,%s,%s,%s,now(),%s)
        """,[int(rowId), pq.strip(), verdict.strip(), proposal.strip(), "{0}||{1}||{2}".format(symbol.strip(),noteId.strip(),quantity.strip()) ,target.strip(),'',rawtext.strip(), hashcode.strip()])
        conn.commit()
    except psycopg2.Error as err:
        print("SQLError {0}".format(err))
    finally:
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
Example no. 11
    def achieve_consensus(self):
        """Trigger consensus logic and handle errors."""

        log.info('Set up ZK client using host(s): %s', self._hosts)
        zk = KazooClient(hosts=self._hosts)

        # Initialize ZK connection state variable, which is shared across
        # threads. It is updated from a change listener function which is
        # invoked from within a Kazoo connection management thread, see
        # http://kazoo.readthedocs.org/en/latest/api/handlers/threading.html.
        self._connected = False
        zk.add_listener(self._zk_state_change_listener)
        zk.start()

        # Wait for handling thread to update connection status. (As of non-
        # determinism around GIL context switches there is otherwise no
        # guarantee that the status is updated within
        # `_run_consensus_procedure`).
        while not self._connected:
            time.sleep(0.01)

        self._zk = zk
        try:
            # This may raise ConnectionLost or various
            # kazoo.exceptions.* types.
            return self._run_consensus_procedure()
        finally:
            log.info('Shut down ZK client.')
            try:
                zk.stop()
            finally:
                zk.close()
Example no. 12
def cluster_state(pathVariable):
    try:
        zk_host = zk_map[pathVariable]
        zk = KazooClient(hosts=zk_host, read_only=True)
        zk.start()
        try:
            if pathVariable.find('kafka') > 0:
                nodes = zk.get_children('/brokers/ids')
                brokers = ""
                for id in nodes:
                    data, stat = zk.get('/brokers/ids/' + id)
                    jdata = json.loads(data)
                    brokers += jdata['host'] + "\n"
                return 'There are ' + str(len(nodes)) + \
                       ' brokers running\nids: ' + \
                       ','.join(nodes) + '\nbrokers:' + \
                       brokers + '\nZK:' + zk_host + \
                       '\nThe cluster looks healthy. ', 200, {'Content-Type': 'text/plain; charset=utf-8'}
            else:
                data, stat = zk.get('/hbase/master')
                start = data.find('bach-')
                end = data.find('.bloomberg')
                hmaster = data[start:end]
                data = zk.get_children('/hbase/rs')
                rs = ""
                for node in data:
                    rs += node + "\n"
                return "Its a hadoop cluster\n" + \
                       'hmaster :' + hmaster + \
                       '\nRegionServers :' + rs + \
                       '\nZK: ' + zk_host + \
                       '\nThe cluster looks healthy.', 200, {'Content-Type': 'text/plain; charset=utf-8'}
        finally:
            # always close the connection, even on the returns above
            zk.stop()
    except Exception:
        return 'Cluster seems down'
Example no. 13
def main():
   # Read configuration.
   options = yaml.safe_load(open("config.yaml"))
   
   # Connect to the ZooKeeper cluster.
   zookeeper = KazooClient(
     hosts = ','.join(options["zookeeper"])
   )
   
   zookeeper.start()
   
   # Connect to SES.
   email_connection = boto.ses.connect_to_region("us-east-1")
   
   try:
      update_number = 0
      
      # Sends an update to me every hour.
      while True:
         trigger_update(zookeeper, email_connection,
           force = (update_number % 24 == 0))
         time.sleep(1 * 3600)
         
         update_number += 1
      
   finally:
         
      # Clean up the connection to ZooKeeper.
      zookeeper.stop()
Example no. 14
class Store(object):
    def __init__(self,**kwargs):
        self.config = kwargs
        self.client = None

    def get_client(self):
        return self.client

    def open(self):
        self.client = KazooClient(**self.config)
        self.client.start()

    def close(self):
        self.client.stop()

    def read(self,path):
        return self.client.get(path)

    def write(self,path,value):
        base_path = os.path.dirname(path)
        self.client.ensure_path(base_path)
        self.client.create(path,value)

    def overwrite(self,path,value):
        self.client.set(path,value)

    def exists(self,path):
        return self.client.exists(path)
Example no. 15
def chunk(args=None):
    args = chunk_parser.parse_args(args)

    # Log verbosity
    verbosity = args.verbose - args.quiet
    if args.debug:
        log_level = logging.DEBUG - verbosity*10
    else:
        log_level = logging.WARN - verbosity*10

    logging.basicConfig(level=log_level)
    logging.getLogger('kazoo.client').setLevel(log_level + 20)

    # Zookeeper servers
    if len(args.servers):
        zk_hosts = ','.join(args.servers)
    else:
        zk_hosts = '127.0.0.1:2181'

    # Zookeeper client
    zk = KazooClient(hosts=zk_hosts)

    zk.start()

    # ChunkServer
    cs = HTTPChunkServer(zk=zk, addr=(args.host,args.port), cache_path=args.chunk_cache, hash_data=args.hash_data)
    cs.run()

    # Cleanup
    zk.stop()
Example no. 16
File: actor.py Project: tlvu/mochi
class ActorAddressBook(object):
    def __init__(self, zk_hosts, timeout=60.0):
        self.retry = KazooRetry(max_tries=10)
        self.zk = KazooClient(hosts=zk_hosts, timeout=timeout)
        self.zk.start()

    def lookup(self, path):
        return self.retry(self._lookup, path)

    def _lookup(self, path):
        actor_url, stat = self.zk.get(path)
        return RemoteActor(actor_url.decode('utf-8'))

    def register(self, path, actor_url):
        return self.retry(self._register, path, actor_url)

    def _register(self, path, actor_url):
        self.zk.ensure_path(path)
        self.zk.set(path, actor_url.encode('utf-8'))

    def delete(self, path):
        self.zk.delete(path, recursive=True)

    def __del__(self):
        self.zk.stop()
Example no. 17
def random_nodes(number_of_nodes_to_return, exclude=None, nodes=None):
    """ Selects a group of nodes from the pool of registered nodes

    Arguments:
        number_of_nodes_to_return: The total number of nodes to be returned
        to the caller of the function
        exclude (optional): A list of nodes that will be excluded from the
        results
        nodes (optional): A list of nodes to process, if None then the
        specified zookeeper will be contacted and the registered brokers used
    :return:
    """
    if not nodes:
        ids = []
        zk = KazooClient(hosts=zookeeper_connection_string, read_only=True)
        zk.start()
        try:
            if zk.exists('/brokers/ids'):
                ids = zk.get_children('/brokers/ids')
        finally:
            zk.stop()
    else:
        ids = nodes

    if exclude:
        ids = [x for x in ids if x not in exclude]
    return random.sample(ids, number_of_nodes_to_return)
Example no. 18
  def from_task(self, task, sandbox):
    data = json.loads(task.data)
    cluster_name, host, port, zk_url = data['cluster'], data['host'], data['port'], data['zk_url']
    _, servers, path = parse(zk_url)
    kazoo = KazooClient(servers)
    kazoo.start()
    self_instance = ServiceInstance(Endpoint(host, port))

    try:
      task_control = self._task_control_provider.from_task(task, sandbox)
      installer = self._installer_provider.from_task(task, sandbox)
      backup_store = self._backup_store_provider.from_task(task, sandbox)
    except (TaskControl.Error, PackageInstaller.Error) as e:
      kazoo.stop()  # Kazoo needs to be cleaned up. See kazoo/issues/217.
      raise TaskError(e.message)

    state_manager = StateManager(sandbox, backup_store)

    return MysosTaskRunner(
        self_instance,
        kazoo,
        get_cluster_path(path, cluster_name),
        installer,
        task_control,
        state_manager)
Example no. 19
class ZookeeperClient(object):
    def __init__(self, server_list):
        self._retry = KazooRetry(max_tries=None, max_delay=300,
                                 sleep_func=gevent.sleep)
        self._zk_client = KazooClient(
            hosts=','.join(server_list),
            timeout=400,
            handler=SequentialGeventHandler(),
            logger=logger,
            connection_retry=self._retry,
            command_retry=self._retry)

    def connect(self):
        self._zk_client.start()

    def disconnect(self):
        self._zk_client.stop()
        self._zk_client.close()

    def create_node(self, path, value=None):
        if value is None:
            value = uuid.uuid4()
        try:
            self._zk_client.create(path, str(value).encode('utf-8'), makepath=True)
        except NodeExistsError:
            self._zk_client.set(path, str(value).encode('utf-8'))
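A short usage sketch, assuming a reachable ensemble; create_node() behaves as an upsert because the NodeExistsError handler falls back to set():

client = ZookeeperClient(['zk1:2181', 'zk2:2181'])
client.connect()
client.create_node('/services/api/instance-1', 'ready')
client.create_node('/services/api/instance-1', 'draining')  # node exists, so set() runs
client.disconnect()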
Example no. 20
File: zk.py Project: BITDM/pinot
class PinotZk(object):

  def __init__(self, config, logger, fabric):
    self.config = config
    self.fabric = fabric
    self.logger = logger
    self.zk = None

  def get_handle(self):
    host = self.config.get_zk_host(self.fabric)

    if not self.zk:
      try:
        self.zk = KazooClient(hosts=host)
        self.zk.start()
      except kazoo.exceptions.KazooException:
        error = 'Failed connecting to zk {0}'.format(host)
        self.logger.exception(error)
        raise PinotException(error)

    return self.zk

  def close(self):
    if self.zk:
      self.zk.stop()
      self.zk.close()
Example no. 21
def _get_json_type(request, cluster_id, type):
	data = []
	error_brokers = 0
	zk = None
	try:
		cluster = get_cluster_or_404(id=cluster_id)

		zk = KazooClient(hosts=cluster['zk_host_ports'])
		zk.start()

		if type == "broker":
			brokers, error_brokers = _get_brokers(zk, cluster_id)
			for broker in brokers:
				data.append(broker['host'])
		if type == "topic":
			topics, error_zk_topics = _get_topics(cluster)
			for topic in topics:
				data.append(topic['id'])
		if type == "metric":
			data = _get_sections_ini()
	except KazooException:
		error_zk_brokers = 1
	finally:
		# close the connection even when the lookup or ZK connect fails
		if zk is not None:
			zk.stop()

	return JsonResponse(data, safe=False)
Example no. 22
    def expire_session(self, client_id=None):
        """Force ZK to expire a client session

        :param client_id: id of client to expire. If unspecified, the id of
                          self.client will be used.

        """
        client_id = client_id or self.client.client_id

        lost = threading.Event()
        safe = threading.Event()

        def watch_loss(state):
            if state == KazooState.LOST:
                lost.set()
            if lost.is_set() and state == KazooState.CONNECTED:
                safe.set()
                return True

        self.client.add_listener(watch_loss)

        # Sometimes we have to do this a few times
        attempts = 0
        while attempts < 5 and not lost.is_set():
            client = KazooClient(self.hosts, client_id=client_id, timeout=0.8)
            client.start()
            client.stop()
            lost.wait(5)
            attempts += 1
        # Wait for the reconnect now
        safe.wait(15)
        self.client.retry(self.client.get_async, '/')
Example no. 23
def get_alive_master_ip():
    zk_conn_str = get_os_env('ZOOKEEPER_CONN_STR')
    master_stack_name = get_os_env('MASTER_STACK_NAME')
    master_ip = ""
    global region
    if zk_conn_str != "":
        from kazoo.client import KazooClient
        zk = KazooClient(hosts=zk_conn_str)
        zk.start()
        try:
            master_ip = zk.get("/spark/leader_election/current_master")[0].decode('utf-8')
        except Exception:
            master_ip = ""
        finally:
            zk.stop()
        return master_ip
    elif master_stack_name != "" and region is not None:
        try:
            elb = boto3.client('elb', region_name=region)
            ec2 = boto3.client('ec2', region_name=region)
            master_ips = get_instance_ips(elb, ec2, master_stack_name)
            if len(master_ips) == 1:
                return master_ips[0]
            return ""  # shouldn't happen without zookeeper
        except Exception:
            return ""
    else:
        return ""
Example no. 24
def mkfs(args=None):
    args = mkfs_parser.parse_args(args)

    # Log verbosity
    verbosity = args.verbose - args.quiet
    log_level = logging.WARN - verbosity*10

    logging.basicConfig(level=log_level)
    logging.getLogger('kazoo.client').setLevel(log_level + 20)

    # ZK Path of filesystem root
    zk_root = posixpath.join(FILESYSTEMS, args.name)

    # Zookeeper
    if len(args.servers):
        zk_hosts = ','.join(args.servers)
    else:
        zk_hosts = '127.0.0.1:2181'
    zk = KazooClient(hosts=zk_hosts)

    zk.start()

    # Run
    ClowderFS.mkfs(zk=zk, fs_root=zk_root, chunk_size=args.chunk_size)

    # Cleanup
    zk.stop()
Example no. 25
def start_zoo(cport):
    '''
    Client uses this function to start an instance of zookeeper
    Arguments:
        cport : An unused TCP port for zookeeper to use as the client port
    '''
    basefile = "zookeeper-3.4.5"
    tarfile = os.path.dirname(os.path.abspath(__file__)) + "/" + basefile + ".tar.gz"
    cassbase = "/tmp/zoo." + str(cport) + "/"
    confdir = cassbase + basefile + "/conf/"
    output,_ = call_command_("mkdir " + cassbase)

    logging.info('Installing zookeeper in ' + cassbase + " conf " + confdir)
    os.system("cat " + tarfile + " | tar -xpzf - -C " + cassbase)

    output,_ = call_command_("cp " + confdir + "zoo_sample.cfg " + confdir + "zoo.cfg")

    logging.info('zookeeper Client Port %d' % cport)

    replace_string_(confdir + "zoo.cfg", \
        [("dataDir=/tmp/zookeeper", "dataDir="+cassbase)])

    replace_string_(confdir + "zoo.cfg", \
        [("clientPort=2181", "clientPort="+str(cport))])

    output,_ = call_command_(cassbase + basefile + "/bin/zkServer.sh start")

    zk = KazooClient(hosts='127.0.0.1:'+str(cport))
    zk.start()
    zk.stop()
Example no. 26
class ZProducer(object):
    """
    A base Zookeeper producer to be used by other producer classes

    Args:
        hosts: Comma-separated list of hosts to connect to
               (e.g. 127.0.0.1:2181,127.0.0.1:2182)
        topic: The kafka topic to send messages to
        chroot: The kafka subdirectory to search for brokers
    """
    producer_kls = None

    def __init__(self, hosts, topic, chroot='/', **kwargs):

        if self.producer_kls is None:
            raise NotImplementedError("Producer class needs to be mentioned")

        self.zkclient = KazooClient(hosts=hosts)
        self.zkclient.start()

        # Start the producer instance
        self.client = get_client(self.zkclient, chroot=chroot)
        self.producer = self.producer_kls(self.client, topic, **kwargs)

        # Stop Zookeeper
        self.zkclient.stop()
        self.zkclient.close()
        self.zkclient = None

    def stop(self):
        self.producer.stop()
        self.client.close()
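Because producer_kls is None on the base class, a subclass is expected to plug in a concrete producer. A minimal sketch, where SimpleProducer is a hypothetical stand-in for whatever producer class the surrounding code base provides:

class ZSimpleProducer(ZProducer):
    producer_kls = SimpleProducer  # hypothetical concrete producer class

    def send_messages(self, *msg):
        self.producer.send_messages(*msg)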
Example no. 27
    def create_from_zookeeper(cls, zkconnect):
        log.info("Connecting to zookeeper {0}".format(zkconnect))
        try:
            zk = KazooClient(zkconnect)
            zk.start()
        except Exception as e:
            raise ZookeeperException("Cannot connect to Zookeeper: {0}".format(e))

        # Get broker list
        cluster = cls()
        add_brokers_from_zk(cluster, zk)

        # Get current partition state
        log.info("Getting partition list from Zookeeper")
        for topic in zk.get_children("/brokers/topics"):
            zdata, zstat = zk.get("/brokers/topics/{0}".format(topic))
            add_topic_with_replicas(cluster, topic, json.loads(zdata))

        if cluster.num_topics() == 0:
            raise ZookeeperException("The cluster specified does not have any topics")

        log.info("Closing connection to zookeeper")
        zk.stop()
        zk.close()

        return cluster
Example no. 28
class ZookeeperHostSource(HostSource):
    def __init__(self, config):
        self.zk = KazooClient(config.hosts.zookeeper.connection_string)
        self.zk.start()
        credentials = ":".join((config.hosts.zookeeper.username,
                                config.hosts.zookeeper.password))
        self.zk.add_auth("digest", credentials)
        self.retry = KazooRetry(max_tries=3)

    def get_all_hosts(self):
        try:
            return self.retry(self.zk.get_children, "/server")
        except KazooException as e:
            raise HostLookupError("zk host enumeration failed: %r", e)

    def should_host_be_alive(self, host_name):
        try:
            host_root = "/server/" + host_name

            state = self.retry(self.zk.get, host_root + "/state")[0]
            if state in ("kicking", "unhealthy"):
                return False

            is_autoscaled = self.retry(self.zk.exists, host_root + "/asg")
            is_running = self.retry(self.zk.exists, host_root + "/running")
            return not is_autoscaled or is_running
        except NoNodeException:
            return False
        except KazooException as e:
            raise HostLookupError("zk host aliveness check failed: %r", e)

    def shut_down(self):
        self.zk.stop()
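A brief sketch of how this host source might be driven; config is assumed to carry the hosts.zookeeper settings used in __init__:

source = ZookeeperHostSource(config)
try:
    for host in source.get_all_hosts():
        if not source.should_host_be_alive(host):
            print('host %s looks dead and can be reaped' % host)
finally:
    source.shut_down()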
Example no. 29
class ShellTestCase(unittest.TestCase):
    """ base class for all tests """

    def setUp(self):
        """
        make sure that the prefix dir is empty
        """
        self.tests_path = os.getenv("ZKSHELL_PREFIX_DIR", "/tests")
        self.zk_host = os.getenv("ZKSHELL_ZK_HOST", "localhost:2181")
        self.username = os.getenv("ZKSHELL_USER", "user")
        self.password = os.getenv("ZKSHELL_PASSWD", "user")
        self.digested_password = os.getenv("ZKSHELL_DIGESTED_PASSWD", "F46PeTVYeItL6aAyygIVQ9OaaeY=")
        self.super_password = os.getenv("ZKSHELL_SUPER_PASSWD", "secret")
        self.scheme = os.getenv("ZKSHELL_AUTH_SCHEME", "digest")

        self.client = KazooClient(self.zk_host, 5)
        self.client.start()
        self.client.add_auth(self.scheme, self.auth_id)
        if self.client.exists(self.tests_path):
            self.client.delete(self.tests_path, recursive=True)
        self.client.create(self.tests_path, str.encode(""))

        self.output = StringIO()
        self.shell = Shell([self.zk_host], 5, self.output, setup_readline=False, async=False)

        # Create an empty test dir (needed for some tests)
        self.temp_dir = tempfile.mkdtemp()

    @property
    def auth_id(self):
        return "%s:%s" % (self.username, self.password)

    @property
    def auth_digest(self):
        return "%s:%s" % (self.username, self.digested_password)

    def tearDown(self):
        self.output = None
        self.shell = None

        if os.path.isdir(self.temp_dir):
            shutil.rmtree(self.temp_dir)

        if self.client.exists(self.tests_path):
            self.client.delete(self.tests_path, recursive=True)

        self.client.stop()

    ###
    # Helpers.
    ##

    def create_compressed(self, path, value):
        """
        ZK Shell doesn't support creating directly from a bytes array so we use a Kazoo client
        to create a znode with zlib compressed content.
        """
        compressed = zlib.compress(bytes(value, "utf-8") if PYTHON3 else value)
        self.client.create(path, compressed, makepath=True)
Example no. 30
def check_broker_id_in_zk(broker_id_policy, process, region):
    """
    Check the Zookeeper connection endlessly.

    This function checks endlessly whether the broker is still registered in ZK
    (we observed running brokers with missing broker ids, so we implemented this check)
    and whether the ZK IPs changed (e.g. due to a node restart). If either happens, a Kafka restart is enforced.
    """
    from kazoo.client import KazooClient
    zk_conn_str = os.getenv('ZOOKEEPER_CONN_STRING')
    broker_id_manager = find_out_own_id.get_broker_policy(broker_id_policy)
    broker_id = broker_id_manager.get_id(kafka_data_dir)
    logging.info("check broker id... {}".format(broker_id))

    if not broker_id:
        broker_id = wait_for_broker_id(broker_id_manager, kafka_data_dir)

    while True:
        check_kafka(region)

        new_zk_conn_str = generate_zk_conn_str.run(os.getenv('ZOOKEEPER_STACK_NAME'), region)
        if zk_conn_str != new_zk_conn_str:
            logging.warning("ZooKeeper connection string changed!")
            logging.warning("new ZK: " + new_zk_conn_str)
            logging.warning("old ZK: " + zk_conn_str)
            zk_conn_str = new_zk_conn_str
            os.environ['ZOOKEEPER_CONN_STRING'] = zk_conn_str
            create_broker_properties(zk_conn_str)
            from random import randint
            wait_to_stop = randint(1, 10)
            logging.info("Waiting " + str(wait_to_stop) + " seconds to stop kafka broker ...")
            sleep(wait_to_stop)
            process.terminate()
            process.wait()
            wait_to_restart = randint(10, 20)
            logging.info("Waiting " + str(wait_to_restart) + " seconds to restart kafka broker ...")
            sleep(wait_to_restart)
            logging.info("Restarting kafka broker with new ZooKeeper connection string ...")
            process = subprocess.Popen([kafka_dir + "/bin/kafka-server-start.sh",
                                        kafka_dir + "/config/server.properties"])
            os.environ['WAIT_FOR_KAFKA'] = 'yes'
            continue

        zk = KazooClient(hosts=zk_conn_str)
        zk.start()
        try:
            zk.get("/brokers/ids/" + broker_id)
            logging.info("I'm still in ZK registered, all good!")
            sleep(60)
            zk.stop()
        except Exception:
            logging.warning("I'm not in ZK registered, stopping kafka broker process!")
            zk.stop()
            process.terminate()
            process.wait()
            logging.info("Restarting kafka broker ...")
            process = subprocess.Popen([kafka_dir + "/bin/kafka-server-start.sh",
                                        kafka_dir + "/config/server.properties"])
            os.environ['WAIT_FOR_KAFKA'] = 'yes'
Example no. 31
def manager(proj=None, env='dev'):
    session = request.environ.get('beaker.session')
    if not session or 'trinity_user' not in session:
        redirect('/', code=302)

    result = {}
    result['user'] = session['trinity_user']
    result['roles'] = user_map[result['user']][1]
    result['user_projects'] = user_map[result['user']][2]
    zk = KazooClient(hosts[env])
    projects = []

    key = request.forms.get('configkey')
    value = request.forms.get('configvalue')
    comment = request.forms.get('configcomment')
    action = request.forms.get('action')

    try:
        zk.start()
        projects = getProjects(zk)
        result['projects'] = projects
        result['current'] = proj if proj else projects[0]
        result['env'] = env
        if action:
            zkOp = zkOperator(zk, result['current'], env, action)
            zkOp(key, value, comment)

        if env not in result['roles'] or result['current'] not in result[
                'user_projects']:
            result['warn'] = 'Sorry, you do not have permission to operate on the ' + env + ' environment!'
            return result
        zkOp = zkOperator(zk, result['current'], env, 'get')
        result['config'] = zkOp()
        if action == 'delete' or action == 'add':
            redirect('/manager/' + result['current'] + '/' + result['env'],
                     code=302)
    finally:
        zk.stop()
        zk.close()

    return result
Example no. 32
    def test_zk_conn_lost(self):
        """Check we restore zookeeper nodes correctly after connection loss

        See also github issue #204.
        """
        check_partitions = lambda c: c._get_held_partitions() == c._partitions
        zk = KazooClient(self.kafka.zookeeper)
        zk.start()
        try:
            topic = self.client.topics[self.topic_name]
            consumer_group = b'test_zk_conn_lost'

            consumer = topic.get_balanced_consumer(
                consumer_group, zookeeper=zk, use_rdkafka=self.USE_RDKAFKA)
            self.assertTrue(check_partitions(consumer))
            zk.stop()  # expires session, dropping all our nodes

            # Start a second consumer on a different zk connection
            other_consumer = topic.get_balanced_consumer(
                consumer_group, use_rdkafka=self.USE_RDKAFKA)

            # Slightly contrived: we'll grab a lock to keep _rebalance() from
            # starting when we restart the zk connection (restart triggers a
            # rebalance), so we can confirm the expected discrepancy between
            # the (empty) set of partitions on zk and the set in the internal
            # consumer:
            with consumer._rebalancing_lock:
                zk.start()
                self.assertFalse(check_partitions(consumer))

            # Finally, confirm that _rebalance() resolves the discrepancy:
            self.wait_for_rebalancing(consumer, other_consumer)
            self.assertTrue(check_partitions(consumer))
            self.assertTrue(check_partitions(other_consumer))
        finally:
            try:
                consumer.stop()
                other_consumer.stop()
                zk.stop()
            except Exception:
                pass
Example no. 33
    def test_create_makepath_incompatible_acls(self):
        from kazoo.client import KazooClient
        from kazoo.security import make_digest_acl_credential, CREATOR_ALL_ACL

        credential = make_digest_acl_credential("username", "password")
        alt_client = KazooClient(
            self.cluster[0].address + self.client.chroot,
            max_retries=5,
            auth_data=[("digest", credential)],
            handler=self._makeOne(),
        )
        alt_client.start()
        alt_client.create("/1/2", b"val2", makepath=True, acl=CREATOR_ALL_ACL)

        try:
            with pytest.raises(NoAuthError):
                self.client.create("/1/2/3/4/5", b"val2", makepath=True)

        finally:
            alt_client.delete('/', recursive=True)
            alt_client.stop()
Example no. 34
def _acquire_zk_node():
    logger.debug('zk1')
    try:
        zk = KazooClient(hosts=ZOOKEEPER['HOST'])
        zk.start()
    except Exception as e:
        err_log.error(e)
        raise Exception(1910)
    global boxs
    boxs = []
    _node_list = zk.get_children('/B/')
    for vm in _node_list:
        logger.debug(vm)
        box_list = zk.get_children('/B/' + vm + '/')
        for box in box_list:
            node = '/B/' + vm + '/' + box
            data, stat = zk.get(node)
            # _dict = json.loads(data)
            tup = (node, eval(data.decode("utf-8"))['Add'])
            boxs.append(tup)
    zk.stop()
Example no. 35
def create_autoscaling_lock(service: str, instance: str) -> Iterator[None]:
    """Acquire a lock in zookeeper for autoscaling. This is
    to avoid autoscaling a service multiple times, and to avoid
    having multiple paasta services all attempting to autoscale and
    fetching mesos data."""
    zk = KazooClient(hosts=load_system_paasta_config().get_zk_hosts(),
                     timeout=ZK_LOCK_CONNECT_TIMEOUT_S)
    zk.start()
    lock = zk.Lock(f'/autoscaling/{service}/{instance}/autoscaling.lock')
    try:
        lock.acquire(
            timeout=1)  # timeout=0 throws some other strange exception
        yield
    except LockTimeout:
        raise LockHeldException(
            f"Failed to acquire lock for autoscaling! {service}.{instance}")
    else:
        lock.release()
    finally:
        zk.stop()
        zk.close()
Example no. 36
def bounce_lock_zookeeper(name):
    """Acquire a bounce lock in zookeeper for the name given. The name should
    generally be the service namespace being bounced.

    This is a contextmanager. Please use it via 'with bounce_lock(name):'.

    :param name: The lock name to acquire"""
    zk = KazooClient(hosts=load_system_paasta_config().get_zk_hosts(), timeout=ZK_LOCK_CONNECT_TIMEOUT_S)
    zk.start()
    lock = zk.Lock('%s/%s' % (ZK_LOCK_PATH, name))
    acquired = False
    try:
        lock.acquire(timeout=1)  # timeout=0 throws some other strange exception
        acquired = True
        yield
    except LockTimeout:
        raise LockHeldException("Service %s is already being bounced!" % name)
    finally:
        if acquired:
            lock.release()
        zk.stop()
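Assuming the function is wrapped with contextlib.contextmanager (the decorator is not shown in this snippet), usage follows the docstring:

with bounce_lock_zookeeper('myservice.main'):
    perform_bounce()  # hypothetical; runs only while the zk lock is held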
Example no. 37
def code_task():
    # Monitor scheduled tasks in business code: 0 = normal, 1 = failure, 2 = unknown error
    svc = sys.argv[3]
    task = sys.argv[4]
    type = sys.argv[5]
    try:
        zk = KazooClient(timeout=2, read_only=True)
        zk.start(2)
        status = 1
        info = json.loads(re.sub(r'.*\{', '{', zk.get('/dis_tasks/v2/%s/%s/' % (svc, task))[0]))
        if type == 'nextFireTime':
            if time.time() < time.mktime(time.strptime(info[type], '%Y-%m-%d %H:%M:%S')) + 3600:
                status = 0
        else:
            if info[type] in zk.get_children('/dis_tasks/v2/%s/nodes/' % svc):
                status = 0
        zk.stop()
        zk.close()
    except Exception:
        status = 2
    print(status)
Example no. 38
class MyTestCase(unittest.TestCase):
    def setUp(self) -> None:
        self.client = KazooClient(hosts='127.0.0.1:2181/jaina')
        self.client.start()
        # self.client.create('/jaina')
        # self.client.chroot = '/jaina'

    def test_create(self):
        self.client.create('/test', 'abc'.encode())  # kazoo paths must be absolute

    def test_delete(self):
        self.client.delete('/test')

    def test_get(self):
        value = self.client.get('/test')
        print(value[0], type(value[0]))
        print(value[0].decode())
        print(value[1], type(value[1]))

    def tearDown(self) -> None:
        self.client.stop()
Example no. 39
def init():

    global inited
    zk = None
    try:
        zk = KazooClient(hosts='127.0.0.1:2181')
        zk.add_listener(state_listener)
        zk.start()
        register(stop_zk, zk)
        create_path_if_not_exists(zk, '/jobs')
        create_path_if_not_exists(zk, '/watchers')
        create_path_if_not_exists(zk, '/watchlocks')
        create_path_if_not_exists(zk, '/executors')
    except Exception as e:
        print('Zk problem', e)
        if zk is not None:
            zk.stop()
        sys.exit(1)

    inited = True
    return zk
Example no. 40
def main():
    (options, args) = parse_options(sys.argv[1:])

    data = options.znode_data_size * b"D"

    s = KazooClient(options.server)
    s.start()

    if s.exists(options.root_znode):
        children = s.get_children(options.root_znode)
        print("delete old entries: %d" % len(children))
        for child in children:
            s.delete("%s/%s" % (options.root_znode, child))
    else:
        s.create(options.root_znode, b"kazoo root znode")

    evaluation(s, options.root_znode, data, options)

    s.stop()

    print("Performance test complete")
Example no. 41
 def reset_driverunit(self, ordernumber):
     '''Send the reset command to the carts in the station. Example:
                        |ordernumber
     '''
     try:
         zk = KazooClient(hosts=ReadMysql.ReadMysql().zkhost)
         driverunitid = (ReadMysql.ReadMysql().get_driverunitid_in_station())
         print(ReadMysql.ReadMysql().reset_jobID)
         print(driverunitid)
         for d in driverunitid:
             sleep(1)
             ReadMysql.ReadMysql().reset_driverunit(ordernumber)
             zk.start()
             zk.create("/driveunitTasksAssign/" + d[0] + "/jobs/" + ReadMysql.ReadMysql().reset_jobID, b"")
             sleep(1)
             result = zk.get_children("/driveunitTasksAssign/" + d[0] + "/jobs")
             print(d[0] + u" cart job nodes: ")
             print(result)
             zk.stop()
     except Exception:
         print(u"No cart in the workstation, or the zk connection failed")
Example no. 42
class TestSessions(unittest.TestCase):
    def setUp(self):
        from kazoo.client import KazooClient
        from kazoo.protocol.states import KazooState
        from kazoo.testing.common import ZookeeperCluster
        ZK_HOME = os.environ.get("ZOOKEEPER_PATH")
        self.cluster = ZookeeperCluster(ZK_HOME, size=1, port_offset=21000)
        self.cluster.start()
        atexit.register(lambda cluster: self.cluster.terminate(), self.cluster)
        self.client = KazooClient(self.cluster[0].address, max_retries=5)
        self.ev = threading.Event()

        def back(state):
            if state == KazooState.CONNECTED:
                self.ev.set()

        self.client.start()
        self.path = self.client.create("/" + uuid.uuid4().hex)  # kazoo paths must be absolute
        self.client.add_listener(back)

    def test_restarted_server(self):
        raise SkipTest('Patch missing')
        self.cluster.stop()
        self.cluster.start()
        self.ev.wait(5)
        eq_(self.ev.is_set(), True)
        self.assertTrue(self.client.retry(self.client.exists, self.path))

    def test_terminated_server(self):
        raise SkipTest('Patch missing')
        self.cluster.reset()
        self.cluster.start()
        self.ev.wait(5)
        eq_(self.ev.is_set(), True)
        self.assertFalse(self.client.retry(self.client.exists, self.path))

    def tearDown(self):
        self.ev.clear()
        self.client.stop()
        self.cluster.stop()
Example no. 43
def cmak2zk(over_zk, zk_url, yaml_cfg):
    """
    Populates Zookeeper at ZK_URL with Kafka cluster configuration
    in CMAK compatible format from YAML_CFG configuration file.

    ZK_URL - myzk1:2181,myzk2:2181, etc.

    \b
    YAML_CFG - Format is equal to CMAK operator Helm chart values.
    The only section used is
    https://artifacthub.io/packages/helm/cmak-operator/cmak-operator?modal=values-schema&path=cmak.

    """
    cmak_cfg = yaml.safe_load(yaml_cfg)['cmak']
    common_cfg = cmak_cfg['clustersCommon']

    zk = KazooClient(hosts=zk_url)
    zk.start()

    for cl in cmak_cfg['clusters']:
        cl = merge(common_cfg, cl)
        dst = f"{ZK_ROOT}/{cl['name']}"
        json_b = json.dumps(cl, separators=(',', ':')).encode()

        if zk.exists(dst):
            file_md5 = md5(json_b).hexdigest()

            zk_b, stat = zk.get(dst)
            zk_md5 = md5(zk_b).hexdigest()
            logging.info(
                f"md5 of {dst}: {zk_md5}, md5 of {cl['name']}: {file_md5}")

            if zk_md5 != file_md5 and over_zk is True:
                zk.set(dst, json_b)
                logging.info(f"Overwritten {dst} from {yaml_cfg.name}")
        else:
            zk.create(dst, json_b, makepath=True)
            logging.info(f"Created {dst} from {yaml_cfg.name}")

    zk.stop()
Example no. 44
def start_zoo(cport):
    '''
    Client uses this function to start an instance of zookeeper
    Arguments:
        cport : An unused TCP port for zookeeper to use as the client port
    '''
    zookeeper_download = 'curl -o ' +\
        zookeeper_url + ' -s -m 120 http://archive.apache.org/dist/zookeeper/zookeeper-'+\
        zookeeper_version+'/zookeeper-'+zookeeper_version+'.tar.gz'
    if not os.path.exists(zookeeper_url):
        process = subprocess.Popen(zookeeper_download.split(' '))
        process.wait()
        if process.returncode != 0:
            return

    basefile = "zookeeper-" + zookeeper_version
    tarfile = zookeeper_url
    cassbase = "/tmp/zoo." + str(cport) + "/"
    confdir = cassbase + basefile + "/conf/"
    output, _ = call_command_("mkdir " + cassbase)

    logging.info('Installing zookeeper in ' + cassbase)
    os.system("cat " + tarfile + " | tar -xpzf - -C " + cassbase)

    output, _ = call_command_("cp " + confdir + "zoo_sample.cfg " + confdir +
                              "zoo.cfg")

    logging.info('zookeeper Client Port %d' % cport)

    replace_string_(confdir + "zoo.cfg", \
        [("dataDir=/tmp/zookeeper", "dataDir="+cassbase)])

    replace_string_(confdir + "zoo.cfg", \
        [("clientPort=2181", "clientPort="+str(cport))])

    output, _ = call_command_(cassbase + basefile + "/bin/zkServer.sh start")

    zk = KazooClient(hosts='127.0.0.1:' + str(cport))
    zk.start()
    zk.stop()
Example no. 45
class KafkaProtocol(Protocol):
    zk: KazooClient
    producer: kafka.KafkaProducer

    def initProtocol(self, cfg: Cfg):

        self.zk = KazooClient(hosts=cfg.server + ":2181")
        self.zk.start()

        endpoints = []

        children = self.zk.get_children("/brokers/ids")
        print("There are %s children with names %s" %
              (len(children), children))
        for child in children:
            childData, sats = self.zk.get("/brokers/ids/" + child)
            jsonData = childData.decode('UTF-8')
            jsonData = json.loads(jsonData)
            print(jsonData)
            endpoints.append(jsonData['host'] + ':' + str(jsonData['port']))
        self.zk.stop()
        self.producer = kafka.KafkaProducer(bootstrap_servers=endpoints)

    def sendPacket(self, parsedPacket: ParsedPacket):
        super().sendPacket(parsedPacket)
        tmp = "\"date\": \"{}\",\"tv\": {},\"bluray\": {},\"appleTv\": {},\"ipTv\":  {}"
        # use a local name that does not shadow the json module
        payload = tmp.format(parsedPacket.date.isoformat(), parsedPacket.tv,
                             parsedPacket.bluray, parsedPacket.appleTv,
                             parsedPacket.ipTv)

        #self.client.publish("AppliancesBucket","{"+payload+"}")
        payload = "{" + payload + "}"
        self.producer.send("AppliancesBucket", value=payload.encode("UTF-8"))
        #self.producer.flush()

    def close(self):
        self.producer.flush()  #Waits until all messages are sent (ACK)
        super().close()
        self.producer.close()
Example no. 46
class ZKState(object):
    def __init__(self, path, name=None):
        super(ZKState, self).__init__()
        options = {"max_tries": -1, "max_delay": 5, "ignore_expire": True}
        self._zk = KazooClient(hosts=ZK_HOSTS, connection_retry=options)
        try:
            self._zk.start(timeout=3600)
        except Exception:
            print(traceback.format_exc(), flush=True)
        self._path = path
        self._name = "" if name is None else name + "."
        self._zk.ensure_path(path)

    def processed(self):
        return self._zk.exists(self._path + "/" + self._name + "complete")

    def process_start(self):
        if self.processed():
            return False
        try:
            self._zk.create(self._path + "/" + self._name + "processing",
                            ephemeral=True)
            return True
        except NodeExistsError:  # another process wins
            return False

    def process_end(self):
        try:
            self._zk.create(self._path + "/" + self._name + "complete")
        except NodeExistsError:
            pass

    def process_abort(self):
        # the ephemeral node will be deleted upon close
        pass

    def close(self):
        self._zk.stop()
        self._zk.close()
Example no. 47
def job_submitter_main():
    try:
        zk = init()
        cpool = ConnectionPool(host='localhost', port=6379, db=0)
        r = Redis(connection_pool=cpool)
        added = 0
        tried = 0
        max_add_try = 5000
        jobname = uuid.uuid4().hex
        added_nums = set()

        while tried < max_add_try:
            value = randint(5000, 90000000)
            tried += 1
            if value not in added_nums:
                added_nums.add(value)
            else:
                continue

            while True:
                try:
                    r.lpush(jobname, value)
                    added += 1
                    break
                except Exception as e:
                    sleep(1)
                    print "Lpush ", jobname, e

        zk = KazooClient(hosts='127.0.0.1:2181')
        zk.add_listener(state_listener)
        zk.start()
        value = SUBMITTED + "=" + str(added)
        zk.create('/jobs/' + jobname, value=value.encode('utf-8'))  # kazoo expects bytes
        zk.stop()

    except Exception as e:
        print('Big problem in submitting job', e)
        sys.exit(1)
    print('Job submitted ' + jobname)
Example no. 48
def main():
    records = user_input()

    for arguments in records:
        print(arguments['replicas'])
        print(arguments['zookeeper'])

        if arguments['debug']:
            logging.getLogger().setLevel(logging.DEBUG)

        logging.info(
            '%scmd started with Python %s and User %s',
            os.path.splitext(os.path.basename(sys.argv[0]))[0].upper(),
            platform.python_version(), getpass.getuser())

        number_svc_replicas = arguments['replicas']
        group_name = "barrier_group"
        zk_path = "/" + group_name + "/barrier/g" + str(number_svc_replicas)
        logger = init_logger()

        print("Max elements in the group " + (str(number_svc_replicas)))

        zk = KazooClient(hosts=arguments['zookeeper'])
        zkb = DoubleBarrier(zk, zk_path, number_svc_replicas)

        print("Starting the client")
        zk.start()
        print("Entering in the barrier and waiting for the group")
        zkb.enter()

        # Note, if you disable the leaving code the clients will no longer
        # wait in the barrier after the first time the group leaves it.
        # This can be useful sometimes.
        print("leaving the barrier")
        zkb.leave()

        # finish
        zk.stop()
        print("bye")
Example no. 49
def _get_topology():
	topology = CLUSTERS.get()
	clusters = []
	for cluster in topology:
		zk = KazooClient(hosts=CLUSTERS[cluster].ZK_HOST_PORTS.get())
		zk.add_listener(my_listener)
		zk.start()
		brokers = _get_brokers(zk,cluster)
		consumer_groups = _get_consumer_groups(zk,cluster)
		consumer_groups_status = {} # 0 = offline, (not 0) =  online
		for consumer_group in consumer_groups:
			consumers_path = CLUSTERS[cluster].CONSUMERS_PATH.get() + "/" + consumer_group + "/ids"
			try:
				consumers = zk.get_children(consumers_path)
			except NoNodeError:
				consumer_groups_status[consumer_group]=0 # 0 = offline
			else:
				consumer_groups_status[consumer_group]=len(consumers) # (not 0) =  online
		c = {'cluster':get_cluster_or_404(id=cluster),'brokers':brokers,'consumer_groups':consumer_groups,'consumer_groups_status':consumer_groups_status}
		clusters.append(c)
		zk.stop()
	return clusters
Example no. 50
class Cli(object):

    def __init__(self, config=None):
        # TODO: if the chroot path does not exist, this will raise an exception
        self.config = config
        self.client = KazooClient(hosts=config.hosts)
        self._chroot = self.client.chroot if self.client.chroot else '/'
        self._abs_chroot = self._chroot

    def connect(self):
        if self.client:
            self.client.start()
            log.info('Zookeeper connected')

    def quit(self):
        if self.client:
            self.client.stop()

    def exists(self, path):
        try:
            return self.client.exists(path) is not None
        except Exception as e:
            return False

    @staticmethod
    def check_hosts(hosts):
        if '/' not in hosts:
            return hosts, None
        else:
            return hosts.split('/')

    @property
    def chroot(self):
        return self._chroot

    @chroot.setter
    def chroot(self, chroot):
        self._chroot = chroot
        self.client.chroot = chroot
Example no. 51
def topic_exists(topic_name, zookeepers):

    # start zookeeper client, get topic list and stop client
    logger.info('checking if topic %s exists', topic_name)
    logger.debug('trying to connect to zookeeper with zookeeper list %s',
                 zookeepers)
    topics = []
    try:
        zk = KazooClient(hosts=zookeepers)
        zk.start()
        topics = zk.get_children('/brokers/topics')
        logger.debug('topics found: %s', ','.join(topics))
        zk.stop()
    except Exception:
        # topics stays empty, so the check below reports "not found"
        logger.exception("exception getting info from zookeeper")

    # check for topic in list
    if topic_name in topics:
        logger.info('topic %s is found', topic_name)
        return True
    else:
        logger.info('topic %s not found', topic_name)
        return False
Example no. 52
def test_lock_timeout():
    zk = KazooClient(hosts="127.0.0.1:2181")
    zk.start()
    lock = zk.Lock("/zha-lock", "test")
    lock.acquire()

    obj = type('', (), {})
    obj.flg = False

    def _oa():
        obj.flg = True
        return 0

    config = skelton.Config()
    config.check_health = lambda: 3
    config.become_active = _oa
    z = zha.ZHA(config)
    trigger_zha(z)
    assert obj.flg is False
    lock.release()
    zk.stop()
    time.sleep(10)
Example no. 53
    def __init__(self, service_name, instance_name, framework_id,
                 system_paasta_config):
        super().__init__(service_name, instance_name, framework_id,
                         system_paasta_config)
        self.zk_hosts = system_paasta_config.get_zk_hosts()

        # For some reason, I could not get the code suggested by this SO post to work to ensure_path on the chroot.
        # https://stackoverflow.com/a/32785625/25327
        # Plus, it just felt dirty to modify instance attributes of a running connection, especially given that
        # KazooClient.set_hosts() doesn't allow you to change the chroot. Must be for a good reason.

        chroot = f"task_store/{service_name}/{instance_name}/{framework_id}"

        temp_zk_client = KazooClient(hosts=self.zk_hosts)
        temp_zk_client.start()
        temp_zk_client.ensure_path(chroot)
        temp_zk_client.stop()
        temp_zk_client.close()

        self.zk_client = KazooClient(hosts=f"{self.zk_hosts}/{chroot}")
        self.zk_client.start()
        self.zk_client.ensure_path("/")
Example no. 54
    def from_task(self, task, sandbox):
        data = json.loads(task.data)
        cluster_name, host, port, zk_url = data['cluster'], data['host'], data[
            'port'], data['zk_url']
        _, servers, path = parse(zk_url)
        kazoo = KazooClient(servers)
        kazoo.start()
        self_instance = ServiceInstance(Endpoint(host, port))

        try:
            task_control = self._task_control_provider.from_task(task, sandbox)
            installer = self._installer_provider.from_task(task, sandbox)
            backup_store = self._backup_store_provider.from_task(task, sandbox)
        except (TaskControl.Error, PackageInstaller.Error) as e:
            kazoo.stop()  # Kazoo needs to be cleaned up. See kazoo/issues/217.
            raise TaskError(e.message)

        state_manager = StateManager(sandbox, backup_store)

        return MysosTaskRunner(self_instance, kazoo,
                               get_cluster_path(path, cluster_name), installer,
                               task_control, state_manager)
Example no. 55
class ZKState(object):
    def __init__(self, path, name=None, timeout=30):
        super(ZKState, self).__init__()
        self._zk = KazooClient(hosts=ZK_HOSTS, timeout=timeout)
        self._zk.start(timeout=timeout)
        self._path = path
        self._name = ""
        if name is not None:
            self._name = name + "."
        self._zk.ensure_path(path)

    def processed(self):
        return self._zk.exists(self._path + "/" + self._name + "complete")

    def process_start(self):
        if self.processed(): return False
        if self._zk.exists(self._path + "/" + self._name + "processing"):
            return False
        try:
            self._zk.create(self._path + "/" + self._name + "processing",
                            ephemeral=True)
            return True
        except NodeExistsError:  # another process wins
            return False

    def process_end(self):
        self._zk.create(self._path + "/" + self._name + "complete")
        self._zk.delete(self._path + "/" + self._name + "processing")

    def process_abort(self):
        try:
            self._zk.delete(self._path + "/" + self._name + "processing")
        except NoNodeError:
            pass

    def close(self):
        self._zk.stop()
        self._zk.close()
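A minimal sketch of the intended protocol: one worker wins process_start(), does the work, and leaves a persistent completion marker. The path, name and do_work() are hypothetical:

state = ZKState('/pipeline/item-123', name='transcode')
if state.process_start():
    try:
        do_work()
        state.process_end()    # persistent "complete" marker
    except Exception:
        state.process_abort()  # removes the ephemeral "processing" marker
state.close()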
Example no. 56
    def __zk_detect(self, zk_url, prefix='/mesos'):
        '''
        Try to get master url info from zookeeper

        :param zk_url: ip/port to reach zookeeper
        :type zk_url: str
        :param prefix: prefix to search for in zookeeper
        :type prefix: str
        '''
        mesos_master = None
        mesos_prefix = prefix
        if not prefix.startswith('/'):
            mesos_prefix = '/' + prefix
        zk = KazooClient(zk_url)
        zk.start()
        childs = zk.get_children(mesos_prefix)
        for child in childs:
            if not child.startswith('json'):
                continue
            (data, zk_mesos) = zk.get(mesos_prefix + '/' + child)  # use the configured prefix
            if sys.version_info.major == 3:
                data = data.decode("utf-8")
            master_info = json.loads(data)
            if 'pid' in master_info and master_info['pid'].startswith(
                    'master@'):
                mesos_master = master_info['pid'].replace('master@', '')

                try:
                    requests.get('http://' + mesos_master)
                    mesos_master = 'http://' + mesos_master
                # If we get connection closed, assume we are in strict mode and set https protocol
                except ConnectionError:
                    mesos_master = 'https://' + mesos_master

                break
        zk.stop()
        self.logger.debug('Zookeeper mesos master: %s' % (str(mesos_master)))
        return mesos_master
Example no. 57
def get_zookeeper_topics_partitions(zoo_list):
    """
    Get topics partitions from zookeeper

    Args:
        zoo_list (str): A comma separated list of zookeeper hosts and ports, e.g. zoo1:2181,zoo2:2181,zoo3:2181
    Returns:
        list: A list that each entry represents a topic with its partitions
    """

    topics_partitions = []

    zk = KazooClient(hosts=zoo_list)
    zk.start()
    for zoo_kafka_topic in zk.get_children("/brokers/topics"):
        zoo_kafka_topic_path = TOPICS_PATH + zoo_kafka_topic
        partitions = json.loads(
            (zk.get(zoo_kafka_topic_path)[0]).decode('utf-8'))['partitions']
        topic = {"topic_name": zoo_kafka_topic, "partitions": partitions}

        topics_partitions.append(topic)
    zk.stop()
    return topics_partitions
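A short usage sketch with a hypothetical ensemble string:

for topic in get_zookeeper_topics_partitions('zoo1:2181,zoo2:2181,zoo3:2181'):
    print(topic['topic_name'], len(topic['partitions']), 'partitions')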
Example no. 58
class PyZooConn(object):
    # init function: sets up the connection
    def __init__(self):
        self.zk = KazooClient(hosts='10.15.107.110:2181')
        self.zk.start()

    # get node data
    def get_data(self, param):
        result = self.zk.get(param)
        print(result)

    # create a node and store a value in it
    def create_node(self, node, value):
        self.zk.create(node, value)

    # close the connection
    def close(self):
        self.zk.stop()

Example no. 59
class MidKazooClient(object):
    def __init__(self):
        self.zk_client = KazooClient(
            hosts=config.config["kazoo"]["KAZOO_HOST"])

    def kazoo_info_get(self):
        self.zk_client.start()
        self.nodes = self.zk_client.get_children(
            config.config["kazoo"]["KAZOO_ROOT"])
        for node in self.nodes:
            self.user_node = self.zk_client.get_children(
                config.config["kazoo"]["USER_NODE"].format(node))
            for nn in self.user_node:
                print(nn)
                print(config.config["kazoo"]["KAZOO_NODE"].format(node, nn))
                print(
                    str(self.zk_client.get(
                        config.config["kazoo"]["KAZOO_NODE"].format(node,
                                                                    nn))[0],
                        encoding="utf-8").replace("null", ""))
                # print(nn)

        self.zk_client.stop()
Example no. 60
    def reader(self, j):
        l.info("Thread-%s" % j)
        zk = KazooClient(hosts=self.zk_server_ip)
        zk.start()
        while True:
            if self.run_data['test_action'] == 'stopstress':
                l.info("Stopping thread-%s" % j)
                zk.stop()
                break
            else:
                #				conn_start = time.time()*1000
                #				zk.start()
                #				conn_end = time.time()*1000
                #				conn_diff = conn_end - conn_start
                #				l.info("thread-%s=%s" %(j,conn_diff))
                for i in range(1000):
                    #					read_start = time.time()*1000
                    #					time.sleep(0.1)
                    data, stat = zk.get("/Hydra/Test/test-0")
                #					read_end = time.time()*1000 - read_start
                #					l.info(read_end)
                #				zk.stop()
        return 'ok', None