Example 1
def main():
  """ Main function which initializes and starts the tornado server. """
  # Parse command line arguments
  parser = argparse.ArgumentParser(description='A taskqueue API server')
  parser.add_argument('--port', '-p', default='17447',
                      help='TaskQueue server port')
  parser.add_argument('--verbose', action='store_true',
                      help='Output debug-level logging')
  args = parser.parse_args()
  if args.verbose:
    logger.setLevel(logging.DEBUG)

  # Configure zookeeper and db access
  zk_client = KazooClient(
    hosts=','.join(appscale_info.get_zk_node_ips()),
    connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  db_access = DatastoreProxy()

  # Initialize tornado server
  task_queue = distributed_tq.DistributedTaskQueue(db_access, zk_client)
  tq_application = prepare_taskqueue_application(task_queue)
  # Automatically decompress incoming requests.
  server = httpserver.HTTPServer(tq_application, decompress_request=True)
  server.listen(args.port)

  # Make sure taskqueue shuts down gracefully when signal is received
  graceful_shutdown = prepare_graceful_shutdown(zk_client, server)
  signal.signal(signal.SIGTERM, graceful_shutdown)
  signal.signal(signal.SIGINT, graceful_shutdown)

  logger.info('Starting TaskQueue server on port {}'.format(args.port))
  ioloop.IOLoop.current().start()
Example 2
def main():
    parser = argparse.ArgumentParser(description = DESCRIPTION)

    parser.add_argument("hosts", metavar = "<zookeeper-endpoint>", type = str,
        nargs = "+", help = "Zookeeper node endpoints to connect to")
    parser.add_argument("--timeout", dest = "timeout", action = "store", type = int,
        default = 30, help = "Zookeeper connection timeout")

    option = parser.parse_args()

    logging.debug("Using %s as a Zookeeper connection string" % option.hosts)

    client = KazooClient(hosts = ",".join(option.hosts))

    try:
        client.start(timeout = option.timeout)
    except TimeoutError as e:
        logging.error("Timed out while connecting to Zookeeper")
        return 1

    status = bootstrap(client, str(uuid.uuid4()))

    # If the client is not stopped, it will hang forever maintaining the connection.
    client.stop()

    return status
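
A minimal entry-point sketch (an assumption, since the wiring is not shown in the original): propagate main()'s return value as the process exit status.

import sys

if __name__ == "__main__":
    sys.exit(main())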
Example 3
 def _open(self):
     conninfo = self.connection.client
     self.vhost = os.path.join('/', conninfo.virtual_host[0:-1])
     hosts = []
     if conninfo.alt:
         for host_port in conninfo.alt:
             if host_port.startswith('zookeeper://'):
                 host_port = host_port[len('zookeeper://'):]
             if not host_port:
                 continue
             try:
                 host, port = host_port.split(':', 1)
                 host_port = (host, int(port))
             except ValueError:
                 if host_port == conninfo.hostname:
                     host_port = (host_port, conninfo.port or DEFAULT_PORT)
                 else:
                     host_port = (host_port, DEFAULT_PORT)
             hosts.append(host_port)
     host_port = (conninfo.hostname, conninfo.port or DEFAULT_PORT)
     if host_port not in hosts:
         hosts.insert(0, host_port)
     conn_str = ','.join(['%s:%s' % (h, p) for h, p in hosts])
     conn = KazooClient(conn_str)
     conn.start()
     return conn
Example 4
    def _start(self, err_msg, spawn_monit=False):
        if self._is_destroyed:
            return

        self._client = None
        # Increase the session timeout from 10 to 25 seconds.
        try:
            host_list = self.zk_hosts
            client = KazooClient(
                hosts=",".join(host_list),
                timeout=self._get_session_timeout(),
                max_retries=3,
                handler=SequentialGeventHandler())

            # Increase the start timeout to 20 seconds from 15 seconds.
            # Guard this with explicit gevent timeout to protect us from
            # some corner cases where starting client failed to respect
            # start timeout passed in below.
            with gevent.Timeout(seconds=self._get_start_timeout() + 5):
                client.start(timeout=self._get_start_timeout())
            client.ensure_path("/")
            self._last_success_health_check_ts = time.time()
            log.info("Successfully started kazoo client.")
            self._client = client
        except (Exception, gevent.Timeout):
            self._sc.increment("errors.zk.client.start.failure",
                               tags={'host': hostname},
                               sample_rate=1)
            log.exception(err_msg)
        finally:
            if spawn_monit:
                self._monit_greenlet = gevent.spawn(self._monit)
                gevent.sleep(0)
Example 5
def _get_zk_conn(hosts):
    global ZK_CONNECTION
    if ZK_CONNECTION is None:
        ZK_CONNECTION = KazooClient(hosts=hosts)
        ZK_CONNECTION.start()

    return ZK_CONNECTION
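
A hedged companion to the lazy module-level singleton above (the helper name is hypothetical, not part of the original): stop and clear the cached connection so a later call can reconnect.

def _close_zk_conn():
    global ZK_CONNECTION
    if ZK_CONNECTION is not None:
        ZK_CONNECTION.stop()   # end the ZooKeeper session
        ZK_CONNECTION.close()  # release handler resources
        ZK_CONNECTION = None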
Example 6
 def setup(self):
     zk = KazooClient(hosts=self.addr)
     zk.start()
     self.zk = zk
     cfg = self.app.cfg
     log = cfg.logger_class(cfg)
     self.log = log
Example 7
def start_zoo(cport):
    '''
    Client uses this function to start an instance of zookeeper
    Arguments:
        cport : An unused TCP port for zookeeper to use as the client port
    '''
    basefile = "zookeeper-3.4.5"
    tarfile = os.path.dirname(os.path.abspath(__file__)) + "/" + basefile + ".tar.gz"
    cassbase = "/tmp/zoo." + str(cport) + "/"
    confdir = cassbase + basefile + "/conf/"
    output,_ = call_command_("mkdir " + cassbase)

    logging.info('Installing zookeeper in ' + cassbase + " conf " + confdir)
    os.system("cat " + tarfile + " | tar -xpzf - -C " + cassbase)

    output,_ = call_command_("cp " + confdir + "zoo_sample.cfg " + confdir + "zoo.cfg")

    logging.info('zookeeper Client Port %d' % cport)

    replace_string_(confdir + "zoo.cfg", \
        [("dataDir=/tmp/zookeeper", "dataDir="+cassbase)])

    replace_string_(confdir + "zoo.cfg", \
        [("clientPort=2181", "clientPort="+str(cport))])

    output,_ = call_command_(cassbase + basefile + "/bin/zkServer.sh start")

    zk = KazooClient(hosts='127.0.0.1:'+str(cport))
    zk.start()
    zk.stop()
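
A hedged teardown counterpart to start_zoo (an assumption, since the original only shows startup), reusing the call_command_ helper to stop the instance and remove its scratch directory.

def stop_zoo(cport):
    '''
    Stops the zookeeper instance started by start_zoo on the given client port.
    '''
    basefile = "zookeeper-3.4.5"
    cassbase = "/tmp/zoo." + str(cport) + "/"
    call_command_(cassbase + basefile + "/bin/zkServer.sh stop")
    call_command_("rm -rf " + cassbase)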
Example 8
class ZKTestBase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        utdocker.pull_image(zk_tag)

    def setUp(self):

        utdocker.create_network()
        utdocker.start_container(
            zk_name,
            zk_tag,
            env={
                "ZOO_MY_ID": 1,
                "ZOO_SERVERS": "server.1=0.0.0.0:2888:3888",
            },
            port_bindings={2181: 21811}
        )

        self.zk = KazooClient('127.0.0.1:21811')
        self.zk.start()

        self.zkauthed, _ = zkutil.kazoo_client_ext(
            {'hosts': '127.0.0.1:21811', 'auth': ('digest', 'xp', '123'),
             'acl': (('xp', '123', 'cdrwa'), ('foo', 'bar', 'rw'))})

        dd('start zk-test in docker')

    def tearDown(self):

        self.zk.stop()
        self.zkauthed.stop()
        utdocker.remove_container(zk_name)
Example 9
 def kafka_save(key, content, kafka, pre=''):
     zookeeper = KazooClient()
     zookeeper.start()
     cluster = Cluster(zookeeper)
     topic = cluster.topics['topicname']
     topic.publish('msg')
Example 10
    def init_connections(self, no_init=False):
        '''
        Sets up the initial Kazoo Client and watches
        '''
        success = False
        self.set_valid(False)

        if not no_init:
            if self.zoo_client:
                self.zoo_client.remove_listener(self.state_listener)
                self.old_data = ''
                self.old_pointed = ''

            while not success:
                try:
                    if self.zoo_client is None:
                        self.zoo_client = KazooClient(hosts=self.hosts)
                        self.zoo_client.start()
                    else:
                        # self.zoo_client.stop()
                        self.zoo_client._connection.connection_stopped.set()
                        self.zoo_client.close()
                        self.zoo_client = KazooClient(hosts=self.hosts)
                        self.zoo_client.start()
                except Exception as e:
                    log.error("ZKWatcher Exception: " + e.message)
                    sleep(1)
                    continue

                self.setup()
                success = self.update_file(self.my_file)
                sleep(5)
        else:
            self.setup()
            self.update_file(self.my_file)
Example 11
def resolve_master(
      cluster_url, master_callback=lambda: True, termination_callback=lambda: True, zk_client=None):
  """
    Resolve the MySQL cluster master's endpoint from the given URL for this cluster.
    :param cluster_url: The ZooKeeper URL for this cluster.
    :param master_callback: A callback method with one argument: the ServiceInstance for the elected
                            master.
    :param termination_callback: A callback method with no argument. Invoked when the cluster
                                 terminates.
    :param zk_client: Use a custom ZK client instead of Kazoo if specified.
  """
  try:
    _, zk_servers, cluster_path = zookeeper.parse(cluster_url)
  except Exception as e:
    raise ValueError("Invalid cluster_url: %s" % e.message)

  if not zk_client:
    zk_client = KazooClient(zk_servers)
    zk_client.start()

  listener = ClusterListener(
      zk_client,
      cluster_path,
      None,
      master_callback=master_callback,
      termination_callback=termination_callback)
  listener.start()
Example 12
    def expire_session(self, client_id=None):
        """Force ZK to expire a client session

        :param client_id: id of client to expire. If unspecified, the id of
                          self.client will be used.

        """
        client_id = client_id or self.client.client_id

        lost = threading.Event()
        safe = threading.Event()

        def watch_loss(state):
            if state == KazooState.LOST:
                lost.set()
            if lost.is_set() and state == KazooState.CONNECTED:
                safe.set()
                return True

        self.client.add_listener(watch_loss)

        # Sometimes we have to do this a few times
        attempts = 0
        while attempts < 5 and not lost.is_set():
            client = KazooClient(self.hosts, client_id=client_id, timeout=0.8)
            client.start()
            client.stop()
            lost.wait(5)
            attempts += 1
        # Wait for the reconnect now
        safe.wait(15)
        self.client.retry(self.client.get_async, '/')
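
A hypothetical test sketch built on expire_session() (assuming the same harness that provides self.client): force an expiration and verify the client reconnects and can serve requests again.

    def test_recovers_after_session_expiry(self):
        self.client.ensure_path('/expiry-check')
        self.expire_session()
        # After the forced expiration the client should have reconnected.
        assert self.client.exists('/expiry-check') is not None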
Example 13
  class ZookeeperMasterDetector(FutureMasterDetector):
    @classmethod
    def from_uri(cls, uri):
      url = urlparse(uri)
      if url.scheme.lower() != 'zk':
        raise cls.InvalidUrl('ZookeeperMasterDetector got invalid ensemble URI %s' % uri)
      return cls(url.netloc, url.path)

    def __init__(self, ensemble, path):
      super(ZookeeperMasterDetector, self).__init__()

      self._kazoo_client = KazooClient(ensemble)
      self._kazoo_client.start_async()

      self._group = MesosKazooGroup(self._kazoo_client, path)
      self._group.monitor(callback=self.on_change)

    def on_change(self, membership):
      if membership:
        leader = sorted(membership)[0]
        self._group.info(leader, callback=self.on_appointment)
      self._group.monitor(membership, callback=self.on_change)

    def on_appointment(self, master_data):
      master_info = MasterInfo()
      master_info.MergeFromString(master_data)
      self.appoint(master_info_to_pid(master_info))
Example 14
  def from_task(self, task, sandbox):
    data = json.loads(task.data)
    cluster_name, host, port, zk_url = data['cluster'], data['host'], data['port'], data['zk_url']
    _, servers, path = parse(zk_url)
    kazoo = KazooClient(servers)
    kazoo.start()
    self_instance = ServiceInstance(Endpoint(host, port))

    try:
      task_control = self._task_control_provider.from_task(task, sandbox)
      installer = self._installer_provider.from_task(task, sandbox)
      backup_store = self._backup_store_provider.from_task(task, sandbox)
    except (TaskControl.Error, PackageInstaller.Error) as e:
      kazoo.stop()  # Kazoo needs to be cleaned up. See kazoo/issues/217.
      raise TaskError(e.message)

    state_manager = StateManager(sandbox, backup_store)

    return MysosTaskRunner(
        self_instance,
        kazoo,
        get_cluster_path(path, cluster_name),
        installer,
        task_control,
        state_manager)
Example 15
def main():
  """ Starts the groomer. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='Output debug-level logging')
  args = parser.parse_args()

  if args.verbose:
    logger.setLevel(logging.DEBUG)

  zk_hosts = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_hosts),
                          connection_retry=ZK_PERSISTENT_RECONNECTS,
                          command_retry=KazooRetry(max_tries=-1))
  zk_client.start()

  db_access = DatastoreProxy()

  thread_pool = ThreadPoolExecutor(4)

  TransactionGroomer(zk_client, db_access, thread_pool)
  logger.info('Starting transaction groomer')

  IOLoop.current().start()
Example 16
def get_zoo_client(cluster_name="qconf"):
    """get zoo client by cluster_name
    """
    global ZOO_CLIENTS

    if cluster_name not in ZOO_CLIENTS:
        # get zookeeper hosts info
        zookeeper = ZdZookeeper.one(cluster_name=cluster_name, deleted="0")
        if not zookeeper:
            raise ZookeeperConfError("Zookeeper not configured for cluster: {}!".format(cluster_name))
        # connect to zookeeper
        try:
            client = KazooClient(hosts=zookeeper.hosts,
                                 connection_retry={"max_tries": 3, "backoff": 2})
            client.start(3)
            ZOO_CLIENTS[cluster_name] = client
        except KazooTimeoutError as exc:
            log.error('Failed to connect to zookeeper, %s', str(exc))
            return

    # check the connection's state; if not connected, reconnect
    zoo_client = ZOO_CLIENTS[cluster_name]
    if not zoo_client.connected:
        zoo_client.restart()
    return zoo_client
Example 17
    def __setstate__(self, state):
        hosts = state.pop('client')
        client = KazooClient(hosts)
        client.start()

        self.__dict__ = state
        self.client = client
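
A minimal sketch of the __getstate__ counterpart implied by the __setstate__ above: the live KazooClient is replaced by the connection string it was built from so the object can be pickled. The _hosts attribute is hypothetical; it stands in for wherever the real class records its host string.

    def __getstate__(self):
        state = self.__dict__.copy()
        state['client'] = self._hosts  # swap the live client for its hosts string
        return state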
Example 18
def mkfs(args=None):
    args = mkfs_parser.parse_args(args)

    # Log verbosity
    verbosity = args.verbose - args.quiet
    log_level = logging.WARN - verbosity*10

    logging.basicConfig(level=log_level)
    logging.getLogger('kazoo.client').setLevel(log_level + 20)

    # ZK Path of filesystem root
    zk_root = posixpath.join(FILESYSTEMS, args.name)

    # Zookeeper
    if len(args.servers):
        zk_hosts = ','.join(args.servers)
    else:
        zk_hosts = '127.0.0.1:2181'
    zk = KazooClient(hosts=zk_hosts)

    zk.start()

    # Run
    ClowderFS.mkfs(zk=zk, fs_root=zk_root, chunk_size=args.chunk_size)

    # Cleanup
    zk.stop()
Example 19
def init_hierarchy(hosts, hierarchy, users, auth):

    zkcli = KazooClient(hosts)
    zkcli.start()

    scheme, name, passw = auth
    zkcli.add_auth(scheme, name + ':' + passw)

    def _init_hierarchy(hierarchy, parent_path):

        if len(hierarchy) == 0:
            return

        for node, attr_children in hierarchy.items():
            val = attr_children.get('__val__', {})
            val = utfjson.dump(val)
            acl = attr_children.get('__acl__')

            path = _init_node(zkcli, parent_path, node, val, acl, users)
            children = {k: v
                        for k, v in attr_children.items()
                        if k not in ('__val__', '__acl__')
                        }

            _init_hierarchy(children, path)

    _init_hierarchy(hierarchy, '/')
    close_zk(zkcli)
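
A hedged sketch of the close_zk() helper used above (its definition is not shown): stop the session and release the client's resources.

def close_zk(zkcli):
    zkcli.stop()
    zkcli.close()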
Example 20
def main():
  global datastore_path
  global deployment_config

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                      required=True, help="The blobstore server's port")
  parser.add_argument('-d', '--datastore-path', required=True,
                      help='The location of the datastore server')
  args = parser.parse_args()

  datastore_path = args.datastore_path
  zk_ips = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_ips))
  zk_client.start()
  deployment_config = DeploymentConfig(zk_client)
  setup_env()

  http_server = tornado.httpserver.HTTPServer(
    Application(), max_buffer_size=MAX_REQUEST_BUFF_SIZE, xheaders=True)

  http_server.listen(args.port)

  # Make sure this server is accessible from each of the load balancers.
  secret = appscale_info.get_secret()
  for load_balancer in appscale_info.get_load_balancer_ips():
    acc = AppControllerClient(load_balancer, secret)
    acc.add_routing_for_blob_server()

  logger.info('Starting BlobServer on {}'.format(args.port))
  tornado.ioloop.IOLoop.instance().start()
Example 21
def chunk(args=None):
    args = chunk_parser.parse_args(args)

    # Log verbosity
    verbosity = args.verbose - args.quiet
    if args.debug:
        log_level = logging.DEBUG - verbosity*10
    else:
        log_level = logging.WARN - verbosity*10

    logging.basicConfig(level=log_level)
    logging.getLogger('kazoo.client').setLevel(log_level + 20)

    # Zookeeper servers
    if len(args.servers):
        zk_hosts = ','.join(args.servers)
    else:
        zk_hosts = '127.0.0.1:2181'

    # Zookeeper client
    zk = KazooClient(hosts=zk_hosts)

    zk.start()

    # ChunkServer
    cs = HTTPChunkServer(zk=zk, addr=(args.host,args.port), cache_path=args.chunk_cache, hash_data=args.hash_data)
    cs.run()

    # Cleanup
    zk.stop()
Example 22
def _get_json_type(request, cluster_id, type):
	data = []
	error_brokers = 0
	try:	
		cluster = get_cluster_or_404(id=cluster_id)

		zk = KazooClient(hosts=cluster['zk_host_ports'])
		zk.start()

		if type == "broker":			
			brokers, error_brokers = _get_brokers(zk,cluster_id)
			for broker in brokers:
				data.append(broker['host'])
		if type == "topic":
			topics, error_zk_topics = _get_topics(cluster)
			for topic in topics:
				data.append(topic['id'])
		if type == "metric":
			data = _get_sections_ini()
	except KazooException:
		error_zk_brokers = 1

	zk.stop()

	return JsonResponse(data, safe=False)
Example 23
def main():
    """ A server that handles API requests from runtime instances. """
    logging.basicConfig(format=LOG_FORMAT)

    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, required=True,
                        help='The port to serve requests from')
    parser.add_argument('--project-id', required=True,
                        help='The project to handle requests for')
    parser.add_argument('--zookeeper-locations', required=True, nargs='+',
                        help='A list of ZooKeeper locations')
    args = parser.parse_args()

    pidfile_location = os.path.join(
        VAR_DIR, 'api-server_{}-{}.pid'.format(args.project_id, args.port))
    with open(pidfile_location, 'w') as pidfile:
        pidfile.write(str(os.getpid()))

    logging.getLogger('appscale').setLevel(logging.INFO)

    zk_client = KazooClient(hosts=','.join(args.zookeeper_locations),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()

    service_map = {
        'app_identity_service': AppIdentityService(args.project_id, zk_client)
    }

    app = web.Application([
        ('/', MainHandler, {'service_map': service_map})
    ])
    logger.info('Starting API server for {} on {}'.format(args.project_id,
                                                          args.port))
    app.listen(args.port)
    IOLoop.current().start()
Example 24
class GetInfo:
    def __init__(self):
        self.all_info = {}
        #self.SERVER_IP_AND_PORT = "127.0.0.1:2181"
        self.SERVER_IP_AND_PORT = "172.18.229.251:2181"
        self.zk = None
    
    def start_zk(self):
        self.zk = KazooClient(hosts=self.SERVER_IP_AND_PORT)
        self.zk.start();
    
    def getInfo(self):
        children = self.zk.get_children("/monitorData")
        node_nums = len(children)
        for i in range(node_nums):
            data, stat = self.zk.get("/monitorData/" + str(children[i]))
            #data2, stat2 = self.zk.get("/monitorDataJustOneTime/" + str(children[i]))
            #print json.loads(data2.decode("utf-8"))
            #print data2
            #data3, stat3 = self.zk.get("/monitorDataProcessInfo/" + str(children[i]))
            #print data3
            #print str(children[i])
            #print json.loads(data.decode("utf-8"))
            self.all_info[children[i]] = json.loads(data.decode("utf-8"))
            for key in self.all_info[children[i]].keys():
                print key
                print self.all_info[children[i]][key]
            #print self.all_info
        return self.all_info
Example 25
def start():
  global zk

  zk = KazooClient()

  if shell.config['barrier'] is True:
    path_barrier = '/'+shell.config['identity']+'/barrier'
    value_barrier = json.dumps({'NodeId':shell.config['nodeid']}, encoding='utf-8')

    @zk.DataWatch(path_barrier)
    def watch_node(data, stat, event):
      global flag
      if event:
        logging.info("Node Event %s %s, data %s" %(event.path, event.type, data))
        if event.type == EventType.DELETED:
          flag[0] = True
          if flag[1]:
            zk.handler.spawn(create_ephemeral)
          else:
            flag[1] = True


  zk.add_listener(my_listener)

  try:
    zk.start()
  except Exception as e:
    logging.error(e)
    sys.exit(1)
Example 26
def connect_to_zk():
    zookeeper_connect_string = os.getenv('ZOOKEEPER_CONN_STRING')
    zk = KazooClient(hosts=zookeeper_connect_string)
    zk.start()
    zk.add_listener(state_listener)
    logging.info("connected to Zookeeper")
    return zk
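
A hedged sketch of the state_listener referenced above (its definition is not shown): kazoo invokes the listener with a KazooState value on every connection transition.

from kazoo.client import KazooState

def state_listener(state):
    if state == KazooState.LOST:
        logging.warning("Zookeeper session lost")
    elif state == KazooState.SUSPENDED:
        logging.warning("Zookeeper connection suspended")
    else:
        logging.info("Zookeeper connection established")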
Example 27
class ZookeeperClient(object):
    def __init__(self, server_list):
        self._retry = KazooRetry(max_tries=None, max_delay=300,
                                 sleep_func=gevent.sleep)
        self._zk_client = KazooClient(
            hosts=','.join(server_list),
            timeout=400,
            handler=SequentialGeventHandler(),
            logger=logger,
            connection_retry=self._retry,
            command_retry=self._retry)

    def connect(self):
        self._zk_client.start()

    def disconnect(self):
        self._zk_client.stop()
        self._zk_client.close()

    def create_node(self, path, value=None):
        if value is None:
            value = uuid.uuid4()
        try:
            self._zk_client.create(path, str(value), makepath=True)
        except NodeExistsError:
            self._zk_client.set(path, str(value))
Example 28
def do_zookeeper_update(addr, path, value):
	print(path)
	zk = KazooClient(addr)
	zk.start()
	zk.set(path, bytes(value, 'utf-8'))

	do_zookeeper_read(addr, path)
Example 29
def main():
   # Read configuration.
   options = yaml.load(open("config.yaml"))
   
   # Connect to the ZooKeeper cluster.
   zookeeper = KazooClient(
     hosts = ','.join(options["zookeeper"])
   )
   
   zookeeper.start()
   
   # Connect to SES.
   email_connection = boto.ses.connect_to_region("us-east-1")
   
   try:
      update_number = 0
      
      # Sends an update to me every hour.
      while True:
         trigger_update(zookeeper, email_connection,
           force = (update_number % 24 == 0))
         time.sleep(1 * 3600)
         
         update_number += 1
      
   finally:
         
      # Clean up the connection to ZooKeeper.
      zookeeper.stop()
Example 30
def get_connected_zookeeper_client():
    zookeeper_client = KazooClient('33.33.33.10:2181')
    zookeeper_client.start()

    return zookeeper_client
Example 31
 def _makeOne(self, *args, **kw):
     from kazoo.client import KazooClient
     return KazooClient(*args, **kw)
Example 32
from kazoo.client import KazooClient
import sys
from kazoo.client import KazooState
import time
import pandas as pd

import logging
logging.basicConfig()

port = sys.argv[1]
zk = KazooClient(hosts=port)

try:
    zk.start()

except:
    raise Exception("Please give valid port number")

n = int(sys.argv[2])
print("\n ")
#print("the max number list is ",n)
if (n > 25):
    print(
        "The maximum vakue of N is 25, so the watcher will diaplay maximum of 25"
    )
    n = 25


@zk.ChildrenWatch("/NN")
def watch_children(children):
    if (len(children) == 0):
Example 33
class RPCServer(asyncore.dispatcher):
    zk_root = "/demo"
    zk_rpc = zk_root + "/rpc"

    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.host = host
        self.port = port
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(1)
        self.child_pids = []
        if self.prefork(10):  # spawn child processes
            self.register_zk()  # register the service
            self.register_parent_signal()  # parent-process cleanup handling
        else:
            self.register_child_signal()  # child-process cleanup handling

    def prefork(self, n):
        for i in range(n):
            pid = os.fork()
            if pid < 0:  # fork error
                raise
            if pid > 0:  # parent process
                self.child_pids.append(pid)
                continue
            if pid == 0:
                return False  # child process
        return True

    def register_zk(self):
        self.zk = KazooClient(hosts='127.0.0.1:2181')
        self.zk.start()
        self.zk.ensure_path(self.zk_root)  # create the root node
        value = json.dumps({"host": self.host, "port": self.port})
        # create the service child node
        self.zk.create(self.zk_rpc, value, ephemeral=True, sequence=True)

    def exit_parent(self, sig, frame):
        self.zk.stop()  # close the zk client
        self.close()  # close the server socket
        asyncore.close_all()  # close all client sockets
        pids = []
        for pid in self.child_pids:
            print 'before kill'
            try:
                os.kill(pid, signal.SIGINT)  # terminate the child process
                pids.append(pid)
            except OSError, ex:
                if ex.args[0] == errno.ECHILD:  # the target child already exited
                    continue
                raise ex
            print 'after kill', pid
        for pid in pids:
            while True:
                try:
                    os.waitpid(pid, 0)  # reap the target child process
                    break
                except OSError, ex:
                    if ex.args[0] == errno.ECHILD:  # the child has already been reaped
                        break
                    if ex.args[0] != errno.EINTR:
                        raise ex  # interrupted by another signal, retry
            print 'wait over', pid
Example 34
from flask import Flask
import redis
import kazoo

app = Flask(__name__)

from kazoo.client import KazooClient
import logging

zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()

REDIS_NODE_PATH = "/soma/caches/redis"
redis_conns = []


def create_redis_connections():
    # TODO
    # Implement code to get redis lists from Zookeeper
    raise Exception("Not Implements")


create_redis_connections()


def get_index(name):
    s = 0
    for i in name:
        s += ord(i)

    return s % len(redis_conns)
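
A hedged sketch of what the TODO above might look like, assuming each child znode of REDIS_NODE_PATH stores "host:port" for one Redis instance (the node layout is an assumption):

def create_redis_connections_sketch():
    for child in zk.get_children(REDIS_NODE_PATH):
        data, _stat = zk.get(REDIS_NODE_PATH + "/" + child)
        host, port = data.decode("utf-8").split(":")
        redis_conns.append(redis.StrictRedis(host=host, port=int(port)))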
Example 35
def _get_address(zkquorum, path, timeout=9, retries=3):
    """Get specific server address.

    Args:
        zkquorum (str):
        path (str):
        timeout (int):
        retries (int):

    Returns:
        tuple: (hostname, port)

    Raises:
        exceptions.TransportError: Failed to connect.
        exceptions.NoSuchZookeeperNodeError: The required node not found.
        exceptions.ZookeeperProtocolError: Invalid response.

    """
    zk_client = KazooClient(hosts=zkquorum)
    try:
        zk_client.start(timeout=timeout)
    except KazooTimeoutError:
        raise exceptions.TransportError(
            'Failed to connect to zookeeper at %s.' % zkquorum)
    response, znodestat = None, None
    for _ in range(retries + 1):
        try:
            response, znodestat = zk_client.get(path)
        except NoNodeError:
            time.sleep(3.0)
            continue
        else:
            break
    if response is None:
        raise exceptions.NoSuchZookeeperNodeError(
            'ZooKeeper does not contain a %s node.' % path)
    zk_client.stop()

    # the message contains at least 5 bytes with the following structure:
    # (1B)(4B)... => (b'\xff')(meta_size)...
    if len(response) < 5:
        raise exceptions.ZookeeperProtocolError(
            'ZooKeeper returned too short a response. Response size: %d.' %
            len(response))
    tag, meta_size = struct.unpack('>cI', response[:5])
    if tag != b'\xff':
        raise exceptions.ZookeeperProtocolError(
            'ZooKeeper returned an invalid response. b\'\\xff\' expected, got %s'
            % tag)
    if meta_size <= 0 or meta_size > 65000:
        raise exceptions.ZookeeperProtocolError(
            'ZooKeeper returned an invalid meta size %d.' % meta_size)

    # (meta_size B)(4B)... => (meta)(b'PBUF')...
    magic = struct.unpack('>4s', response[meta_size + 5:meta_size + 9])[0]
    if magic != b'PBUF':
        raise exceptions.ZookeeperProtocolError(
            'ZooKeeper returned an invalid response. b\'PBUF\' expected, got %s.'
            % magic)

    meta = pb.MetaRegionServer()
    try:
        meta.ParseFromString(response[meta_size + 9:])
    except DecodeError:
        raise exceptions.ZookeeperProtocolError(
            'Failed to parse MetaRegionServer from response.')
    return meta.server.host_name, meta.server.port
Example 36
class RPCServer(asyncore.dispatcher):

    zk_root = "/demo"
    zk_rpc = zk_root + "/rpc"

    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.host = host
        self.port = port
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(1)
        self.child_pids = []
        if self.prefork(10):  # spawn child processes
            self.register_zk()  # register the service
            self.register_parent_signal()  # parent-process cleanup handling
        else:
            self.register_child_signal()  # child-process cleanup handling

    def prefork(self, n):
        for i in range(n):
            pid = os.fork()
            if pid < 0:  # fork error
                raise
            if pid > 0:  # parent process
                self.child_pids.append(pid)
                continue
            if pid == 0:
                return False  # child process
        return True

    def register_zk(self):
        self.zk = KazooClient(hosts='127.0.0.1:2181')
        self.zk.start()
        self.zk.ensure_path(self.zk_root)  # create the root node
        value = json.dumps({"host": self.host, "port": self.port})
        # create the service child node
        self.zk.create(self.zk_rpc,
                       bytes(value, 'utf-8'),
                       ephemeral=True,
                       sequence=True)

    def exit_parent(self, sig, frame):
        self.zk.stop()  # close the zk client
        self.close()  # close the server socket
        asyncore.close_all()  # close all client sockets
        pids = []
        for pid in self.child_pids:
            print('before kill')
            try:
                os.kill(pid, signal.SIGINT)  # terminate the child process
                pids.append(pid)
            except OSError as ex:
                if ex.args[0] == errno.ECHILD:  # the target child already exited
                    continue
                raise ex
            print('after kill', pid)
        for pid in pids:
            while True:
                try:
                    os.waitpid(pid, 0)  # reap the target child process
                    break
                except OSError as ex:
                    if ex.args[0] == errno.ECHILD:  # the child has already been reaped
                        break
                    if ex.args[0] != errno.EINTR:
                        raise ex  # interrupted by another signal, retry
            print('wait over', pid)

    def reap_child(self, sig, frame):
        print('before reap')
        while True:
            try:
                info = os.waitpid(-1, os.WNOHANG)  # reap any child process
                break
            except OSError as ex:
                if ex.args[0] == errno.ECHILD:
                    return  # no children left to reap
                if ex.args[0] != errno.EINTR:
                    raise ex  # interrupted by another signal, retry
        pid = info[0]
        try:
            self.child_pids.remove(pid)
        except ValueError:
            pass
        print('after reap', pid)

    def register_parent_signal(self):
        signal.signal(signal.SIGINT, self.exit_parent)
        signal.signal(signal.SIGTERM, self.exit_parent)
        signal.signal(signal.SIGCHLD, self.reap_child)  # watch for child exits

    def exit_child(self, sig, frame):
        self.close()  # close the server socket
        asyncore.close_all()  # close all client sockets
        print('all closed')

    def register_child_signal(self):
        signal.signal(signal.SIGINT, self.exit_child)
        signal.signal(signal.SIGTERM, self.exit_child)

    def handle_accept(self):
        pair = self.accept()  # accept a new connection
        if pair is not None:
            sock, addr = pair
            RPCHandler(sock, addr)
Example 37
    def test_session_callback_states(self):
        from kazoo.protocol.states import KazooState, KeeperState
        from kazoo.client import KazooClient

        client = KazooClient()
        client._handle = 1
        client._live.set()

        result = client._session_callback(KeeperState.CONNECTED)
        eq_(result, None)

        # Now with stopped
        client._stopped.set()
        result = client._session_callback(KeeperState.CONNECTED)
        eq_(result, None)

        # Test several state transitions
        client._stopped.clear()
        client.start_async = lambda: True
        client._session_callback(KeeperState.CONNECTED)
        eq_(client.state, KazooState.CONNECTED)

        client._session_callback(KeeperState.AUTH_FAILED)
        eq_(client.state, KazooState.LOST)

        client._handle = 1
        client._session_callback(-250)
        eq_(client.state, KazooState.SUSPENDED)
Example 38
# --------------------------------------- ORCHESTRATOR CODE INIT ---------------------------------------

logging.basicConfig(filename='orchestrator.log',
                    format='%(asctime)s => %(levelname)s : %(message)s',
                    level=logging.DEBUG)

print("\n\n-----ORCHESTRATOR RUNNING-----\n\n")
logging.info('Orchestrator running')

ip = "http://0.0.0.0:80"
ride_share = Flask(__name__)
port = 80
host = "0.0.0.0"

zk = KazooClient(hosts="zoo")
zk.start()
logging.info('Zookeeper connection established')

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='rabbitmq', heartbeat=0))
print("connection:", connection)
channel = connection.channel()
logging.info('RabbitMQ connection established')

# Timer variable for the auto scaling logic
timer = None

# Flag variable to check whether it is the first read db request. This is done so that we can start the timer on the first request
is_first_read_request = True
Example 39
class Subscriber:

    # instantiate variables and connect to broker
    def __init__(self, ip_add, timeout=-1, history=1):
        self.history = int(history)
        self.kill = True
        self.count = 0
        self.full_add = "tcp://" + str(ip_add)
        self.context = zmq.Context()
        self.sock_sub = self.context.socket(zmq.SUB)
        self.sock_sub.RCVTIMEO = timeout

        # PRESIDENT ZNODE ADDRESS
        self.home = "/president/pres"

        self.zk_driver = KazooClient(hosts='127.0.0.1:2181')
        self.zk_driver.start()

        data, stat = self.zk_driver.get(self.home)
        ports = data.decode('ASCII').split(":")
        self.full_add = "tcp://" + str(ip_add) + ":" + ports[1]
        self.sock_sub.connect(self.full_add)

    def register_sub(self, topics):
        topic_list = [topic.strip() for topic in topics.split(',')]
        for topic in topic_list:
            #subscribe to topic
            self.sock_sub.setsockopt_string(zmq.SUBSCRIBE, topic)

    def notify(self, stop=None):
        if stop:
            while (not stop.is_set()):
                # only used for measurements.py
                message = self.sock_sub.recv_string()
                topic, info, id = message.split("||")
                # print("Time received: %.20f" % time.time())  # uncomment for measurements.py purposes
                msgs = info.split("...")
                if len(msgs) < self.history:
                    info = "The publisher's history size is less than the requested history size, so no messages will be played."
                else:
                    msgs = msgs[len(msgs) - self.history:len(msgs)]
                    info = ",".join(msgs)
                print("Topic: %s. Message: %s" % (topic, info))
                self.count = self.count + 1
        else:
            while True:

                @self.zk_driver.DataWatch(self.home)
                def watch_node(data, stat, event):
                    if event is not None and event.type == "CREATED" and self.kill:
                        self.kill = False
                        print("Updated Broker!")

                message = self.sock_sub.recv_string()
                topic, info, id = message.split("||")
                msgs = info.split("...")
                #print("Time received: %.20f" % time.time())  # uncomment for measurements purposes
                if len(msgs) < self.history:
                    info = "The topics' history size is less than the requested history size, so no messages will be played."
                else:
                    msgs = msgs[len(msgs) - self.history:len(msgs)]
                    info = ",".join(msgs)
                print("Topic: %s. Message(s): %s" % (topic, info))
                self.count = self.count + 1
Example 40
def get_connection_zk(nodename, timeout=30.0):
    _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(nodename) + ":9181", timeout=timeout)
    _fake_zk_instance.start()
    return _fake_zk_instance
Example 41
def update_zk_node(zk_node_path: str, zookeeper_ensemble: str, state: FetcherResult):
    zk = KazooClient(hosts=zookeeper_ensemble)
    zk.start()
    zk.set(zk_node_path, state.to_binary())
    zk.stop()
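
A hedged companion sketch (not part of the original) that reads the node back; decoding the raw bytes is left to whatever counterpart FetcherResult offers to to_binary().

def read_zk_node(zk_node_path: str, zookeeper_ensemble: str) -> bytes:
    zk = KazooClient(hosts=zookeeper_ensemble)
    zk.start()
    try:
        data, _stat = zk.get(zk_node_path)
        return data
    finally:
        zk.stop()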
Example 42
def main():
  """ Starts the AdminServer. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser(
    prog='appscale-admin', description='Manages AppScale-related processes')
  subparsers = parser.add_subparsers(dest='command')
  subparsers.required = True

  serve_parser = subparsers.add_parser(
    'serve', description='Starts the server that manages AppScale processes')
  serve_parser.add_argument(
    '-p', '--port', type=int, default=constants.DEFAULT_PORT,
    help='The port to listen on')
  serve_parser.add_argument(
    '-v', '--verbose', action='store_true', help='Output debug-level logging')

  subparsers.add_parser(
    'summary', description='Lists AppScale processes running on this machine')
  restart_parser = subparsers.add_parser(
    'restart',
    description='Restart AppScale processes running on this machine')
  restart_parser.add_argument('service', nargs='+',
                              help='The process or service ID to restart')

  args = parser.parse_args()
  if args.command == 'summary':
    table = sorted(list(get_combined_services().items()))
    print(tabulate(table, headers=['Service', 'State']))
    sys.exit(0)

  if args.command == 'restart':
    socket_path = urlquote(ServiceManagerHandler.SOCKET_PATH, safe='')
    session = requests_unixsocket.Session()
    response = session.post(
      'http+unix://{}/'.format(socket_path),
      data={'command': 'restart', 'arg': [args.service]})
    response.raise_for_status()
    return

  if args.verbose:
    logging.getLogger('appscale').setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())
  options.define('login_ip', appscale_info.get_login_ip())
  options.define('private_ip', appscale_info.get_private_ip())
  options.define('load_balancers', appscale_info.get_load_balancer_ips())

  acc = appscale_info.get_appcontroller_client()
  ua_client = UAClient(appscale_info.get_db_master_ip(), options.secret)
  zk_client = KazooClient(
    hosts=','.join(appscale_info.get_zk_node_ips()),
    connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  version_update_lock = zk_client.Lock(constants.VERSION_UPDATE_LOCK_NODE)
  thread_pool = ThreadPoolExecutor(4)
  monit_operator = MonitOperator()
  all_resources = {
    'acc': acc,
    'ua_client': ua_client,
    'zk_client': zk_client,
    'version_update_lock': version_update_lock,
    'thread_pool': thread_pool
  }

  if options.private_ip in appscale_info.get_taskqueue_nodes():
    logger.info('Starting push worker manager')
    GlobalPushWorkerManager(zk_client, monit_operator)

  if options.private_ip in appscale_info.get_load_balancer_ips():
    logger.info('Starting RoutingManager')
    routing_manager = RoutingManager(zk_client)
    routing_manager.start()

  service_manager = ServiceManager(zk_client)
  service_manager.start()

  controller_state = ControllerState(zk_client)

  app = web.Application([
    ('/oauth/token', OAuthHandler, {'ua_client': ua_client}),
    ('/v1/apps/([^/]*)/services/([^/]*)/versions', VersionsHandler,
     {'acc': acc, 'ua_client': ua_client, 'zk_client': zk_client,
      'version_update_lock': version_update_lock, 'thread_pool': thread_pool,
      'controller_state': controller_state}),
    ('/v1/projects', ProjectsHandler, all_resources),
    ('/v1/projects/([a-z0-9-]+)', ProjectHandler, all_resources),
    ('/v1/apps/([^/]*)/services', ServicesHandler,
     {'ua_client': ua_client, 'zk_client': zk_client}),
    ('/v1/apps/([^/]*)/services/([^/]*)', ServiceHandler,
     all_resources),
    ('/v1/apps/([^/]*)/services/([^/]*)/versions/([^/]*)',
     VersionHandler,
     {'acc': acc, 'ua_client': ua_client, 'zk_client': zk_client,
      'version_update_lock': version_update_lock, 'thread_pool': thread_pool,
      'controller_state': controller_state}),
    ('/v1/apps/([^/]*)/operations/([a-z0-9-]+)', OperationsHandler,
     {'ua_client': ua_client}),
    ('/api/cron/update', UpdateCronHandler,
     {'acc': acc, 'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/datastore/index/add', UpdateIndexesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/queue/update', UpdateQueuesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client})
  ])
  logger.info('Starting AdminServer')
  app.listen(args.port)

  management_app = web.Application([
    ('/', ServiceManagerHandler, {'service_manager': service_manager})])
  management_server = HTTPServer(management_app)
  management_socket = bind_unix_socket(ServiceManagerHandler.SOCKET_PATH)
  management_server.add_socket(management_socket)

  io_loop = IOLoop.current()
  io_loop.start()
Example 43
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    parser = init_parser()
    args = parser.parse_args()
    status = {'status': 'inProgress', 'message': 'Starting services'}
    write_to_json_file(status, args.log_postfix)

    db_access = None
    zookeeper = None
    try:
        # Ensure monit is running.
        relevant_ips = set(args.zookeeper) | set(args.database)
        for ip in relevant_ips:
            utils.ssh(ip, args.keyname, 'service monit start')

        start_zookeeper(args.zookeeper, args.keyname)
        conn = KazooClient(hosts=",".join(args.zookeeper))
        conn.start()
        if not conn.exists(ZK_CASSANDRA_CONFIG):
            conn.create(ZK_CASSANDRA_CONFIG,
                        json.dumps({"num_tokens": 256}),
                        makepath=True)
        start_cassandra(args.database, args.db_master, args.keyname,
                        args.zookeeper)
        datastore_upgrade.wait_for_quorum(args.keyname, len(args.database),
                                          args.replication)
        db_access = datastore_upgrade.get_datastore()

        # Exit early if a data layout upgrade is not needed.
        if db_access.valid_data_version():
            status = {
                'status': 'complete',
Example 44
 def get_kazoo_client(self, zoo_instance_name):
     zk = KazooClient(hosts=self.get_instance_ip(zoo_instance_name))
     zk.start()
     return zk
Example 45
class ZooKeeperJobStore(BaseJobStore):
    """
    Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
    kazoo's `KazooClient
    <http://kazoo.readthedocs.io/en/latest/api/client.html>`_.

    Plugin alias: ``zookeeper``

    :param str path: path to store jobs in
    :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
        providing connection arguments
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
        highest available
    """

    def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False,
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(ZooKeeperJobStore, self).__init__()
        self.pickle_protocol = pickle_protocol
        self.close_connection_on_exit = close_connection_on_exit

        if not path:
            raise ValueError('The "path" parameter must not be empty')

        self.path = path

        if client:
            self.client = maybe_ref(client)
        else:
            self.client = KazooClient(**connect_args)
        self._ensured_path = False

    def _ensure_paths(self):
        if not self._ensured_path:
            self.client.ensure_path(self.path)
        self._ensured_path = True

    def start(self, scheduler, alias):
        super(ZooKeeperJobStore, self).start(scheduler, alias)
        if not self.client.connected:
            self.client.start()

    def lookup_job(self, job_id):
        self._ensure_paths()
        node_path = os.path.join(self.path, job_id)
        try:
            content, _ = self.client.get(node_path)
            doc = pickle.loads(content)
            job = self._reconstitute_job(doc['job_state'])
            return job
        except BaseException:
            return None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        jobs = [job_def['job'] for job_def in self._get_jobs()
                if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp]
        return jobs

    def get_next_run_time(self):
        next_runs = [job_def['next_run_time'] for job_def in self._get_jobs()
                     if job_def['next_run_time'] is not None]
        return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None

    def get_all_jobs(self):
        jobs = [job_def['job'] for job_def in self._get_jobs()]
        self._fix_paused_jobs_sorting(jobs)
        return jobs

    def add_job(self, job):
        self._ensure_paths()
        node_path = os.path.join(self.path,  str(job.id))
        value = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(value, self.pickle_protocol)
        try:
            self.client.create(node_path, value=data)
        except NodeExistsError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        self._ensure_paths()
        node_path = os.path.join(self.path,  str(job.id))
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(changes, self.pickle_protocol)
        try:
            self.client.set(node_path, value=data)
        except NoNodeError:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        self._ensure_paths()
        node_path = os.path.join(self.path,  str(job_id))
        try:
            self.client.delete(node_path)
        except NoNodeError:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        try:
            self.client.delete(self.path, recursive=True)
        except NoNodeError:
            pass
        self._ensured_path = False

    def shutdown(self):
        if self.close_connection_on_exit:
            self.client.stop()
            self.client.close()

    def _reconstitute_job(self, job_state):
        job_state = job_state
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self):
        self._ensure_paths()
        jobs = []
        failed_job_ids = []
        all_ids = self.client.get_children(self.path)
        for node_name in all_ids:
            try:
                node_path = os.path.join(self.path, node_name)
                content, _ = self.client.get(node_path)
                doc = pickle.loads(content)
                job_def = {
                    'job_id': node_name,
                    'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None,
                    'job_state': doc['job_state'],
                    'job': self._reconstitute_job(doc['job_state']),
                    'creation_time': _.ctime
                }
                jobs.append(job_def)
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it' % node_name)
                failed_job_ids.append(node_name)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            for failed_id in failed_job_ids:
                self.remove_job(failed_id)
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key,
                                                 job_def['creation_time']))

    def __repr__(self):
        self._logger.exception('<%s (client=%s)>' % (self.__class__.__name__, self.client))
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
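
A minimal usage sketch for the job store above, assuming APScheduler 3.x with the zookeeper extra installed; the leftover keyword arguments (here hosts) are passed straight through to KazooClient.

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.add_jobstore('zookeeper', path='/apscheduler', hosts='127.0.0.1:2181')
scheduler.start()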
Example 46
import logging
import re
import sys
import time
from time import sleep

from kazoo.client import KazooClient

from interpreter import parser, zookeeper_result, get_result_flag, get_result, clear_result

hosts = '172.16.238.2:2181,172.16.238.3:2182,172.16.238.4:2183'
# test_hosts = '127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183'
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
server_name = sys.argv[1]
server_path = "/servers/" + server_name
# create a client; multiple zookeeper hosts can be specified
zk = KazooClient(hosts=hosts, logger=logging)
server_num = 0
server_list = []


# delete the znodes related to the offline server
def delete_server_node(offline_server_name):
    zk.delete('/servers/{}'.format(offline_server_name), recursive=True)
    tables = zk.get_children('{}/tables'.format(server_path))
    for table in tables:
        if offline_server_name in zk.get_children('/tables/{}'.format(table)):
            zk.delete('/tables/{}/{}'.format(table, offline_server_name))


# decide which server to copy the data to
def copy_server(offline_server_name):
                      "apollo/index-pipelines")
    upgrade_from_json(workspace, session, "index-stages",
                      "apollo/index-stages/instances")
    upgrade_from_json(workspace, session, "query-pipelines",
                      "apollo/query-pipelines")
    upgrade_from_json(workspace, session, "query-stages",
                      "apollo/query-stages/instances")
    upgrade_from_json(workspace, session, "datasources",
                      "apollo/connectors/datasources")


if __name__ == "__main__":
    args = parser.parse_args()

    # Connect to ZK
    zk = KazooClient(hosts=args.zk_connect, read_only=True)
    zk.start()

    ws_name = "fusion_upgrade_2.1"
    action = args.action

    if action == "download":
        print("Downloading pipelines, stages and datasource")
        download_data(ws_name)
    elif action == "upload":
        # Start a new Fusion session
        session = fusion_session(args.fusion_url, args.fusion_username,
                                 args.fusion_password)

        print("Uploading pipelines, stages and datasource payloads")
        upload_data(ws_name, session)
Example 48
    def check(self, instance):
        consumer_groups = self.read_config(instance, 'consumer_groups',
                                           cast=self._validate_consumer_groups)
        zk_connect_str = self.read_config(instance, 'zk_connect_str')
        kafka_host_ports = self.read_config(instance, 'kafka_connect_str')

        zk_prefix = instance.get('zk_prefix', '')
        zk_path_tmpl = zk_prefix + '/consumers/%s/offsets/%s/%s'

        zk_conn = KazooClient(zk_connect_str, timeout=self.zk_timeout)
        zk_conn.start()

        try:
            consumer_offsets = {}
            topics = defaultdict(set)
            for consumer_group, topic_partitions in consumer_groups.iteritems():
                for topic, partitions in topic_partitions.iteritems():
                    topics[topic].update(set(partitions))
                    for partition in partitions:
                        zk_path = zk_path_tmpl % (consumer_group, topic, partition)
                        try:
                            consumer_offset = int(zk_conn.get(zk_path)[0])
                            key = (consumer_group, topic, partition)
                            consumer_offsets[key] = consumer_offset
                        except NoNodeError:
                            self.log.warn('No zookeeper node at %s' % zk_path)
                        except Exception:
                            self.log.exception('Could not read consumer offset from %s' % zk_path)
        finally:
            try:
                zk_conn.stop()
                zk_conn.close()
            except Exception:
                self.log.exception('Error cleaning up Zookeeper connection')

        kafka_conn = KafkaClient(kafka_host_ports, timeout=self.kafka_timeout)

        if kafka_conn:
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK)
        else:
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.UNKNOWN)
        try:
            broker_offsets = {}
            for topic, partitions in topics.items():
                offset_responses = kafka_conn.send_offset_request([
                    OffsetRequest(topic, p, -1, 1) for p in partitions])

                for resp in offset_responses:
                    broker_offsets[(resp.topic, resp.partition)] = resp.offsets[0]
        finally:
            try:
                kafka_conn.close()
            except Exception:
                self.log.exception('Error cleaning up Kafka connection')

        for (topic, partition), broker_offset in broker_offsets.items():
            broker_tags = ['topic:%s' % topic, 'partition:%s' % partition]
            broker_offset = broker_offsets.get((topic, partition))
            self.gauge('kafka.broker_offset', broker_offset, tags=broker_tags)

        for (consumer_group, topic, partition), consumer_offset in consumer_offsets.items():
            broker_offset = broker_offsets.get((topic, partition))

            tags = ['topic:%s' % topic, 'partition:%s' % partition,
                    'consumer_group:%s' % consumer_group]
            self.gauge('kafka.consumer_offset', consumer_offset, tags=tags)
            self.gauge('kafka.consumer_lag', broker_offset - consumer_offset,
                       tags=tags)
Example 49
import logging

import docker
from kazoo.client import KazooClient
from kazoo.client import KazooState

logging.basicConfig()


def ms(name):
    client = docker.DockerClient(base_url='tcp://172.31.88.190:2375')
    l = client.containers.list(filters={"name": name})
    k = l[0].top()['Processes'][0][1]
    id = l[0].id
    return int(k), id


zk = KazooClient(hosts='zoo:2181')
zk.start()


def my_listener(state):
    print("HERR", state)
    if state == KazooState.LOST:
        # Register somewhere that the session was lost
        print("LOST")
    elif state == KazooState.SUSPENDED:
        # Handle being disconnected from Zookeeper
        print("SUSPENDED")
    else:
        print("HI")
        # Handle being connected/reconnected to Zookeeper
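
The snippet above defines my_listener but never registers it, so it would not fire. A minimal, self-contained sketch of how a kazoo connection-state listener is wired up (the host string is an assumption taken from the snippet):

from kazoo.client import KazooClient, KazooState

def on_state(state):
    # Called from kazoo's internal thread on session state transitions.
    if state == KazooState.LOST:
        print("session lost")
    elif state == KazooState.SUSPENDED:
        print("disconnected from ZooKeeper, waiting to reconnect")
    else:  # KazooState.CONNECTED
        print("connected")

zk = KazooClient(hosts='zoo:2181')
zk.add_listener(on_state)  # must be registered for the callback to run
zk.start()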
Esempio n. 50
0
class ZooKeeper(object):
    # Constants used by the REST API:
    LIVE_NODES_ZKNODE = '/live_nodes'
    ALIASES = '/aliases.json'
    CLUSTER_STATE = '/clusterstate.json'
    SHARDS = 'shards'
    REPLICAS = 'replicas'
    STATE = 'state'
    ACTIVE = 'active'
    LEADER = 'leader'
    BASE_URL = 'base_url'
    TRUE = 'true'
    FALSE = 'false'
    COLLECTION = 'collection'

    def __init__(self,
                 zkServerAddress,
                 timeout=15,
                 max_retries=-1,
                 kazoo_client=None):
        if KazooClient is None:
            logging.error(
                'ZooKeeper requires the `kazoo` library to be installed')
            raise RuntimeError

        self.collections = {}
        self.liveNodes = {}
        self.aliases = {}
        self.state = None

        if kazoo_client is None:
            self.zk = KazooClient(zkServerAddress,
                                  read_only=True,
                                  timeout=timeout,
                                  command_retry={'max_tries': max_retries},
                                  connection_retry={'max_tries': max_retries})
        else:
            self.zk = kazoo_client

        self.zk.start()

        def connectionListener(state):
            if state == KazooState.LOST:
                self.state = state
            elif state == KazooState.SUSPENDED:
                self.state = state

        self.zk.add_listener(connectionListener)

        @self.zk.DataWatch(ZooKeeper.CLUSTER_STATE)
        def watchClusterState(data, *args, **kwargs):
            if not data:
                LOG.warning(
                    "No cluster state available: no collections defined?")
            else:
                self.collections = json.loads(data.decode('utf-8'))
                LOG.info('Updated collections: %s', self.collections)

        @self.zk.ChildrenWatch(ZooKeeper.LIVE_NODES_ZKNODE)
        def watchLiveNodes(children):
            self.liveNodes = children
            LOG.info("Updated live nodes: %s", children)

        @self.zk.DataWatch(ZooKeeper.ALIASES)
        def watchAliases(data, stat):
            if data:
                json_data = json.loads(data.decode('utf-8'))
                if ZooKeeper.COLLECTION in json_data:
                    self.aliases = json_data[ZooKeeper.COLLECTION]
                else:
                    LOG.warning('Expected to find %s in alias update %s',
                                ZooKeeper.COLLECTION, json_data.keys())
            else:
                self.aliases = None
            LOG.info("Updated aliases: %s", self.aliases)

    def getHosts(self, collname, only_leader=False, seen_aliases=None):
        if self.aliases and collname in self.aliases:
            return self.getAliasHosts(collname, only_leader, seen_aliases)

        hosts = []
        if collname not in self.collections:
            raise SolrError("Unknown collection: %s", collname)
        collection = self.collections[collname]
        shards = collection[ZooKeeper.SHARDS]
        for shardname in shards.keys():
            shard = shards[shardname]
            if shard[ZooKeeper.STATE] == ZooKeeper.ACTIVE:
                replicas = shard[ZooKeeper.REPLICAS]
                for replicaname in replicas.keys():
                    replica = replicas[replicaname]

                    if replica[ZooKeeper.STATE] == ZooKeeper.ACTIVE:
                        if not only_leader or (replica.get(
                                ZooKeeper.LEADER, None) == ZooKeeper.TRUE):
                            base_url = replica[ZooKeeper.BASE_URL]
                            if base_url not in hosts:
                                hosts.append(base_url)
        return hosts

    def getAliasHosts(self, collname, only_leader, seen_aliases):
        if seen_aliases:
            if collname in seen_aliases:
                LOG.warn("%s in circular alias definition - ignored", collname)
                return []
        else:
            seen_aliases = []
        seen_aliases.append(collname)
        collections = self.aliases[collname].split(",")
        hosts = []
        for collection in collections:
            for host in self.getHosts(collection, only_leader, seen_aliases):
                if host not in hosts:
                    hosts.append(host)
        return hosts

    def getRandomURL(self, collname, only_leader=False):
        hosts = self.getHosts(collname, only_leader=only_leader)
        if not hosts:
            raise SolrError('ZooKeeper returned no active shards!')
        return '%s/%s' % (random.choice(hosts), collname)

    def getLeaderURL(self, collname):
        return self.getRandomURL(collname, only_leader=True)
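
A hypothetical usage sketch for the class above; the ensemble address and collection name are assumptions, and a running SolrCloud cluster publishing its state to ZooKeeper is required:

zk = ZooKeeper('localhost:9983')           # assumed ZooKeeper ensemble
url = zk.getRandomURL('my_collection')     # base URL of any active replica
leader = zk.getLeaderURL('my_collection')  # base URL of a shard leader only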
Esempio n. 51
0
class TestZookeeperMasterDetector(unittest.TestCase):
    def setUp(self):
        self.client = KazooClient('%s:2181' % os.environ['ZOOKEEPER_IP'])
        self.client.start()
        self.root = '/' + uuid.uuid4().hex
        self.client.ensure_path(self.root)
        self.uri = 'zk://%s:2181%s' % (os.environ['ZOOKEEPER_IP'], self.root)

    def tearDown(self):
        self.client.delete(self.root, recursive=True)
        self.client.stop()

    def create_root(self):
        self.client.ensure_path(self.root)

    def unregister_master(self, pid):
        for path in self.client.get_children(self.root):
            full_path = posixpath.join(self.root, path)
            data, _ = self.client.get(full_path)
            master_info = mesos_pb2.MasterInfo()
            master_info.MergeFromString(data)
            if master_info.id == pid.id and master_info.port == pid.port and (
                    socket.inet_ntoa(struct.pack('<L',
                                                 master_info.ip)) == pid.ip):
                self.client.delete(full_path)
                return True

        return False

    def register_master(self, pid):
        master_info = mesos_pb2.MasterInfo(id=pid.id,
                                           ip=struct.unpack(
                                               '<L',
                                               socket.inet_aton(pid.ip))[0],
                                           port=pid.port)
        self.client.create('%s/info_' % self.root,
                           value=master_info.SerializeToString(),
                           ephemeral=True,
                           sequence=True)

    def test_zk_master_detector_creation(self):
        class WrappedZookeeperMasterDetector(ZookeeperMasterDetector):
            def __init__(self, *args, **kw):
                super(WrappedZookeeperMasterDetector,
                      self).__init__(*args, **kw)
                self.changed = threading.Event()

            def on_change(self, membership):
                self.changed.set()
                super(WrappedZookeeperMasterDetector,
                      self).on_change(membership)

        event = threading.Event()
        leader_queue = []

        def appointed_callback(future):
            leader_queue.append(future.result())
            event.set()

        self.create_root()

        # construct master detector and detect master
        detector = WrappedZookeeperMasterDetector.from_uri(self.uri)
        detector.detect().add_done_callback(appointed_callback)

        # trigger detection by registering master
        master_pid = PID('10.1.2.3', 12345, 'master(1)')
        self.register_master(master_pid)
        detector.changed.wait(timeout=10)
        assert detector.changed.is_set()
        event.wait(timeout=10)
        assert event.is_set()
        assert leader_queue == [master_pid]
        leader_queue = []
        event.clear()

        # start new detection loop when existing master changes
        detector.detect(master_pid).add_done_callback(appointed_callback)
        detector.changed.clear()

        # register new master (won't trigger detection until original master is gone.)
        new_master_pid = PID('10.2.3.4', 12345, 'master(1)')
        self.register_master(new_master_pid)
        detector.changed.wait(timeout=10)
        assert detector.changed.is_set()
        detector.changed.clear()
        assert leader_queue == []
        assert not event.is_set()

        # failover existing master
        assert self.unregister_master(master_pid)

        # make sure new master is detected.
        detector.changed.wait(timeout=10)
        assert detector.changed.is_set()
        event.wait(timeout=10)
        assert event.is_set()
        assert leader_queue == [new_master_pid]
Esempio n. 52
0
import flask
import json
import logging
import pika
import pymongo
import subprocess
import threading
import uuid
from time import sleep
from kazoo.client import KazooClient

logging.basicConfig(level=logging.WARNING)

sleep(2) #Wait for zookeeper startup

zk = KazooClient(hosts='zookeeper:2181')
zk.start()

zk.ensure_path('/workers') #all workers create an ephemeral node as a child of this path

zk.create('/scale', b'1') #/scale znode contains number of slaves that need to be running at anytime

inited = False #inited will be set to True once
scaling = False #scaling will be set to True only while autoscaling is happening

@zk.ChildrenWatch('/workers', send_event=True) #watch children of /workers znode
def high_availability(children, event):
    #Spawn a new worker, if a worker crashed
    if inited and not scaling:
        data, stat = zk.get('/scale')
        expected = int(data.decode('utf-8')) + 1 #slaves + master
Esempio n. 53
0
def start_kazoo_client(intf="10.0.0.1", port="2181"):
    url = f'{intf}:{port}'
    print(f"ZK client Started: {url}")
    zk = KazooClient(hosts=url)
    zk.start()
    return zk
Esempio n. 55
0
class TestWait(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        utdocker.pull_image(zk_tag)

    def setUp(self):

        utdocker.create_network()
        utdocker.start_container(zk_name,
                                 zk_tag,
                                 env={
                                     "ZOO_MY_ID": 1,
                                     "ZOO_SERVERS":
                                     "server.1=0.0.0.0:2888:3888",
                                 },
                                 port_bindings={2181: 21811})

        self.zk = KazooClient('127.0.0.1:21811')
        self.zk.start()

        dd('start zk-test in docker')

    def tearDown(self):

        self.zk.stop()
        utdocker.remove_container(zk_name)

    def test_wait_absent(self):

        for wait_time in (
                -1,
                0.0,
                0.1,
                1,
        ):

            dd('no node wait:', wait_time)

            with ututil.Timer() as t:
                zkutil.wait_absent(self.zk, 'a', wait_time)
                self.assertAlmostEqual(0, t.spent(), delta=0.2)

    def test_wait_absent_no_timeout(self):
        def _del():
            time.sleep(1)
            self.zk.delete('a')

        for kwargs in (
            {},
            {
                'timeout': None
            },
        ):

            self.zk.create('a')
            th = threadutil.start_daemon(target=_del)

            with ututil.Timer() as t:
                zkutil.wait_absent(self.zk, 'a', **kwargs)
                self.assertAlmostEqual(1, t.spent(), delta=0.1)

            th.join()

    def test_wait_absent_timeout(self):

        self.zk.create('a')

        for wait_time in (
                -1,
                0.0,
                0.1,
                1,
        ):
            dd('node present wait:', wait_time)
            expected = max([0, wait_time])

            with ututil.Timer() as t:
                self.assertRaises(zkutil.ZKWaitTimeout,
                                  zkutil.wait_absent,
                                  self.zk,
                                  'a',
                                  timeout=wait_time)
                self.assertAlmostEqual(expected, t.spent(), delta=0.2)

        self.zk.delete('a')

    def test_wait_absent_delete_node(self):

        delete_after = 0.2

        for wait_time in (
                0.5,
                1,
        ):
            dd('node present wait:', wait_time)

            self.zk.create('a')

            def _del():
                time.sleep(delete_after)
                self.zk.delete('a')

            th = threadutil.start_daemon(target=_del)
            with ututil.Timer() as t:
                zkutil.wait_absent(self.zk, 'a', wait_time)
                self.assertAlmostEqual(delete_after, t.spent(), delta=0.1)

            th.join()

    def test_wait_absent_change_node(self):

        self.zk.create('a')

        change_after = 0.2

        for wait_time in (
                0.5,
                1,
        ):
            dd('node present wait:', wait_time)
            expected = max([0, wait_time])

            def _change():
                time.sleep(change_after)
                self.zk.set('a', 'bbb')

            th = threadutil.start_daemon(target=_change)
            with ututil.Timer() as t:
                self.assertRaises(zkutil.ZKWaitTimeout,
                                  zkutil.wait_absent,
                                  self.zk,
                                  'a',
                                  timeout=wait_time)
                self.assertAlmostEqual(expected, t.spent(), delta=0.1)

            th.join()

        self.zk.delete('a')

    def test_wait_absent_connection_lost(self):

        self.zk.create('a')

        def _close():
            time.sleep(.3)
            self.zk.stop()

        th = threadutil.start_daemon(target=_close)

        with ututil.Timer() as t:
            self.assertRaises(ConnectionClosedError, zkutil.wait_absent,
                              self.zk, 'a')
            self.assertAlmostEqual(.3, t.spent(), delta=0.1)

        th.join()

    def test_get_next_no_version(self):

        cases = (
            -1,
            0.0,
            0.1,
            1,
        )

        for timeout in cases:

            self.zk.create('a', 'a-val')

            with ututil.Timer() as t:
                zkutil.get_next(self.zk, 'a', timeout=timeout, version=-1)
                self.assertAlmostEqual(0, t.spent(), delta=0.2)

            with ututil.Timer() as t:
                zkutil.get_next(self.zk, 'a', timeout=timeout)
                self.assertAlmostEqual(0, t.spent(), delta=0.2)

            self.zk.delete('a')

    def test_get_next_timeout(self):

        cases = (
            -1,
            0.0,
            0.2,
            1,
        )

        for timeout in cases:

            expected = max([timeout, 0])
            self.zk.create('a', 'a-val')

            with ututil.Timer() as t:
                self.assertRaises(zkutil.ZKWaitTimeout,
                                  zkutil.get_next,
                                  self.zk,
                                  'a',
                                  timeout=timeout,
                                  version=0)
                self.assertAlmostEqual(expected, t.spent(), delta=0.2)

            self.zk.delete('a')

    def test_get_next_changed(self):

        cases = (
            0.4,
            1,
        )

        def _set_a():
            self.zk.set('a', 'changed')

        for timeout in cases:

            self.zk.create('a', 'a-val')
            th = threadutil.start_daemon(target=_set_a, after=0.3)

            with ututil.Timer() as t:
                val, zstat = zkutil.get_next(self.zk,
                                             'a',
                                             timeout=timeout,
                                             version=0)
                self.assertAlmostEqual(0.3, t.spent(), delta=0.2)
                self.assertEqual('changed', val)
                self.assertEqual(1, zstat.version)

            th.join()
            self.zk.delete('a')

    def test_get_next_changed_but_unsatisfied(self):

        cases = (
            0.4,
            1,
        )

        def _set_a():
            self.zk.set('a', 'changed')

        for timeout in cases:

            self.zk.create('a', 'a-val')
            th = threadutil.start_daemon(target=_set_a, after=0.3)

            with ututil.Timer() as t:
                self.assertRaises(zkutil.ZKWaitTimeout,
                                  zkutil.get_next,
                                  self.zk,
                                  'a',
                                  timeout=timeout,
                                  version=5)
                self.assertAlmostEqual(timeout, t.spent(), delta=0.2)

            th.join()
            self.zk.delete('a')

    def test_get_next_deleted(self):

        cases = (
            0.4,
            1,
        )

        def _del_a():
            self.zk.delete('a')

        for timeout in cases:

            self.zk.create('a', 'a-val')
            th = threadutil.start_daemon(target=_del_a, after=0.3)

            with ututil.Timer() as t:
                self.assertRaises(NoNodeError,
                                  zkutil.get_next,
                                  self.zk,
                                  'a',
                                  timeout=timeout,
                                  version=0)
                self.assertAlmostEqual(0.3, t.spent(), delta=0.2)

            th.join()

    def test_get_next_conn_lost(self):

        self.zk.create('a', 'a-val')
        th = threadutil.start_daemon(target=self.zk.stop, after=0.3)

        with ututil.Timer() as t:
            self.assertRaises(ConnectionClosedError,
                              zkutil.get_next,
                              self.zk,
                              'a',
                              timeout=1,
                              version=0)
            self.assertAlmostEqual(0.3, t.spent(), delta=0.2)

        th.join()
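
The tests above exercise zkutil.wait_absent and zkutil.get_next, whose implementations are not shown here. A minimal, self-contained sketch of the wait_absent behaviour these tests imply, written directly against kazoo (the name, timeout semantics and exception are inferred from the tests, not taken from zkutil's source):

import threading

def wait_absent(zk, path, timeout=None):
    # Block until `path` does not exist; raise if it is still present after
    # `timeout` seconds (None waits forever, values <= 0 check only once).
    # Sketch only: the real zkutil also surfaces ConnectionClosedError.
    absent = threading.Event()

    def on_event(event):
        # Re-arm the watch and re-check whenever the node changes.
        if zk.exists(path, watch=on_event) is None:
            absent.set()

    if zk.exists(path, watch=on_event) is None:
        return
    if not absent.wait(timeout):
        raise RuntimeError('%s still present after %r seconds' % (path, timeout))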
Esempio n. 56
0
File: zk.py Project: lulugyf/idmm4
 def __init__(self, hosts):
     logging.basicConfig()
     self.hosts = hosts
     zk = KazooClient(hosts=hosts)
     #zk.add_listener(self.my_listener)
     self.zk = zk
Esempio n. 57
0
    def test_init_hierarchy(self):

        auth = ('digest', 'aa', 'pw_aa')
        hosts = '127.0.0.1:21811'
        users = {'aa': 'pw_aa', 'bb': 'pw_bb', 'cc': 'pw_cc'}
        hierarchy = {
            'node1': {
                '__val__': 'node1_val',
                '__acl__': {
                    'aa': 'cdrwa',
                    'bb': 'rw'
                },
                'node11': {
                    '__val__': 'node11_val',
                    '__acl__': {
                        'aa': 'cdrwa',
                        'cc': 'r'
                    },
                },
                'node12': {
                    '__val__': 'node12_val',
                    'node121': {
                        '__val__': 'node121_val'
                    }
                },
                'node13': {
                    '__acl__': {
                        'aa': 'cdrwa'
                    }
                }
            },
            'node2': {
                '__val__': 'node2_val',
                'node21': {
                    '__val__': 'node21_val'
                },
                'node22': {
                    '__acl__': {
                        'aa': 'rwa'
                    }
                }
            },
            'node3': {
                '__acl__': {
                    'aa': 'carw',
                    'cc': 'r'
                },
                'node31': {
                    'node311': {
                        'node3111': {},
                        'node3112': {}
                    }
                }
            }
        }

        zkutil.init_hierarchy(hosts, hierarchy, users, auth)

        zkcli = KazooClient(hosts)
        zkcli.start()
        zkcli.add_auth('digest', 'aa:pw_aa')

        expected_nodes = (
            ('/node1', '"node1_val"', [('digest', 'aa', 'cdrwa'),
                                       ('digest', 'bb', 'rw')],
             set(['node11', 'node12', 'node13'])),
            ('/node1/node11', '"node11_val"', [('digest', 'aa', 'cdrwa'),
                                               ('digest', 'cc', 'r')],
             set([])),
            ('/node1/node12', '"node12_val"', [('digest', 'aa', 'cdrwa'),
                                               ('digest', 'bb', 'rw')],
             set(['node121'])),
            ('/node1/node12/node121', '"node121_val"',
             [('digest', 'aa', 'cdrwa'), ('digest', 'bb', 'rw')], set([])),
            ('/node1/node13', '{}', [('digest', 'aa', 'cdrwa')], set([])),
            ('/node2', '"node2_val"', [('world', 'anyone', 'cdrwa')],
             set(['node21', 'node22'])),
            ('/node2/node21', '"node21_val"', [('world', 'anyone', 'cdrwa')],
             set([])),
            ('/node2/node22', '{}', [('digest', 'aa', 'rwa')], set([])),
            ('/node3', '{}', [('digest', 'aa', 'rwca'),
                              ('digest', 'cc', 'r')], set(['node31'])),
            ('/node3/node31', '{}', [('digest', 'aa', 'rwca'),
                                     ('digest', 'cc', 'r')], set(['node311'])),
            ('/node3/node31/node311', '{}', [('digest', 'aa', 'rwca'),
                                             ('digest', 'cc', 'r')],
             set(['node3111', 'node3112'])),
            ('/node3/node31/node311/node3111', '{}', [('digest', 'aa', 'rwca'),
                                                      ('digest', 'cc', 'r')],
             set([])),
            ('/node3/node31/node311/node3112', '{}', [('digest', 'aa', 'rwca'),
                                                      ('digest', 'cc', 'r')],
             set([])),
        )

        for node, val, acl, children in expected_nodes:

            actual_acl = zkutil.parse_kazoo_acl(zkcli.get_acls(node)[0])
            self.assertEqual(val, zkcli.get(node)[0])
            self.assertEqual(acl, actual_acl)
            self.assertEqual(children, set(zkcli.get_children(node)))

        zkcli.stop()
Esempio n. 58
0
class ZkClient():
    """
    Java modifiers:
         private static
    Type:
        Logger
    """
    LOG = get_logger(__name__)
    """
    Java modifiers:
         final static
    Type:
        int
    """
    DEFAULT_CONNECTION_TIMEOUT = 60 * 1000
    """
    Java modifiers:
         final static
    Type:
        int
    """
    DEFAULT_SESSION_TIMEOUT = 30 * 1000

    #    """
    #
    #    Parameters:
    #        IZkConnection connection
    #        int connectionTimeout
    #        PathBasedZkSerializer zkSerializer
    #    """
    #    def __init__(self, connection, connectionTimeout=DEFAULT_SESSION_TIMEOUT, zkSerializer=ByteArraySerializer()):
    ##        super(connection, connectionTimeout, ByteArraySerializer())
    #        self._zkSerializer = zkSerializer
    #        # StackTraceElement[]
    ##        calls = Thread.currentThread().getStackTrace()
    ##        calls = traceback.print_stack()
    #        LOG.info("create a new zkclient. " + repr(traceback.extract_stack()))
    #
    #
    #    """1
    #
    #    Parameters:
    #        IZkConnection connection
    #        int connectionTimeout
    #        ZkSerializer zkSerializer
    #    """
    #    def __init__(self, connection, connectionTimeout, zkSerializer):
    #        this(connection, connectionTimeout, BasicZkSerializer(zkSerializer))
    #
    #
    #    """
    #
    #    Parameters:
    #        IZkConnection connection
    #        int connectionTimeout
    #    """
    #    def __init__(self, connection, connectionTimeout=sys.maxint):
    #        this(connection, connectionTimeout, SerializableSerializer())
    #
    #
    #    """
    #
    #    Parameters:
    #        String zkServers
    #        int sessionTimeout
    #        int connectionTimeout
    #        ZkSerializer zkSerializer
    #    """
    #    def __init__(self, zkServers, sessionTimeout, connectionTimeout, zkSerializer):
    #        this.__init__((zkServers, sessionTimeout), connectionTimeout, zkSerializer)
    #
    #
    """

    Parameters:
        String zkServers
        int sessionTimeout
        int connectionTimeout
        PathBasedZkSerializer zkSerializer
    """

    # DEFAULT_ZK_SERIALIZER = ChainedPathZkSerializer.builder(ZNRecordStreamingSerializer()).serialize(propertyStorePath, ByteArraySerializer()).build()
    # TODO: more serializers?
    def __init__(self,
                 zkServers,
                 sessionTimeout=DEFAULT_SESSION_TIMEOUT,
                 connectionTimeout=DEFAULT_CONNECTION_TIMEOUT,
                 zkSerializer=BasicZkSerializer):
        # Note: kazoo expects timeouts in seconds, while these Java-derived
        # defaults are in milliseconds; callers should pass seconds here.
        self._connection = KazooClient(hosts=zkServers, timeout=sessionTimeout)
        self._zkSerializer = zkSerializer
        self._connection.start(connectionTimeout)
        self.LOG.info("create a new zkclient. " +
                      repr(traceback.extract_stack()))

#        this(ZkConnection(zkServers, sessionTimeout), connectionTimeout, zkSerializer)
#
#
#    """
#
#    Parameters:
#        String zkServers
#        int sessionTimeout
#        int connectionTimeout
#    """
#    def __init__(self, zkServers, sessionTimeout, connectionTimeout):
#        this(ZkConnection(zkServers, sessionTimeout), connectionTimeout, SerializableSerializer())
#
#
#    """
#
#    Parameters:
#        String zkServers
#        int connectionTimeout
#    """
#    def __init__(self, zkServers, connectionTimeout):
#        this(ZkConnection(zkServers), connectionTimeout, SerializableSerializer())
#
#
#    """
#
#    Parameters:
#        String zkServers
#    """
#    def __init__(self, zkServers):
#        this(ZkConnection(zkServers), Integer.MAX_VALUE, SerializableSerializer())
#
#
#    static

    def setZkSerializer(self, zkSerializer):
        """
        Returns void
        Parameters:
            zkSerializer: ZkSerializer
        @Override


        """
        self._zkSerializer = BasicZkSerializer(zkSerializer)

    # Note: Python has no method overloading, so this definition replaces the
    # one above; only the PathBasedZkSerializer variant is actually in effect.
    def setZkSerializer(self, zkSerializer):
        """
        Returns void
        Parameters:
            zkSerializer: PathBasedZkSerializer


        """
        self._zkSerializer = zkSerializer

    def getConnection(self):
        """
        Returns IZkConnection


        """
        return self._connection

    def close(self):
        """
        Returns void
        @Override


        Throws: 
            ZkInterruptedException
        """
        # StackTraceElement[]
        #        calls = Thread.currentThread().getStackTrace()
        self.LOG.info("closing a zkclient. zookeeper: " +
                      repr(self._connection) + ", callStack: " +
                      traceback.extract_stack())
        if self._connection:
            self._connection.close()

    def getStat(self, path):
        """
        Returns Stat
        Parameters:
            path: String


        """
        stat = self._connection.exists(path)
        return stat
        # long
#        startT = System.nanoTime()
#        try:
#            # Stat
#            stat = retryUntilConnected(Callable<Stat>() {
#
#                def call(self):
#                    """
#                    Returns Stat
#                    @Override
#
#
#                    Throws:
#                        Exception
#                    """
#                    # Stat
#                    stat = ((ZkConnection) self._connection).getZookeeper().exists(path, False)
#                    return stat
#
#            })
#            return stat
#        final:
#                # long
#                endT = System.nanoTime()
#                if LOG.isDebugEnabled():
#                    LOG.debug("exists, path: " + str(path)+ ", time: " + str((endT - startT) )+ " ns")

#    def hasListeners(self,path):
#        ''' given a path, find out if there is listener for the path
#        '''
#        ret = False
#        for childWatcher in self._connection._child_watchers:
#           if childWatcher.

    def exists(self, path, watch=None):
        """
        Returns boolean
        Parameters:
            path: String
            watch: boolean
        @Override
        Java modifiers:
             protected

        """
        # long
        #        if not watch:
        #            watch = path in self._connection._child_watchers or path in self._connection._data_watchers
        # in java, watch is a boolean
        # in kazoo, it is a call back function

        #        stat = self._connection.exists(path, watch)
        stat = self._connection.exists(path)
        return stat
#        startT = System.nanoTime()
#        try:
#            return retryUntilConnected(Callable<Boolean>() {
#
#                def call(self):
#                    """
#                    Returns Boolean
#                    @Override
#
#
#                    Throws:
#                        Exception
#                    """
#                    return self._connection.exists(path, watch)
#
#            })
#        final:
#                # long
#                endT = System.nanoTime()
#                if LOG.isDebugEnabled():
#                    LOG.debug("exists, path: " + str(path)+ ", time: " + str((endT - startT) )+ " ns")

    def getChildren(self, path, watch=None):
        """
        Returns List<String>
        Parameters:
            path: String
            watch: boolean
        @Override
        Java modifiers:
             protected

        """
        return self._connection.get_children(path, watch)
#        # long
#        startT = System.nanoTime()
#        try:
#            return retryUntilConnected(Callable<List<String>>() {
#
#                def call(self):
#                    """
#                    Returns List<String>
#                    @Override
#
#
#                    Throws:
#                        Exception
#                    """
#                    return self._connection.getChildren(path, watch)
#
#            })
#        final:
#                # long
#                endT = System.nanoTime()
#                if LOG.isDebugEnabled():
#                    LOG.debug("getChildren, path: " + str(path)+ ", time: " + str((endT - startT) )+ " ns")

    def deserialize(self, data, path):
        """
        Returns T
        Parameters:
            data: byte[]
            path: String
        # Annotation: @SuppressWarnings("unchecked")



        Parameterized: <T extends Object> 
        """
        if data is None:
            return None

        return self._zkSerializer.deserialize(data, path)

    def copyStat(self, src, dest):
        for fieldName in src._fields:
            setattr(dest, fieldName, getattr(src, fieldName))

    def readData(self, *args):
        if len(args) == 3 and isinstance(
                args[1], HelixZNodeStat) and isinstance(args[2], bool):
            return self.readDataAndStat(*args)
        elif len(args) == 2 and isinstance(
                args[1], bool):  # readData(path, nullIfNoExist )
            return self.readDataAndStat(args[0], HelixZNodeStat(), args[1])
        elif len(args) >= 2 and isinstance(args[1], HelixZNodeStat):
            return self.readDataStatInternal(*args)
        else:
            raise IllegalArgumentException("Wrong args: %s" % args)

    def readDataStatInternal(self, path, stat, watch=None):
        """
        Returns T
        Parameters:
            path: String
            stat: Stat
            watch: boolean
        @Override
        # Annotation: @SuppressWarnings("unchecked")

        Java modifiers:
             protected

        Parameterized: <T extends Object> 
        """
        data, statRet = self._connection.get(path, watch)
        self.copyStat(statRet, stat)  # copy over the stats
        return self.deserialize(data, path)
#
#        # long
#        startT = System.nanoTime()
#        try:
#            # byte[]
#            data = retryUntilConnected(Callable<byte[]>() {
#
#                def call(self):
#                    """
#                    Returns byte[]
#                    @Override
#
#
#                    Throws:
#                        Exception
#                    """
#                    return self._connection.readData(path, stat, watch)
#
#            })
#            return (T) deserialize(data, path)
#        final:
#                # long
#                endT = System.nanoTime()
#                if LOG.isDebugEnabled():
#                    LOG.debug("getData, path: " + str(path)+ ", time: " + str((endT - startT) )+ " ns")
#
#

    def readDataAndStat(self, path, stat, returnNullIfPathNotExists):
        """
        Returns T
        Parameters:
            path: String
            stat: Stat
            returnNullIfPathNotExists: boolean
        # Annotation: @SuppressWarnings("unchecked")



        Parameterized: <T extends Object> 
        """
        # T
        data = None
        try:
            data = self.readDataStatInternal(path, stat)
        except NoNodeException as e:
            if not returnNullIfPathNotExists:
                raise e
        return data
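
A hypothetical usage sketch of the ZkClient port above; the host string and timeouts (given in seconds, see the note in __init__) are assumptions:

client = ZkClient('127.0.0.1:2181', sessionTimeout=30, connectionTimeout=60)
if client.exists('/'):
    print(client.getChildren('/'))
    print(client.getStat('/'))
client.close()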
Esempio n. 59
0
class Zooqueue():
    def __init__(self, host, queue_path, agent_name):
        self.host = host
        self.queue_path = queue_path
        self.agent_name = agent_name
        self.zk = KazooClient(hosts=self.host)
        #1. Connect to Server
        self.zk.start()
        print("\n1. Connected to ZooKeeper server as : " + self.agent_name)

        #2. Enter Queue
        self.en_queue()

        try:
            while True:
                time.sleep(3)
        ## ***RUBRIC : Zookeeper disconnection
        except KeyboardInterrupt:
            print("\n!!Disconnecting agent from server.")
            self.zk.stop()
            self.zk.close()
            sys.exit(0)

    def en_queue(self):
        # ***RUBRIC : Agent's znode created under /queue with EPHEMERAL, SEQUENCE option enabled
        self.agent_abs_path = self.zk.create(path=self.queue_path + "/" +
                                             self.agent_name,
                                             ephemeral=True,
                                             sequence=True,
                                             makepath=True)
        self.agent_name_num = self.agent_abs_path[7:]
        print("\n2. Entered Queue as : " + self.agent_abs_path)

        #3. Check Queue Status
        self.check_queue()

    def check_queue(self, event=None):
        print("\n3. Checking queue status")
        sorted_queue_list = self.getchildren(self.queue_path)
        agent_index = sorted_queue_list.index(self.agent_name_num)

        #There is an agent ahead of queue
        if agent_index != 0:
            #4-0. Set watch to wait for turn (RUBRIC : WAITING FOR THE HEAD OF QUEUE (watch))
            self.zk.exists(self.queue_path + "/" +
                           sorted_queue_list[agent_index - 1],
                           watch=self.check_queue)
            print(">>Queue Status: ", sorted_queue_list)
            print(">>...Waiting for TURN...")

        elif agent_index == 0:
            print(">>First in line!")
            #4-1. Take a work item to process
            self.processwork()

    def processwork(self, event=None):
        print("\n4. Processing Work Item")
        sorted_work_items = self.getchildren(_WORKPATH)

        #Empty work item list
        if not sorted_work_items:
            # ***RUBRIC : WAITING FOR WORK ITEM (watch)
            self.zk.get_children(_WORKPATH, watch=self.processwork)
            print(">>...Waiting for WORK...")

        else:
            #5. Process a work item with the lowest sequence number
            self.zk.delete(_WORKPATH + "/" + sorted_work_items[0])
            print(">>Processed item : " + _WORKPATH + "/" +
                  sorted_work_items[0])

            self.addresult(sorted_work_items[0])

    def addresult(self, workitem):
        message = self.agent_name_num + " processed " + workitem
        bmessage = str.encode(message)

        #6. Update Result of processed work item (RUBRIC : result znode created under /results with SEQUENCE option enabled)
        result_path = self.zk.create(path=_RESULTPATH + "/" + self.agent_name,
                                     value=bmessage,
                                     sequence=True,
                                     makepath=True)
        print("\n5. Result added to : " + result_path)

        #7. Move to the back of the queue
        self.de_queue()

    def de_queue(self):
        print("\n6. Dequeue agent " + self.agent_name)
        self.zk.delete(path=self.agent_abs_path)
        print("---------------Round Completed------------------")
        self.en_queue()

    def get_last10(self, elem):
        return elem[-10:]

    #Wrapper for zk.get_children, provides sorted list of children in given path
    def getchildren(self, check_path):
        children = self.zk.get_children(check_path)
        return sorted(children, key=self.get_last10)
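
The class above relies on imports and module-level constants that are not shown (KazooClient, time, sys, _WORKPATH, _RESULTPATH). A hypothetical module preamble and invocation, with every name and path below being an assumption:

import sys
import time
from kazoo.client import KazooClient

_WORKPATH = '/work'       # assumed znode whose children are work items
_RESULTPATH = '/results'  # assumed znode collecting result entries

if __name__ == '__main__':
    # '/queue' matches the hard-coded [7:] slice used in en_queue().
    Zooqueue(host='127.0.0.1:2181', queue_path='/queue', agent_name='agent')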
Esempio n. 60
0
    def test_export_hierarchy(self):

        auth = ('digest', 'aa', 'pw_aa')
        hosts = '127.0.0.1:21811'
        users = {'aa': 'pw_aa', 'bb': 'pw_bb', 'cc': 'pw_cc'}

        hierarchy = {
            'node0': {
                '__val__': '',
                '__acl__': {
                    'aa': 'cdrwa',
                    'bb': 'rw'
                }
            },
            'node1': {
                '__val__': 'node1_val',
                '__acl__': {
                    'aa': 'cdrwa',
                    'bb': 'rw'
                },
                'node11': {
                    '__val__': 'node11_val',
                    '__acl__': {
                        'aa': 'cdrwa',
                        'cc': 'r'
                    },
                },
                'node12': {
                    '__val__': 'node12_val',
                    'node121': {
                        '__val__': 'node121_val'
                    }
                },
                'node13': {
                    '__acl__': {
                        'aa': 'cdrwa'
                    }
                }
            },
            'node2': {
                '__val__': 'node2_val',
                'node21': {
                    '__val__': 'node21_val'
                },
                'node22': {
                    '__acl__': {
                        'aa': 'rwa'
                    }
                }
            },
            'node3': {
                '__acl__': {
                    'aa': 'carw',
                    'cc': 'r'
                },
                'node31': {
                    'node311': {
                        'node3111': {},
                        'node3112': {}
                    }
                }
            }
        }

        zkutil.zkutil.init_hierarchy(hosts, hierarchy, users, auth)

        zkcli = KazooClient(hosts)
        zkcli.start()
        zkcli.add_auth('digest', 'aa:pw_aa')

        invalid_zkpath_cases = ('a', 'a/', 'a/b')

        for zkpath in invalid_zkpath_cases:
            with self.assertRaises(zkutil.zkutil.ZkPathError):
                zkutil.zkutil.export_hierarchy(zkcli, zkpath)

        valid_cases = (('/', {
            '__acl__': {
                u'anyone': 'cdrwa'
            },
            '__val__': '',
            u'node0': {
                '__acl__': {
                    u'aa': 'cdrwa',
                    u'bb': 'rw'
                },
                '__val__': '""'
            },
            u'node1': {
                '__acl__': {
                    u'aa': 'cdrwa',
                    u'bb': 'rw'
                },
                '__val__': '"node1_val"',
                u'node11': {
                    '__acl__': {
                        u'aa': 'cdrwa',
                        u'cc': 'r'
                    },
                    '__val__': '"node11_val"'
                },
                u'node12': {
                    '__acl__': {
                        u'aa': 'cdrwa',
                        u'bb': 'rw'
                    },
                    '__val__': '"node12_val"',
                    u'node121': {
                        '__acl__': {
                            u'aa': 'cdrwa',
                            u'bb': 'rw'
                        },
                        '__val__': '"node121_val"'
                    }
                },
                u'node13': {
                    '__acl__': {
                        u'aa': 'cdrwa'
                    },
                    '__val__': '{}'
                }
            },
            u'node2': {
                '__acl__': {
                    u'anyone': 'cdrwa'
                },
                '__val__': '"node2_val"',
                u'node21': {
                    '__acl__': {
                        u'anyone': 'cdrwa'
                    },
                    '__val__': '"node21_val"'
                },
                u'node22': {
                    '__acl__': {
                        u'aa': 'rwa'
                    },
                    '__val__': '{}'
                }
            },
            u'node3': {
                '__acl__': {
                    u'aa': 'rwca',
                    u'cc': 'r'
                },
                '__val__': '{}',
                u'node31': {
                    '__acl__': {
                        u'aa': 'rwca',
                        u'cc': 'r'
                    },
                    '__val__': '{}',
                    u'node311': {
                        '__acl__': {
                            u'aa': 'rwca',
                            u'cc': 'r'
                        },
                        '__val__': '{}',
                        u'node3111': {
                            '__acl__': {
                                u'aa': 'rwca',
                                u'cc': 'r'
                            },
                            '__val__': '{}'
                        },
                        u'node3112': {
                            '__acl__': {
                                u'aa': 'rwca',
                                u'cc': 'r'
                            },
                            '__val__': '{}'
                        }
                    }
                }
            },
            u'zookeeper': {
                '__acl__': {
                    u'anyone': 'cdrwa'
                },
                '__val__': '',
                u'quota': {
                    '__acl__': {
                        u'anyone': 'cdrwa'
                    },
                    '__val__': ''
                }
            }
        }), ('/node0', {
            '__acl__': {
                u'aa': 'cdrwa',
                u'bb': 'rw'
            },
            '__val__': '""'
        }), ('/node1', {
            '__acl__': {
                u'aa': 'cdrwa',
                u'bb': 'rw'
            },
            '__val__': '"node1_val"',
            u'node11': {
                '__acl__': {
                    u'aa': 'cdrwa',
                    u'cc': 'r'
                },
                '__val__': '"node11_val"'
            },
            u'node12': {
                '__acl__': {
                    u'aa': 'cdrwa',
                    u'bb': 'rw'
                },
                '__val__': '"node12_val"',
                u'node121': {
                    '__acl__': {
                        u'aa': 'cdrwa',
                        u'bb': 'rw'
                    },
                    '__val__': '"node121_val"'
                }
            },
            u'node13': {
                '__acl__': {
                    u'aa': 'cdrwa'
                },
                '__val__': '{}'
            }
        }), ('/node1/node11', {
            '__acl__': {
                u'aa': 'cdrwa',
                u'cc': 'r'
            },
            '__val__': '"node11_val"'
        }), ('/node2', {
            '__acl__': {
                u'anyone': 'cdrwa'
            },
            '__val__': '"node2_val"',
            u'node21': {
                '__acl__': {
                    u'anyone': 'cdrwa'
                },
                '__val__': '"node21_val"'
            },
            u'node22': {
                '__acl__': {
                    u'aa': 'rwa'
                },
                '__val__': '{}'
            }
        }), ('/node3', {
            '__acl__': {
                u'aa': 'rwca',
                u'cc': 'r'
            },
            '__val__': '{}',
            u'node31': {
                '__acl__': {
                    u'aa': 'rwca',
                    u'cc': 'r'
                },
                '__val__': '{}',
                u'node311': {
                    '__acl__': {
                        u'aa': 'rwca',
                        u'cc': 'r'
                    },
                    '__val__': '{}',
                    u'node3111': {
                        '__acl__': {
                            u'aa': 'rwca',
                            u'cc': 'r'
                        },
                        '__val__': '{}'
                    },
                    u'node3112': {
                        '__acl__': {
                            u'aa': 'rwca',
                            u'cc': 'r'
                        },
                        '__val__': '{}'
                    }
                }
            }
        }))

        for path, expected_rst in valid_cases:
            rst = zkutil.export_hierarchy(zkcli, path)
            self.assertEqual(rst, expected_rst)

        zkcli.stop()