def zk_server(tmpdir):
    zk_container_name = ''.join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
    # TODO(cmaloney): Add a python context manager for dockerized daemons
    subprocess.check_call(
        ['docker', 'run', '-d', '--name', zk_container_name] +
        zookeeper_docker_run_args + [zookeeper_docker_image])

    conn_retry_policy = KazooRetry(max_tries=-1, delay=0.1, max_delay=0.1)
    cmd_retry_policy = KazooRetry(max_tries=3, delay=0.3, backoff=1, max_delay=1, ignore_expire=False)
    zk = KazooClient(hosts=zk_hosts, connection_retry=conn_retry_policy, command_retry=cmd_retry_policy)
    zk.start()

    children = zk.get_children('/')
    for child in children:
        if child == 'zookeeper':
            continue
        zk.delete('/' + child, recursive=True)

    yield zk

    zk.stop()
    zk.close()

    subprocess.check_call(['docker', 'rm', '-f', zk_container_name])
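# Hedged note (not from the original module): the yield/teardown shape above is
# what pytest expects from a fixture body, so -- assuming pytest is the intended
# harness and that zk_hosts, zookeeper_docker_run_args and zookeeper_docker_image
# are module-level settings -- a minimal sketch of wiring it up could look like:
import pytest

@pytest.fixture
def zk(tmpdir):
    # Delegate to the generator above; teardown runs after the test finishes.
    yield from zk_server(tmpdir)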
def __init__(self, config):
    super(ZooKeeper, self).__init__(config)

    hosts = config.get('hosts', [])
    if isinstance(hosts, list):
        hosts = ','.join(hosts)

    self._client = KazooClient(hosts,
                               handler=PatroniSequentialThreadingHandler(config['retry_timeout']),
                               timeout=config['ttl'],
                               connection_retry=KazooRetry(max_delay=1, max_tries=-1, sleep_func=time.sleep),
                               command_retry=KazooRetry(deadline=config['retry_timeout'], max_delay=1,
                                                        max_tries=-1, sleep_func=time.sleep))
    self._client.add_listener(self.session_listener)

    self._my_member_data = None
    self._fetch_cluster = True

    self._orig_kazoo_connect = self._client._connection._connect
    self._client._connection._connect = self._kazoo_connect

    self._client.start()
def __init__(self, config):
    super(ZooKeeper, self).__init__(config)

    hosts = config.get('hosts', [])
    if isinstance(hosts, list):
        hosts = ','.join(hosts)

    mapping = {'use_ssl': 'use_ssl', 'verify': 'verify_certs', 'cacert': 'ca',
               'cert': 'certfile', 'key': 'keyfile', 'key_password': 'keyfile_password'}
    kwargs = {v: config[k] for k, v in mapping.items() if k in config}

    if 'set_acls' in config:
        kwargs['default_acl'] = []
        for principal, permissions in config['set_acls'].items():
            normalizedPermissions = [p.upper() for p in permissions]
            kwargs['default_acl'].append(make_acl(scheme='x509',
                                                  credential=principal,
                                                  read='READ' in normalizedPermissions,
                                                  write='WRITE' in normalizedPermissions,
                                                  create='CREATE' in normalizedPermissions,
                                                  delete='DELETE' in normalizedPermissions,
                                                  admin='ADMIN' in normalizedPermissions,
                                                  all='ALL' in normalizedPermissions))

    self._client = PatroniKazooClient(hosts,
                                      handler=PatroniSequentialThreadingHandler(config['retry_timeout']),
                                      timeout=config['ttl'],
                                      connection_retry=KazooRetry(max_delay=1, max_tries=-1, sleep_func=time.sleep),
                                      command_retry=KazooRetry(max_delay=1, max_tries=-1,
                                                               deadline=config['retry_timeout'],
                                                               sleep_func=time.sleep),
                                      **kwargs)
    self._client.add_listener(self.session_listener)

    self._fetch_cluster = True
    self._fetch_status = True
    self.__last_member_data = None

    self._orig_kazoo_connect = self._client._connection._connect
    self._client._connection._connect = self._kazoo_connect

    self._client.start()
def execute(self):
    self.name = self.args.name if self.args.name is not None else names[
        int(random.uniform(1, len(names) - 1))]

    # true unless header authentication is used
    reauthenticate = self.args.api_authn_header is None

    self.api = Api(endpoint=self.args.api_url, insecure=self.args.api_insecure,
                   persist_cookie=False, reauthenticate=reauthenticate,
                   authn_header=self.args.api_authn_header)

    try:
        if self.args.api_authn_header is None:
            if self.args.api_key and self.args.api_secret:
                response = self.api.login_apikey(self.args.api_key, self.args.api_secret)
            else:
                response = self.api.login_password(self.args.api_user, self.args.api_pass)
            if response.status_code == 403:
                raise ConnectionError(
                    'Login with following user/apikey {} failed!'.format(self.args.api_user))
        # Uncomment following lines for manual remote test
        # session_id = self.api.current_session()
        # self.api.operation(self.api.get(session_id), 'switch-group',
        #                    {'claim': "group/nuvla-admin"})
    except ConnectionError as e:
        logging.error('Unable to connect to Nuvla endpoint {}! {}'.format(self.api.endpoint, e))
        exit(1)

    if self.args.zk_hosts:
        from kazoo.client import KazooClient, KazooRetry
        self._kz = KazooClient(','.join(self.args.zk_hosts),
                               connection_retry=KazooRetry(max_tries=-1),
                               command_retry=KazooRetry(max_tries=-1), timeout=30.0)
        self._kz.start()

    if self.args.statsd:
        statsd_hp = self.args.statsd.split(':')
        statsd_port = STATSD_PORT
        statsd_host = statsd_hp[0]
        if len(statsd_hp) > 1:
            statsd_port = statsd_hp[1]
        try:
            self.statsd = StatsClient(host=statsd_host, port=statsd_port, prefix=None, ipv6=False)
        except Exception as ex:
            logging.error(f'Failed to initialise StatsD client for {self.args.statsd}: {ex}')

    self.do_work()

    while True:
        signal.pause()
def run_tests(host: str) -> int:
    conn_retry_policy = KazooRetry(max_tries=10, delay=0.1, max_delay=0.1)
    cmd_retry_policy = KazooRetry(max_tries=5, delay=0.3, backoff=1, max_delay=1, ignore_expire=False)
    zk = KazooClient(hosts=host, timeout=10, command_retry=cmd_retry_policy, connection_retry=conn_retry_policy)
    zk.start()

    hostname = host.split(".")[0]

    if zk.exists(f"/test/{hostname}"):
        log.warning("Cleaning up leftover cruft")
        zk.delete(f"/test/{hostname}", recursive=True)

    zk.ensure_path(f"/test/{hostname}")
    if not zk.exists(f"/test/{hostname}"):
        log.error("Path did not exist after creation")
        return 255

    zk.create(f"/test/{hostname}/znode", b"one")
    data, stat = zk.get(f"/test/{hostname}/znode")
    log.info(stat)
    log.info(data)
    if data.decode("utf-8") != "one":
        log.error("Node create/get failed")
        return 255
    else:
        log.info("Node create/get succeeded")

    zk.set(f"/test/{hostname}/znode", b"two")
    data, stat = zk.get(f"/test/{hostname}/znode")
    log.info(stat)
    log.info(data)
    if data.decode("utf-8") != "two":
        log.error("Node set/get failed")
        return 255
    else:
        log.info("Node set/get succeeded")

    zk.delete(f"/test/{hostname}/znode")
    children = zk.get_children(f"/test/{hostname}")
    if len(children) > 0:
        log.error("Delete failed")
        return 255
    else:
        log.info("Node delete succeeded")

    zk.stop()
    return 0
def get_fake_zk(nodename, timeout=30.0):
    _fake_zk_instance = KazooClient(
        hosts=cluster.get_instance_ip(nodename) + ":9181",
        timeout=timeout,
        command_retry=KazooRetry(max_tries=10),
    )
    _fake_zk_instance.start()
    return _fake_zk_instance
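# Hedged usage sketch (assumed test-harness context, not part of the original
# helper): callers own the client returned by get_fake_zk, so it should be
# stopped and closed explicitly to shut down Kazoo's handler threads. The
# function name below is illustrative only.
def node_is_serving(nodename):
    zk = get_fake_zk(nodename)
    try:
        # exists() returns a ZnodeStat for the root znode or None, so this is
        # a cheap liveness probe against the node's ZooKeeper-compatible port.
        return zk.exists("/") is not None
    finally:
        zk.stop()
        zk.close()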
def final_check(hosts: List[str]) -> int:
    conn_retry_policy = KazooRetry(max_tries=10, delay=0.1, max_delay=0.1)
    cmd_retry_policy = KazooRetry(max_tries=5, delay=0.3, backoff=1, max_delay=1, ignore_expire=False)
    zk = KazooClient(hosts=hosts, timeout=10, command_retry=cmd_retry_policy, connection_retry=conn_retry_policy)
    zk.start()

    for host in hosts:
        hostname = host.split(".")[0]
        log.info(f"Checking /test/{hostname}")
        if not zk.exists(f"/test/{hostname}"):
            log.error(f"Expected /test/{hostname} to exist, but it vanished")
            return 255
        log.info(f"Deleting /test/{hostname}")
        zk.delete(f"/test/{hostname}")

    zk.stop()
    return 0
def execute(self):
    self.name = self.args.name if self.args.name is not None else names[
        int(random.uniform(1, len(names) - 1))]

    self.ss_api = Api(endpoint=self.args.ss_url, insecure=self.args.ss_insecure, reauthenticate=True)
    self.ss_api.login_internal(self.args.ss_user, self.args.ss_pass)

    self._kz = KazooClient(self.args.zk_hosts,
                           connection_retry=KazooRetry(max_tries=-1),
                           command_retry=KazooRetry(max_tries=-1), timeout=30.0)
    self._kz.start()

    self.do_work()

    while True:
        signal.pause()
def __init__(self, name, app):
    self.name = name
    self.app = app
    # seconds: this should match the zookeeper server tick time
    # (normally specified in milliseconds)
    self.tick_time = app.tick_time
    self.logger = logging
    self._takeovers = {}
    self._kazoo_retry = KazooRetry(
        max_tries=10,
        deadline=60,
        ignore_expire=False,
    )
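# Hedged sketch (illustrative, not from the original class): a KazooRetry
# instance such as self._kazoo_retry above is callable, so it can wrap
# individual ZooKeeper operations and re-run them on connection loss until
# max_tries or the deadline is exhausted. The function, client and path names
# below are assumptions.
from kazoo.client import KazooClient
from kazoo.retry import KazooRetry, RetryFailedError

def get_with_retry(client: KazooClient, path: str, retry: KazooRetry) -> bytes:
    try:
        # retry(func, *args) keeps calling func according to the retry policy
        # and returns its result; for client.get that is a (data, ZnodeStat) tuple.
        data, _stat = retry(client.get, path)
        return data
    except RetryFailedError:
        # Raised once the policy gives up; callers decide how to degrade.
        raise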
def __init__(self, config):
    super(ZooKeeper, self).__init__(config)

    hosts = config.get('hosts', [])
    if isinstance(hosts, list):
        hosts = ','.join(hosts)

    mapping = {'use_ssl': 'use_ssl', 'verify': 'verify_certs', 'cacert': 'ca',
               'cert': 'certfile', 'key': 'keyfile', 'key_password': 'keyfile_password'}
    kwargs = {v: config[k] for k, v in mapping.items() if k in config}

    self._client = KazooClient(hosts,
                               handler=PatroniSequentialThreadingHandler(config['retry_timeout']),
                               timeout=config['ttl'],
                               connection_retry=KazooRetry(max_delay=1, max_tries=-1, sleep_func=time.sleep),
                               command_retry=KazooRetry(deadline=config['retry_timeout'], max_delay=1,
                                                        max_tries=-1, sleep_func=time.sleep),
                               **kwargs)
    self._client.add_listener(self.session_listener)

    self._fetch_cluster = True
    self._fetch_optime = True

    self._orig_kazoo_connect = self._client._connection._connect
    self._client._connection._connect = self._kazoo_connect

    self._client.start()
def _kazoo_setup(self, zk_hosts):
    """
    Create and configure Kazoo client

    :param list zk_hosts: List of Zookeeper instances to connect to,
                          in the form of ``["host:port"..]``.
    """
    # Disable exponential back-off
    kretry = KazooRetry(max_tries=-1, max_delay=1)

    # Stop kazoo from drowning the log with debug spam:
    logger = logging.getLogger("KazooClient")
    logger.setLevel(logging.ERROR)
    # (For low-level kazoo debugging):
    # import kazoo.loggingsupport
    # logger.setLevel(kazoo.loggingsupport.BLATHER)

    self.kazoo = KazooClient(
        hosts=",".join(zk_hosts),
        timeout=self._timeout,
        connection_retry=kretry,
        logger=logger)
def main():
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    logger = logging.getLogger(__name__)

    zookeeper_locations = appscale_info.get_zk_locations_string()
    retry_policy = KazooRetry(max_tries=5)
    zk_client = KazooClient(zookeeper_locations,
                            connection_retry=ZK_PERSISTENT_RECONNECTS,
                            command_retry=retry_policy)
    zk_client.start()
    gc_zookeeper = zk.ZKTransaction(zk_client)
    logger.info("Using ZK locations {0}".format(zookeeper_locations))

    datastore_location = ':'.join(
        [appscale_info.get_db_proxy(), str(DB_SERVER_PORT)])
    ds_groomer = groomer.DatastoreGroomer(gc_zookeeper, "cassandra", datastore_location)
    try:
        ds_groomer.start()
    except Exception as exception:
        logger.warning("An exception slipped through:")
        logger.exception(exception)
        logger.warning("Exiting service.")
from kazoo.client import KazooClient, KazooState
from kazoo.exceptions import ConnectionClosedError
from kazoo.retry import KazooRetry


def zk_status_listener(state):
    if state == KazooState.LOST:
        print("Cleaned Working Tree.")
        # print("Session was lost")
        exit(0)
    elif state == KazooState.SUSPENDED:
        print("Handle being disconnected from Zookeeper")
        exit(0)


try:
    zkr = KazooRetry(max_tries=-1)
    client = KazooClient(hosts="127.0.0.1:2181", connection_retry=zkr)
    client.add_listener(zk_status_listener)
    client.start()
    if client.exists("/servers"):
        client.delete("/servers", recursive=True)
    if client.exists("/mapping"):
        client.delete("/mapping", recursive=True)
    client.stop()
    client.close()
except:
    raise ConnectionClosedError("Connection is closed")

client.close()
exit(0)
def create_app(test_config=None):
    # create the app configuration
    app = Flask(
        __name__,
        instance_path=environ.get('FLASK_APP_INSTANCE',
                                  '/user/src/app/instance'))  # instance path

    app.config.from_mapping(
        POSTGRES_HOST=environ.get('POSTGRES_HOST', ''),
        POSTGRES_USER=environ.get('POSTGRES_USER', ''),
        POSTGRES_DATABASE=environ.get('POSTGRES_DATABASE',
                                      environ.get('POSTGRES_USER', '')),
        POSTGRES_PASSWORD=environ.get('POSTGRES_PASSWORD', ''),
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
        AUTH_LEEWAY=timedelta(seconds=int(environ.get(
            'AUTH_LEEWAY', '30'))),  # leeway in seconds
        BASEURL=environ.get('BASEURL', ''),
        DOCKER_HOST=environ.get('DOCKER_HOST', ''),
        DOCKER_BASEURL='http://{}'.format(environ.get('DOCKER_HOST', '')),
        TOKEN_ISSUER=environ.get('TOKEN_ISSUER',
                                 environ.get('BASEURL', 'auth')),
        ZOOKEEPER_CONNECTION_STR=environ.get('ZOOKEEPER_CONNECTION_STR',
                                             'zoo1,zoo2,zoo3'),
    )

    app.config[
        'SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://{}:{}@{}/{}'.format(
            app.config['POSTGRES_USER'],
            app.config['POSTGRES_PASSWORD'],
            app.config['POSTGRES_HOST'],
            app.config['POSTGRES_DATABASE'])

    if test_config is None:
        # load the instance config if it exists, when not testing
        app.config.from_pyfile(path.join(app.instance_path, 'config.py'),
                               silent=True)
    else:
        app.config.from_mapping(test_config)

    if not app.testing:
        if app.config.get('POSTGRES_HOST') == '':
            raise Exception(
                'No postgres database host was provided. '
                'POSTGRES_HOST environment variable cannot be omitted')

        if app.config.get('POSTGRES_USER') == '':
            raise Exception(
                'No postgres database user was provided. '
                'POSTGRES_USER environment variable cannot be omitted')

        if app.config.get('POSTGRES_PASSWORD') == '':
            raise Exception(
                'No postgres database user password was provided. '
                'POSTGRES_PASSWORD environment variable cannot be omitted')

        if app.config.get('BASEURL') == '':
            raise Exception('No service base url was provided. '
                            'BASEURL environment variable cannot be omitted')

        if app.config.get('DOCKER_HOST') == '':
            raise Exception(
                'No network host within docker was provided. '
                'DOCKER_HOST environment variable cannot be omitted')

        app.config.update({
            'APISPEC_SPEC': APISpec(
                title='disastergram-auth',
                version='v1',
                openapi_version='2.0',
                plugins=[MarshmallowPlugin()],
            ),
            'APISPEC_SWAGGER_URL': '/auth/spec',
            'APISPEC_SWAGGER_UI_URL': '/auth/spec-ui',
        })

        # Only do zookeeper for non testing configs for now
        znode_data = {
            'TOKEN_ISSUER': app.config['TOKEN_ISSUER'],
            'BASEURL': app.config['BASEURL'],
            'DOCKER_HOST': app.config['DOCKER_HOST'],
            'DOCKER_BASEURL': app.config['DOCKER_BASEURL'],
            'PUBLIC_KEY': app.config['PUBLIC_KEY'].decode('utf-8')
        }

        global zk
        zk = AuthZoo(
            KazooClient(app.config['ZOOKEEPER_CONNECTION_STR'],
                        connection_retry=KazooRetry(max_tries=-1),
                        logger=app.logger),
            znode_data)

    # INIT
    db.init_app(app)
    mi.init_app(app, db, directory=environ.get('FLASK_APP_MIGRATIONS', 'migrations'))
    ma.init_app(app)
    bc.init_app(app)

    if not app.testing:
        docs.init_app(app)

    # for some reason when not in development
    # this call fails ¯\_(ツ)_/¯.
    # Probably some kind of problem with
    # threading and prefork.
    if app.env == 'development':
        from auth import models
        models.init_db(app)

    from auth import service

    app.register_blueprint(service.bp)

    if not app.testing:
        docs.register(service.user_register, blueprint='auth')
        docs.register(service.user_read, blueprint='auth')
        docs.register(service.user_replace, blueprint='auth')
        docs.register(service.user_update, blueprint='auth')
        docs.register(service.user_del, blueprint='auth')
        docs.register(service.user_read_id, blueprint='auth')
        docs.register(service.user_replace_id, blueprint='auth')
        docs.register(service.user_update_id, blueprint='auth')
        docs.register(service.user_del_id, blueprint='auth')
        docs.register(service.login, blueprint='auth')
        docs.register(service.refresh_token, blueprint='auth')
        docs.register(service.logout, blueprint='auth')
        docs.register(service.pub_key, blueprint='auth')

    return app
def create_app(test_config=None):
    # create the app configuration
    app = Flask(__name__,
                instance_path=environ.get('FLASK_APP_INSTANCE',
                                          '/user/src/app/instance'))  # instance path

    app.config.from_mapping(
        UPLOAD_FOLDER=environ.get('UPLOAD_FOLDER', '/user/src/app/storage/images'),
        AUTH_LEEWAY=timedelta(seconds=int(environ.get('AUTH_LEEWAY', '30'))),  # leeway in seconds
        STATIC_URL=environ.get('STATIC_URL', '/static'),
        STORAGE_ID=int(environ.get('STORAGE_ID', '-1')),
        BASEURL=environ.get('BASEURL', ''),
        DOCKER_HOST=environ.get('DOCKER_HOST', ''),
        DOCKER_BASEURL='http://{}'.format(environ.get('DOCKER_HOST', '')),
        REDIS_HOST=environ.get('REDIS_HOST'),
        REDIS_PORT=int(environ.get('REDIS_PORT', '6379')),
        ZOOKEEPER_CONNECTION_STR=environ.get('ZOOKEEPER_CONNECTION_STR', 'zoo1,zoo2,zoo3'),
    )

    if test_config is None:
        # load the instance config if it exists, when not testing
        app.config.from_pyfile(path.join(app.instance_path, 'config.py'), silent=True)
    else:
        app.config.from_mapping(test_config)

    if app.config.get('STORAGE_ID') == -1:
        raise Exception('No storage id was provided. '
                        'STORAGE_ID environment variable cannot be omitted')

    if app.config.get('BASEURL') == '':
        raise Exception('No service base url was provided. '
                        'BASEURL environment variable cannot be omitted')

    if app.config.get('DOCKER_HOST') == '':
        raise Exception('No network host within docker was provided. '
                        'DOCKER_HOST environment variable cannot be omitted')

    if app.config.get('REDIS_HOST') is None:
        raise Exception('No redis host was provided. '
                        'REDIS_HOST environment variable cannot be omitted')

    # Make sure the upload folder exists
    os.makedirs(app.config.get('UPLOAD_FOLDER', '/'), exist_ok=True)

    # INIT
    global redis
    redis = Redis(host=app.config.get('REDIS_HOST'),
                  port=app.config.get('REDIS_PORT'),
                  db=0)

    global stats
    stats = Stats(redis)

    znode_data = {
        'BASEURL': app.config['BASEURL'],
        'DOCKER_HOST': app.config['DOCKER_HOST'],
        'DOCKER_BASEURL': app.config['DOCKER_BASEURL']
    }

    global zk
    zk = StorageZoo(KazooClient(hosts=app.config['ZOOKEEPER_CONNECTION_STR'],
                                connection_retry=KazooRetry(max_tries=-1),
                                logger=app.logger),
                    app.config['STORAGE_ID'],
                    znode_data)

    zk.wait_for_znode('/app')
    app_info = zk.get_znode_data('/app')

    if app_info is None:
        raise Exception('Could not retrieve app info from zookeeper')

    app.config['APP_PUBLIC_KEY'] = app_info['PUBLIC_KEY']
    app.config['APP_TOKEN_ISSUER'] = app_info['TOKEN_ISSUER']

    from storage import service

    app.register_blueprint(service.bp)

    return app
def create_app(test_config=None):
    # create the app configuration
    app = Flask(__name__,
                instance_path=environ.get('FLASK_APP_INSTANCE',
                                          '/user/src/app/instance'))

    # Initial config stage
    app.config.from_mapping(
        AUTH_LEEWAY=timedelta(seconds=int(environ.get(
            'AUTH_LEEWAY', '30'))),  # leeway in seconds
        POSTGRES_HOST=environ.get('POSTGRES_HOST', ''),
        POSTGRES_USER=environ.get('POSTGRES_USER', ''),
        POSTGRES_DATABASE=environ.get('POSTGRES_DATABASE',
                                      environ.get('POSTGRES_USER', '')),
        POSTGRES_PASSWORD=environ.get('POSTGRES_PASSWORD', ''),
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
        BASEURL=environ.get('BASEURL', ''),
        DOCKER_HOST=environ.get('DOCKER_HOST', ''),
        DOCKER_BASEURL='http://{}'.format(environ.get('DOCKER_HOST', '')),
        TOKEN_ISSUER=environ.get('TOKEN_ISSUER',
                                 environ.get('BASEURL', 'app-logic')),
        ZOOKEEPER_CONNECTION_STR=environ.get('ZOOKEEPER_CONNECTION_STR',
                                             'zoo1,zoo2,zoo3'),
        AUTH_CONFIG_FROM_ZOO=bool(
            strtobool(environ.get('AUTH_CONFIG_FROM_ZOO', 'False'))))

    # 'postgresql+psycopg2://username:password@host/database'
    app.config[
        'SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://{}:{}@{}/{}'.format(
            app.config['POSTGRES_USER'],
            app.config['POSTGRES_PASSWORD'],
            app.config['POSTGRES_HOST'],
            app.config['POSTGRES_DATABASE'])

    if test_config is None:
        # load the instance config if it exists, when not testing
        app.config.from_pyfile(path.join(app.instance_path, 'config.py'), silent=True)
    else:
        app.config.from_mapping(test_config)

    if app.config.get('POSTGRES_HOST') == '':
        raise Exception('No postgres database host was provided. '
                        'POSTGRES_HOST environment variable cannot be omitted')

    if app.config.get('POSTGRES_USER') == '':
        raise Exception('No postgres database user was provided. '
                        'POSTGRES_USER environment variable cannot be omitted')

    if app.config.get('POSTGRES_PASSWORD') == '':
        raise Exception(
            'No postgres database user password was provided. '
            'POSTGRES_PASSWORD environment variable cannot be omitted')

    if app.config.get('BASEURL') == '':
        raise Exception('No service base url was provided. '
                        'BASEURL environment variable cannot be omitted')

    if app.config.get('DOCKER_HOST') == '':
        raise Exception('No network host within docker was provided. '
                        'DOCKER_HOST environment variable cannot be omitted')

    global sm
    sm = StorageManager(app.logger,
                        app.config['TOKEN_ISSUER'],
                        app.config['PRIVATE_KEY'].decode('utf-8'))

    # Zookeeper init and connection stage
    znode_data = {
        'TOKEN_ISSUER': app.config['TOKEN_ISSUER'],
        'BASEURL': app.config['BASEURL'],
        'DOCKER_HOST': app.config['DOCKER_HOST'],
        'DOCKER_BASEURL': app.config['DOCKER_BASEURL'],
        'PUBLIC_KEY': app.config['PUBLIC_KEY'].decode('utf-8')
    }

    global zk
    zk = AppZoo(
        KazooClient(app.config['ZOOKEEPER_CONNECTION_STR'],
                    connection_retry=KazooRetry(max_tries=-1),
                    logger=app.logger),
        znode_data,
        sm)

    # Get auth info before first request
    app.before_first_request(get_auth_info)

    db.init_app(app)
    mi.init_app(app, db, directory=environ.get('FLASK_APP_MIGRATIONS', 'migrations'))
    ma.init_app(app)

    # for some reason when not in development
    # this call fails /shrug
    if app.env == 'development':
        from app import models
        models.init_db(app)

    from app import service

    app.register_blueprint(service.bp)

    return app
def main():
    """ Updates a composite index after prompting the user. """
    parser = argparse.ArgumentParser(description='Updates composite indexes')
    parser.add_argument('--type', '-t', default='cassandra',
                        help='The datastore backend type')
    parser.add_argument('--app_id', '-a', required=True, help='The project ID')
    parser.add_argument('--all', action='store_true',
                        help='Updates all composite indexes')
    args = parser.parse_args()

    datastore_batch = appscale_datastore_batch.DatastoreFactory.\
        getDatastore(args.type)
    zookeeper_locations = appscale_info.get_zk_locations_string()
    retry_policy = KazooRetry(max_tries=5)
    zk_client = KazooClient(
        zookeeper_locations, connection_retry=ZK_PERSISTENT_RECONNECTS,
        command_retry=retry_policy)
    zk_client.start()
    zookeeper = zk.ZKTransaction(zk_client)
    transaction_manager = TransactionManager(zookeeper.handle)
    datastore_access = DatastoreDistributed(
        datastore_batch, transaction_manager, zookeeper=zookeeper)
    index_manager = IndexManager(zookeeper.handle, datastore_access)
    datastore_access.index_manager = index_manager

    indices = index_manager.projects[args.app_id].indexes_pb
    if len(indices) == 0:
        print('No composite indices found for app {}'.format(args.app_id))
        zk_client.stop()
        zk_client.close()
        return

    update_composite_index_sync = tornado_synchronous(
        datastore_access.update_composite_index)

    if args.all:
        for index in indices:
            update_composite_index_sync(args.app_id, index)
        print('Successfully updated all composite indexes')
        return

    selection = -1
    selection_range = range(1, len(indices) + 1)
    while selection not in selection_range:
        for number, index in enumerate(indices, start=1):
            pretty_index = prettify_index(index.definition())
            print('{}) {}'.format(number, pretty_index))

        try:
            selection = int(raw_input('Select the index you want to update. (1-{}) '
                                      .format(len(indices))))
        except KeyboardInterrupt:
            zk_client.stop()
            zk_client.close()
            sys.exit()

    selected_index = indices[selection - 1]
    update_composite_index_sync(args.app_id, selected_index)

    zk_client.stop()
    zk_client.close()
    print('Index successfully updated')
to_write += check[:4] + ' running:' + str(found) + '\n'
f.write(to_write)
f.write('----------------------------------------------\n')

out = ''
err = ''
# Issue Reads on all the nodes in the cluster and check its value
for server_index in range(1, 4):
    returned = None
    zk = None
    connect_string = host_list[server_index - 1] + ':' + str(
        port_list[server_index - 1])
    kz_retry = KazooRetry(max_tries=1, delay=0.25, backoff=2)
    zk = KazooClient(hosts=connect_string, connection_retry=kz_retry,
                     command_retry=kz_retry, timeout=1)
    try:
        zk.start()
        returned, stat = zk.get("/zk_test")
        zk.stop()
        returned = returned.strip().replace('\n', '')
        out += 'Successful get at server ' + str(
            server_index - 1) + ' Proper:' + str(returned == present_value) + '\n'
    except Exception as e:
        err += 'Could not get at server ' + str(server_index - 1) + '\t:' + str(e) + '\n'
    parser.add_argument(
        '--es-hosts', dest='es_hosts', default=['es'], nargs='+', metavar='HOST',
        help='Elasticsearch list of hosts [localhost:[port]] (default: [es])')

    return parser.parse_args()


if __name__ == '__main__':
    args = _init_args_parser()

    es = Elasticsearch(args.es_hosts)

    kz = KazooClient(','.join(args.zk_hosts),
                     connection_retry=KazooRetry(max_tries=-1),
                     command_retry=KazooRetry(max_tries=-1), timeout=30.0)
    kz.start()

    queue = LockingQueue(kz, '/job')

    data = es.search(index='nuvla-job',
                     body={
                         "query": {
                             "bool": {
                                 "should": [{
                                     "term": {
                                         "state": "QUEUED"
                                     }
                                 }, {