def ns_conf(self):
    """
    Get the configuration of the local namespace ("sds.conf").

    Loaded lazily on first access; a pristine copy is kept in
    ``self._ns_conf_backup`` so later modifications can be compared
    against the on-disk values.
    """
    if self._ns_conf is None:
        loaded = load_namespace_conf(self.ns)
        self._ns_conf = loaded
        # Shallow copy: preserves the original key/value pairs
        self._ns_conf_backup = dict(loaded)
    return self._ns_conf
def __init__(self, conf, request_prefix="", no_ns_in_url=False,
             endpoint=None, request_attempts=REQUEST_ATTEMPTS,
             logger=None, **kwargs):
    """
    Initialize a client for the OpenIO proxy service.

    :param request_prefix: text to insert in between endpoint and
        requested URL
    :type request_prefix: `str`
    :param no_ns_in_url: do not insert namespace name between endpoint
        and `request_prefix`
    :type no_ns_in_url: `bool`
    :param request_attempts: number of attempts for the request in case
        of error 503 (defaults to 1)
    :raise oio.common.exceptions.ServiceBusy: if all attempts fail
    """
    assert request_attempts > 0
    validate_service_conf(conf)
    self.ns = conf.get('namespace')
    self.conf = conf
    self.logger = logger or get_logger(conf)
    # Look for an endpoint in the application configuration
    if not endpoint:
        endpoint = self.conf.get('proxyd_url', None)
    # Look for an endpoint in the namespace configuration
    # NOTE(review): if neither source provides an endpoint, the
    # split() below will fail on None — presumably the namespace
    # config always has a 'proxy' entry; confirm.
    if not endpoint:
        ns_conf = load_namespace_conf(self.ns)
        endpoint = ns_conf.get('proxy')
    # Historically, the endpoint did not contain any scheme
    self.proxy_scheme = 'http'
    split_endpoint = endpoint.split('://', 1)
    if len(split_endpoint) > 1:
        self.proxy_scheme = split_endpoint[0]
    self.proxy_netloc = split_endpoint[-1]
    ep_parts = list()
    # Single slash on purpose: parts are joined with '/' below,
    # which yields the conventional "scheme://netloc" form.
    ep_parts.append(self.proxy_scheme + ':/')
    ep_parts.append(self.proxy_netloc)
    ep_parts.append("v3.0")
    if not no_ns_in_url:
        ep_parts.append(self.ns)
    if request_prefix:
        ep_parts.append(request_prefix.lstrip('/'))
    self._request_attempts = request_attempts
    super(ProxyClient, self).__init__(endpoint='/'.join(ep_parts),
                                      service_type='proxy', **kwargs)
def main():
    """Delete problematic election nodes from the namespace's ZooKeeper."""
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option(
        '-v', '--verbose', action="store_true", dest="flag_verbose",
        help='Triggers debugging traces')
    parser.add_option(
        '-s', '--smart', action="store_true", dest="SMART",
        default=False,
        # Fixed typos in the help text ("onle", "belong to")
        help="Delete only the members belonging to services with"
             " multiple members")
    parser.add_option(
        '-d', '--dry-run', action="store_true", dest="DRY",
        default=False,
        help="Do not delete, just print")
    parser.add_option(
        '-n', '--min-services', type=int, action="store", dest="NUM",
        default=4,
        # Fixed grammar ("if less the NUM")
        help="Do not delete election if less than NUM")
    parser.add_option(
        '-1', '--alone', action="store_true", dest="ALONE",
        default=False,
        help="Also consider members alone in their group")
    (options, args) = parser.parse_args(sys.argv)

    # Logging configuration
    if options.flag_verbose:
        logging.basicConfig(
            format='%(asctime)s %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S',
            level=logging.DEBUG)
    else:
        logging.basicConfig(
            format='%(asctime)s %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S',
            level=logging.INFO)

    if len(args) < 2:
        raise ValueError("not enough CLI arguments: NS TYPE [TYPE...]")
    ns = args[1]
    cnxstr = load_namespace_conf(ns)['zookeeper']
    zookeeper.set_debug_level(zookeeper.LOG_LEVEL_INFO)
    zh = zookeeper.init(cnxstr)
    for srvtype in args[2:]:
        for group in namespace_tree(ns, srvtype):
            logging.debug(">DIR %s", group)
            for node in list_problematic_nodes(zh, group, options):
                delete_node(zh, node, options)
    zookeeper.close(zh)
def take_action(self, parsed_args):
    """Return the local namespace configuration as two parallel rows."""
    from oio.common.configuration import load_namespace_conf
    self.log.debug('take_action(%s)', parsed_args)
    namespace = self.app.client_manager.cluster.conf['namespace']
    sds_conf = load_namespace_conf(namespace)
    # Pair each "<ns>/<key>" label with its value, then transpose
    output = [("%s/%s" % (namespace, key), value)
              for key, value in sds_conf.items()]
    return list(zip(*output))
def main():
    """Create the per-namespace ZooKeeper tree on every configured handle."""
    parser = argparse.ArgumentParser(description="ZK bootstrap utility")
    parser.add_argument("ns", metavar='<NAMESPACE>', type=str,
                        help="set the namespace")
    parser.add_argument('-v', '--verbose', action="store_true",
                        dest="flag_verbose", default=False,
                        help='Triggers debugging traces')
    parser.add_argument('--lazy', action="store_true", dest="flag_lazy",
                        default=False,
                        help='Quickly check if things seem OK.')
    parser.add_argument(
        '--slow', action="store_true", dest="flag_slow", default=False,
        help='Send small batches to avoid timeouts on slow hosts.')
    parser.add_argument('--avoid', action="append", dest="AVOID_TYPES",
                        help='Avoid entries for the specified service types')
    args = parser.parse_args()

    # Logging configuration
    level = logging.DEBUG if args.flag_verbose else logging.INFO
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S', level=level)

    # Small batches for slow hosts, big batches otherwise
    batch_size = 8 if args.flag_slow else 2048

    cnxstr = load_namespace_conf(args.ns)['zookeeper']
    for zh in get_connected_handles(cnxstr):
        try:
            create_namespace_tree(zh.get(), args.ns,
                                  batch_size=batch_size,
                                  types_to_avoid=args.AVOID_TYPES,
                                  precheck=args.flag_lazy)
        finally:
            zh.close()
def main():
    """Print ZooKeeper nodes whose data size exceeds the given threshold."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-v', '--verbose', action="store_true",
                      dest="flag_verbose",
                      help='Triggers debugging traces')
    parser.add_option(
        '-c', '--min-children', type=int, action="store", dest="CHILDREN",
        default=15,
        help="Do not print the children number if less than that value")
    parser.add_option('-s', '--min-size', type=int, action="store",
                      dest="SIZE", default=0,
                      help="Do not print unless the size is over that value")
    options, args = parser.parse_args(sys.argv)

    # Logging configuration
    level = logging.DEBUG if options.flag_verbose else logging.INFO
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S', level=level)

    if len(args) < 2:
        raise ValueError("not enough CLI arguments: NS TYPE [TYPE...]")

    ns = args[1]
    cnxstr = load_namespace_conf(ns)['zookeeper']
    zookeeper.set_debug_level(zookeeper.LOG_LEVEL_INFO)
    zh = zookeeper.init(cnxstr)
    for srvtype in args[2:]:
        for group in namespace_tree(ns, srvtype):
            for child, meta in list_nodes(zh, group, options):
                if meta['dataLength'] > options.SIZE:
                    print("NODE", meta['dataLength'], child)
    zookeeper.close(zh)
def __init__(self, conf, pool_manager=None, request_prefix="",
             no_ns_in_url=False, endpoint=None,
             request_attempts=REQUEST_ATTEMPTS, logger=None, **kwargs):
    """
    Initialize a client for the OpenIO proxy service.

    :param pool_manager: an optional pool manager that will be reused
    :type pool_manager: `urllib3.PoolManager`
    :param request_prefix: text to insert in between endpoint
        and requested URL
    :type request_prefix: `str`
    :param no_ns_in_url: do not insert namespace name between endpoint
        and `request_prefix`
    :type no_ns_in_url: `bool`
    :param request_attempts: number of attempts for the request in case
        of error 503
    :raise oio.common.exceptions.ServiceBusy: if all attempts fail
    """
    assert request_attempts > 0
    validate_service_conf(conf)
    self.ns = conf.get('namespace')
    self.conf = conf
    self.logger = logger or get_logger(conf)
    if not endpoint:
        endpoint = self.conf.get('proxyd_url', None)
    ep_parts = list()
    if endpoint:
        # BUGFIX: the previous code used endpoint.lstrip("http://"),
        # but str.lstrip() strips a *set of characters*, not a prefix.
        # It would also eat leading 'h', 't', 'p', ':' or '/' characters
        # from the host itself (e.g. "proxy:6000" -> "roxy:6000").
        if endpoint.startswith("http://"):
            self.proxy_netloc = endpoint[len("http://"):]
        else:
            self.proxy_netloc = endpoint
    else:
        ns_conf = load_namespace_conf(self.ns)
        self.proxy_netloc = ns_conf.get('proxy')
    # Single slash on purpose: parts are joined with '/' below
    ep_parts.append("http:/")
    ep_parts.append(self.proxy_netloc)
    ep_parts.append("v3.0")
    if not no_ns_in_url:
        ep_parts.append(self.ns)
    if request_prefix:
        ep_parts.append(request_prefix.lstrip('/'))
    self._request_attempts = request_attempts
    # NOTE(review): pool_manager is accepted and documented but not
    # forwarded to the superclass here — confirm against the base
    # class whether it should be passed along.
    super(ProxyClient, self).__init__(endpoint='/'.join(ep_parts),
                                      **kwargs)
def __init__(self, namespace, **kwargs):
    """Build a meta2 checker with its own proxy endpoint and sub-clients."""
    proxy_netloc = load_namespace_conf(namespace).get('proxy')
    endpoint = "/".join(
        ["http:/", proxy_netloc, "v3.0", namespace, "content"])
    super(CheckMeta2, self).__init__(namespace, "meta2",
                                     endpoint=endpoint, **kwargs)
    self.account = AccountClient({"namespace": self.ns})
    self.container = ContainerClient({"namespace": self.ns})
    self.directory = DirectoryClient({"namespace": self.ns})
    # Random reference name used by the check scenario
    self.reference = random_buffer('0123456789ABCDEF', 64)
def main():
    """Clean oio-sds entries in ZooKeeper for the given namespace."""
    parser = argparse.ArgumentParser(description="ZK cleanup utility")
    parser.add_argument('-v', '--verbose', action="store_true",
                        dest="flag_verbose",
                        help='Triggers debugging traces')
    parser.add_argument('-a', '--all', action="store_true", dest="flag_all",
                        help='Remove all oio-sds nodes (not only meta0)')
    parser.add_argument('-x', '--expunge', action="store_true",
                        dest="flag_expunge", help='Remove all NS')
    parser.add_argument(
        "ns", metavar='<NAMESPACE>',
        help="set the namespace, used at least to locate the ZK")
    args = parser.parse_args()

    # Logging configuration
    if args.flag_verbose:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            level=logging.INFO)

    cnxstr = load_namespace_conf(args.ns)['zookeeper']
    for zh in get_connected_handles(cnxstr):
        try:
            if args.flag_all:
                # logging.warn() is a deprecated alias of warning()
                logging.warning("FLUSHING all the oio-sds entries in the ZK")
                delete_children(zh.get(), args.ns, ("srv", "el"))
            elif args.flag_expunge:
                logging.info("EXPUNGING all the namespaces in ZK")
                expunge_any_ns(zh.get())
            else:
                logging.info("Cleaning only the meta0 registrations in ZK")
                delete_children(zh.get(), args.ns, ("srv", ))
        except Exception as ex:
            logging.exception("!!! %s", ex)
        finally:
            zh.close()
def main():
    """Create new election-member nodes, then pause until signalled."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-v', '--verbose', action="store_true",
                      dest="flag_verbose",
                      help='Triggers debugging traces')
    parser.add_option('-d', '--data', action="store", dest="VALUE",
                      default='', help="Data to force")
    (options, args) = parser.parse_args(sys.argv)

    # Logging configuration
    if options.flag_verbose:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            level=logging.INFO)

    if len(args) < 2:
        raise ValueError("not enough CLI arguments: NS TYPE [TYPE...]")
    ns = args[1]
    cnxstr = load_namespace_conf(ns)['zookeeper']
    zookeeper.set_debug_level(zookeeper.LOG_LEVEL_INFO)
    zh = zookeeper.init(cnxstr)
    for srvtype in args[2:]:
        for group in namespace_tree(ns, srvtype):
            logging.debug(">DIR %s", group)
            for mom, key, first, last in list_groups(zh, group, options):
                # Append a new member right after the last known sequence
                tail = str(1 + int(last)).rjust(10, '0')
                path = mom + '/' + key + '-' + tail
                create_node(zh, options, path)
    logging.debug("Please send a signal to remove the ephemeral nodes")
    # logging.warn() is a deprecated alias of logging.warning()
    logging.warning("PAUSED ...")
    signal.pause()
    zookeeper.close(zh)
def main():
    """Remove oio-sds registrations from each ZooKeeper shard."""
    usage = "usage: %prog [options] NS"
    from optparse import OptionParser
    parser = OptionParser(usage=usage)
    parser.add_option('-v', '--verbose', action="store_true",
                      dest="flag_verbose",
                      help='Triggers debugging traces')
    parser.add_option('-a', '--all', action="store_true", dest="flag_all",
                      help='Remove all oio-sds nodes (not only meta0)')
    (options, args) = parser.parse_args(sys.argv)

    # Logging configuration
    if options.flag_verbose:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            level=logging.INFO)

    if len(args) < 2:
        raise ValueError("not enough CLI arguments")
    ns = args[1]
    cnxstr = load_namespace_conf(ns)['zookeeper']
    zookeeper.set_debug_level(zookeeper.LOG_LEVEL_INFO)
    for shard in cnxstr.split(";"):
        logging.info("ZK=%s", shard)
        zh = zookeeper.init(shard)
        if options.flag_all:
            # logging.warn() is a deprecated alias of logging.warning()
            logging.warning(
                "FLUSHING all the oio-sds entries in the ZK server")
            delete_children(zh, "/hc")
        else:
            logging.info("Cleaning only the meta0 registrations in ZK server")
            delete_children(zh, "/hc/ns/" + ns + "/srv/meta0")
        zookeeper.close(zh)
def create_app(conf, **kwargs):
    """Build the Account application, wiring its backend and IAM database."""
    logger = get_logger(conf)
    iam_conn = conf.get('iam.connection')
    if not iam_conn:
        # Fall back to the namespace configuration
        ns_conf = load_namespace_conf(conf['namespace'], failsafe=True)
        iam_conn = ns_conf.get('iam.connection', DEFAULT_IAM_CONNECTION)
        if iam_conn == DEFAULT_IAM_CONNECTION:
            logger.warning(
                'Using the default connection (%s) is probably '
                'not what you want to do.', DEFAULT_IAM_CONNECTION)
    scheme, netloc, iam_kwargs = parse_conn_str(iam_conn)
    key = 'sentinel_hosts' if scheme == 'redis+sentinel' else 'host'
    iam_kwargs[key] = netloc
    backend = AccountBackend(conf)
    iam_db = RedisIamDb(logger=logger, **iam_kwargs)
    return Account(conf, backend, iam_db, logger=logger)
def main():
    """Log the data held by every node below each service-type group."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(
        '-v', '--verbose', action="store_true", dest="flag_verbose",
        help='Triggers debugging traces')
    options, args = parser.parse_args(sys.argv)

    # Logging configuration
    level = logging.DEBUG if options.flag_verbose else logging.INFO
    logging.basicConfig(
        format='%(asctime)s %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S',
        level=level)

    if len(args) < 2:
        raise ValueError("not enough CLI arguments: NS SRVTYPE [SRVTYPE...]")
    ns = args[1]
    cnxstr = load_namespace_conf(ns)['zookeeper']
    zookeeper.set_debug_level(zookeeper.LOG_LEVEL_INFO)
    zh = zookeeper.init(cnxstr)
    for srvtype in args[2:]:
        for group in namespace_tree(ns, srvtype):
            children = list(list_nodes(zh, group, options))
            if children:
                logging.info("> %s", group)
                for child in children:
                    data, _meta = zookeeper.get(zh, group + '/' + child)
                    logging.info(" %s : %s", child, data)
    zookeeper.close(zh)
def take_action(self, parsed_args):
    """
    Warm up the meta1 directory: spawn workers and feed them every
    possible prefix of the namespace.
    """
    self.log.debug('take_action(%s)', parsed_args)
    digits = self.app.client_manager.get_meta1_digits()
    workers_count = parsed_args.workers
    conf = {'namespace': self.app.client_manager.namespace}
    # Prefer the proxy given on the command line, otherwise resolve it
    # from the namespace configuration.
    if parsed_args.proxy:
        conf.update({'proxyd_url': parsed_args.proxy})
    else:
        ns_conf = load_namespace_conf(conf['namespace'])
        proxy = ns_conf.get('proxy')
        conf.update({'proxyd_url': proxy})
    workers = list()
    with green.ContextPool(workers_count) as pool:
        pile = GreenPile(pool)
        # Bounded queue: limits how far the feeder can run ahead
        # of the workers.
        prefix_queue = Queue(16)
        # Prepare some workers
        for i in range(workers_count):
            w = WarmupWorker(conf, self.log)
            workers.append(w)
            pile.spawn(w.run, prefix_queue)
        # Feed the queue
        trace_increment = 0.01
        trace_next = trace_increment
        sent, total = 0, float(count_prefixes(digits))
        for prefix in generate_prefixes(digits):
            sent += 1
            prefix_queue.put(prefix)
            # Display the progression (roughly every 1%)
            ratio = float(sent) / total
            if ratio >= trace_next:
                self.log.info("... %d%%", int(ratio * 100.0))
                trace_next += trace_increment
        # NOTE(review): the message mentions a termination marker but
        # nothing is put on the queue here; presumably WarmupWorker.run
        # relies on task_done()/join() semantics — confirm against the
        # worker implementation.
        self.log.debug("Send the termination marker")
        prefix_queue.join()
    self.log.info("All the workers are done")
def __init__(self, conf, **kwargs):
    """Resolve the event-agent queue URL from the namespace configuration."""
    ns_conf = load_namespace_conf(conf["namespace"])
    self.ns_conf = ns_conf
    self.queue_url = ns_conf['event-agent']
    # Lazy connection: created on first use
    self._beanstalk = None
def __init__(self, conf, logger, **kwargs):
    """Initialize the rebuilder and read the meta1 digit count."""
    super(Meta1Rebuilder, self).__init__(conf, logger, None, **kwargs)
    self.conscience = ConscienceClient(self.conf, logger=self.logger)
    sds_conf = load_namespace_conf(self.conf['namespace']) or {}
    # Newer configs use 'ns.meta1_digits'; fall back to the legacy
    # 'meta1_digits' key, then to 4.
    legacy_default = sds_conf.get('meta1_digits', 4)
    self.meta1_digits = int(
        sds_conf.get('ns.meta1_digits', legacy_default))
def sds_conf(self):
    """Return the local namespace configuration, loading it on first use."""
    if self._sds_conf:
        return self._sds_conf
    from oio.common.configuration import load_namespace_conf
    # Normalize a falsy result to an empty dict
    self._sds_conf = load_namespace_conf(self.namespace) or {}
    return self._sds_conf
def sds_conf(self):
    """Dict holding what's in local configuration files."""
    if self._sds_conf:
        return self._sds_conf
    from oio.common.configuration import load_namespace_conf
    # failsafe=True: do not raise if the configuration is missing
    self._sds_conf = load_namespace_conf(self.namespace, failsafe=True)
    return self._sds_conf
def main():
    """Create the ZooKeeper election tree for a namespace on each shard."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-v', '--verbose', action="store_true",
                      dest="flag_verbose", default=False,
                      help='Triggers debugging traces')
    parser.add_option('--lazy', action="store_true", dest="LAZY",
                      default=False,
                      help='Quickly check if things seem OK.')
    parser.add_option(
        '--slow', action="store_true", dest="SLOW", default=False,
        help='Send small batches to avoid timeouts on slow hosts.')
    parser.add_option('--avoid', action="append", type="string",
                      dest="AVOID_TYPES",
                      help='Avoid entries for the specified service types')
    options, args = parser.parse_args(sys.argv)

    # Logging configuration
    level = logging.DEBUG if options.flag_verbose else logging.INFO
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S', level=level)

    if len(args) < 2:
        raise ValueError("not enough CLI arguments")
    ns = args[1]
    cnxstr = load_namespace_conf(ns)['zookeeper']
    zookeeper.set_debug_level(zookeeper.LOG_LEVEL_INFO)
    for shard in cnxstr.split(";"):
        logging.info("ZK=%s", shard)
        zh = zookeeper.init(shard)
        # Synchronous creation of the root; tolerate its existence
        try:
            zookeeper.create(zh, PREFIX, '', acl_openbar, 0)
        except zookeeper.NodeExistsException:
            pass
        if options.LAZY:
            # In lazy mode, only rebuild if a service-type election
            # node is absent.
            missing = False
            for srvtype, _, _ in SRVTYPES:
                node = PREFIX_NS + '/' + ns + '/el/' + srvtype
                try:
                    zookeeper.get(zh, node)
                except Exception:
                    missing = True
        else:
            missing = True
        if missing:
            create_tree(zh, namespace_tree(ns, options), options)
        zookeeper.close(zh)