def get_routes(ctx):
    """Build a throwaway elliptics client and return its routing table.

    Creates a logger, node and a group-0 session from the connection
    settings carried on *ctx*, then parses the session's route list.
    """
    log.debug('Requesting routes')
    log.debug("Creating logger")
    client_log = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    log.debug("Creating node")
    client_node = elliptics_create_node(address=ctx.address,
                                        elog=client_log,
                                        wait_timeout=ctx.wait_timeout,
                                        remotes=ctx.remotes)
    log.debug("Creating session for: {0}".format(ctx.address))
    # Group 0 is enough here: only the routing table is queried, no data I/O.
    route_session = elliptics_create_session(node=client_node,
                                             group=0,
                                             trace_id=ctx.trace_id)
    log.debug("Parsing routing table")
    return RouteList.from_session(route_session)
log.debug("Setting up elliptics client") log.debug("Creating logger") elog = elliptics.Logger(ctx.log_file, int(ctx.log_level)) log.debug("Creating node") node = elliptics_create_node(address=ctx.address, elog=elog, wait_timeout=ctx.wait_timeout, remotes=ctx.remotes) log.debug("Creating session for: {0}".format(ctx.address)) session = elliptics_create_session(node=node, group=0) log.debug("Parsing routing table") ctx.routes = RouteList.from_session(session) log.debug("Parsed routing table:\n{0}".format(ctx.routes)) if not ctx.routes: raise RuntimeError("No routes was parsed from session") log.debug("Total routes: {0}".format(len(ctx.routes))) if len(ctx.groups) == 0: ctx.groups = ctx.routes.groups() log.info("No groups specified: using all available groups: {0}".format( ctx.groups)) try: log.info("Creating pool of processes: %d", ctx.nprocess) ctx.pool = Pool(processes=ctx.nprocess, initializer=worker_init) if recovery_type == TYPE_MERGE: if ctx.dump_file:
log.debug("Setting up elliptics client") log.debug("Creating logger") elog = elliptics.Logger(ctx.log_file, int(ctx.log_level)) log.debug("Creating node") node = elliptics_create_node(address=ctx.address, elog=elog, wait_timeout=ctx.wait_timeout, remotes=ctx.remotes) log.debug("Creating session for: {0}".format(ctx.address)) session = elliptics_create_session(node=node, group=0) log.debug("Parsing routing table") ctx.routes = RouteList.from_session(session) log.debug("Parsed routing table:\n{0}".format(ctx.routes)) if not ctx.routes: raise RuntimeError("No routes was parsed from session") log.debug("Total routes: {0}".format(len(ctx.routes))) if len(ctx.groups) == 0: ctx.groups = ctx.routes.groups() log.info("No groups specified: using all available groups: {0}".format(ctx.groups)) try: log.info("Creating pool of processes: %d", ctx.nprocess) ctx.pool = Pool(processes=ctx.nprocess, initializer=worker_init) if recovery_type == TYPE_MERGE: if ctx.dump_file: from elliptics_recovery.types.merge import dump_main
def recovery(one_node, remotes, backend_id, address, groups, session, rtype, log_file, tmp_dir):
    '''
    Imports dnet_recovery tools and executes merge or dc recovery.
    Checks result of the recovery run via assert (test helper).

    NOTE(review): the log_file parameter is kept for interface
    compatibility but is not used — logging always goes to
    <tmp_dir>/recovery.log.
    '''
    from elliptics_recovery.ctx import Ctx
    from elliptics_recovery.route import RouteList
    from elliptics_recovery.monitor import Monitor
    from elliptics_recovery.etime import Time
    import os

    # Pick the recovery entry point for the requested type.
    if rtype == RECOVERY.MERGE:
        from elliptics_recovery.types.merge import main
    elif rtype == RECOVERY.DC:
        from elliptics_recovery.types.dc import main
    else:
        # Was `assert 0`, which is silently stripped under `python -O`;
        # raise explicitly so an unknown type can never fall through.
        raise ValueError("Unknown recovery type: {0}".format(rtype))

    ctx = Ctx()

    # Keep all recovery artifacts under a dedicated directory below CWD.
    cur_dir = os.getcwd()
    ctx.tmp_dir = os.path.join(cur_dir, tmp_dir)
    try:
        os.makedirs(ctx.tmp_dir, 0o755)
    except OSError:
        # Best effort: the directory may already exist.
        pass

    ctx.log_file = os.path.join(ctx.tmp_dir, 'recovery.log')

    import logging
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        fmt='%(asctime)-15s %(thread)d/%(process)d %(processName)s %(levelname)s %(message)s',
        datefmt='%d %b %y %H:%M:%S')
    ch = logging.FileHandler(ctx.log_file)
    ch.setFormatter(formatter)
    ch.setLevel(logging.DEBUG)
    log.addHandler(ch)

    # Recovery context: fixed test defaults plus the caller-supplied settings.
    ctx.dry_run = False
    ctx.safe = False
    ctx.one_node = one_node
    ctx.custom_recover = ''
    ctx.dump_file = None
    ctx.chunk_size = 1024
    ctx.log_level = 4
    ctx.remotes = remotes
    ctx.backend_id = backend_id
    ctx.address = address
    ctx.groups = groups
    ctx.batch_size = 100
    ctx.nprocess = 3
    ctx.attempts = 1
    ctx.monitor_port = None
    ctx.wait_timeout = 36000
    ctx.elog = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    ctx.routes = RouteList.from_session(session)
    ctx.monitor = Monitor(ctx, None)
    ctx.timestamp = Time.from_epoch(0)

    recovery_res = main(ctx)
    assert recovery_res
    ctx.monitor.shutdown()
def recovery(one_node, remotes, backend_id, address, groups, session, rtype, log_file, tmp_dir):
    '''
    Imports dnet_recovery tools and executes merge or dc recovery.
    Checks result of the recovery run via assert (test helper).

    NOTE(review): the log_file parameter is kept for interface
    compatibility but is not used — logging always goes to
    <tmp_dir>/recovery.log.
    '''
    from elliptics_recovery.ctx import Ctx
    from elliptics_recovery.route import RouteList
    from elliptics_recovery.monitor import Monitor
    from elliptics_recovery.etime import Time
    import os

    # Pick the recovery entry point for the requested type.
    if rtype == RECOVERY.MERGE:
        from elliptics_recovery.types.merge import main
    elif rtype == RECOVERY.DC:
        from elliptics_recovery.types.dc import main
    else:
        # Was `assert 0`, which is silently stripped under `python -O`;
        # raise explicitly so an unknown type can never fall through.
        raise ValueError("Unknown recovery type: {0}".format(rtype))

    ctx = Ctx()

    # Keep all recovery artifacts under a dedicated directory below CWD.
    cur_dir = os.getcwd()
    ctx.tmp_dir = os.path.join(cur_dir, tmp_dir)
    try:
        os.makedirs(ctx.tmp_dir, 0o755)
    except OSError:
        # Best effort: the directory may already exist.
        pass

    ctx.log_file = os.path.join(ctx.tmp_dir, 'recovery.log')

    import logging
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        fmt='%(asctime)-15s %(thread)d/%(process)d %(processName)s %(levelname)s %(message)s',
        datefmt='%d %b %y %H:%M:%S')
    ch = logging.FileHandler(ctx.log_file)
    ch.setFormatter(formatter)
    ch.setLevel(logging.DEBUG)
    log.addHandler(ch)

    # Recovery context: fixed test defaults plus the caller-supplied settings.
    ctx.dry_run = False
    ctx.safe = False
    ctx.one_node = one_node
    ctx.custom_recover = ''
    ctx.dump_file = None
    ctx.chunk_size = 1024
    ctx.log_level = elliptics.log_level.debug
    ctx.remotes = remotes
    ctx.backend_id = backend_id
    ctx.address = address
    ctx.groups = groups
    ctx.batch_size = 100
    ctx.nprocess = 3
    ctx.attempts = 1
    ctx.monitor_port = None
    ctx.wait_timeout = 36000
    ctx.elog = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    ctx.routes = RouteList.from_session(session)
    ctx.monitor = Monitor(ctx, None)
    ctx.timestamp = Time.from_epoch(0)

    recovery_res = main(ctx)
    assert recovery_res
    ctx.monitor.shutdown()