Example #1
def recover(ctx):
    from itertools import groupby
    ret = True
    stats = ctx.monitor.stats['recover']

    stats.timer('recover', 'started')

    it = iterate_key(ctx.merged_filename, ctx.groups)

    node = elliptics_create_node(address=ctx.address,
                                 elog=ctx.elog,
                                 wait_timeout=ctx.wait_timeout,
                                 net_thread_num=4,
                                 io_thread_num=1,
                                 remotes=ctx.remotes)

    # Group enumerated keys into batches of ctx.batch_size by integer index.
    for batch_id, batch in groupby(enumerate(it),
                                   key=lambda x: x[0] // ctx.batch_size):
        recovers = []
        rs = RecoverStat()
        for _, val in batch:
            rec = KeyRecover(ctx, *val, node=node)
            recovers.append(rec)
        # Block on each recover, folding its success flag and stats
        # into the batch totals.
        for r in recovers:
            r.wait()
            ret &= r.succeeded()
            rs += r.stats
        rs.apply(stats)
    stats.timer('recover', 'finished')
    return ret
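
The batching in Example #1 hinges on groupby(enumerate(it), key=...): consecutive keys whose enumeration index shares the same integer quotient index // ctx.batch_size fall into one group. A minimal standalone sketch of that idiom, using illustrative names that are not part of the recovery code:

from itertools import groupby

def batches(iterable, batch_size):
    # Consecutive items whose enumeration index has the same integer
    # quotient index // batch_size form one batch.
    for _batch_id, group in groupby(enumerate(iterable),
                                    key=lambda x: x[0] // batch_size):
        yield [val for _, val in group]

# list(batches(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]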
Example #2
def recover(ctx):
    from itertools import islice
    import time
    import elliptics
    ret = True
    stats = ctx.stats['recover']

    stats.timer('recover', 'started')

    it = iterate_key(ctx.merged_filename, ctx.groups)

    elog = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    node = elliptics_create_node(address=ctx.address,
                                 elog=elog,
                                 wait_timeout=ctx.wait_timeout,
                                 net_thread_num=4,
                                 io_thread_num=1,
                                 remotes=ctx.remotes)
    processed_keys = 0
    start = time.time()
    while True:
        # Pull the next batch of at most ctx.batch_size keys; an empty
        # batch means the key iterator is exhausted.
        batch = tuple(islice(it, ctx.batch_size))
        if not batch:
            break
        recovers = []
        rs = RecoverStat()
        for val in batch:
            rec = KeyRecover(ctx, *val, node=node)
            recovers.append(rec)
        successes, failures = 0, 0
        for r in recovers:
            r.wait()
            ok = r.succeeded()
            ret &= ok
            rs += r.stats
            if ok:
                successes += 1
            else:
                failures += 1
        processed_keys += successes + failures
        rs.apply(stats)
        # Successful keys bump the counters up; failed keys are recorded
        # as negative increments on the same counters.
        stats.counter('recovered_keys', successes)
        ctx.stats.counter('recovered_keys', successes)
        stats.counter('recovered_keys', -failures)
        ctx.stats.counter('recovered_keys', -failures)
        stats.set_counter('recovery_speed',
                          processed_keys / (time.time() - start))
    stats.timer('recover', 'finished')
    return ret
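
Example #2 swaps the groupby grouping for islice: each pass pulls at most ctx.batch_size keys from the iterator, and the loop stops once a pass comes back empty. The same pattern in isolation, again with illustrative names only:

from itertools import islice

def batches(iterable, batch_size):
    it = iter(iterable)
    while True:
        # islice consumes up to batch_size items; an empty tuple means
        # the underlying iterator is exhausted.
        batch = tuple(islice(it, batch_size))
        if not batch:
            break
        yield batch

# list(batches(range(7), 3)) -> [(0, 1, 2), (3, 4, 5), (6,)]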