Example #1
def lookup_keys(ctx):
    log.info("Start looking up keys")
    stats = ctx.monitor.stats["lookup"]
    stats.timer('process', 'started')
    ctx.elog = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    node = elliptics_create_node(address=ctx.address,
                                 elog=ctx.elog,
                                 wait_timeout=ctx.wait_timeout,
                                 net_thread_num=1,
                                 io_thread_num=1,
                                 remotes=ctx.remotes)
    session = elliptics.Session(node)
    filename = os.path.join(ctx.tmp_dir, 'merged_result')
    merged_f = open(filename, 'w')
    pickler = pickle.Pickler(merged_f)
    with open(ctx.dump_file, 'r') as dump:
        for str_id in dump:
            id = elliptics.Id(str_id)
            lookups = []
            for g in ctx.groups:
                session.groups = [g]
                lookups.append(session.read_data(id, size=1))
            key_infos = []

            for i, l in enumerate(lookups):
                try:
                    result = l.get()[0]
                    address = result.address
                    key_infos.append(KeyInfo(address,
                                             ctx.groups[i],
                                             result.timestamp,
                                             result.size,
                                             result.user_flags))
                except Exception as e:
                    log.error("Failed to lookup key: {0} in group: {1}: {2}, traceback: {3}"
                              .format(id, ctx.groups[i], repr(e), traceback.format_exc()))
                    stats.counter("lookups", -1)
            if len(key_infos) > 0:
                key_data = (id, key_infos)
                pickler.dump(key_data)
                stats.counter("lookups", len(key_infos))
            else:
                log.error("Key: {0} is missing in all specified groups: {1}. It won't be recovered."
                          .format(id, ctx.groups))
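Example #1 writes one (id, key_infos) tuple per key to merged_result with pickle.Pickler. Below is a minimal sketch of reading such a file back with the standard-library unpickler, assuming the same record layout; the path is hypothetical.

import pickle

with open('/tmp/recovery/merged_result', 'rb') as merged_f:
    unpickler = pickle.Unpickler(merged_f)
    while True:
        try:
            # each record is one (id, key_infos) tuple written by lookup_keys()
            key_id, key_infos = unpickler.load()
        except EOFError:
            break
        print("key: {0}, groups with replicas: {1}".format(key_id, len(key_infos)))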
Example #2
    def _update_group(self, group_id, new_nodes, new_couple, manual=False):
        group = self.state[group_id]
        if new_nodes:
            new_nodes_state = {'set': new_nodes,
                               'timestamp': time.time()}
            if manual:
                new_nodes_state['manual'] = True

            group['nodes'].append(new_nodes_state)

        if new_couple:
            new_couples_state = {'couple': new_couple,
                                 'timestamp': time.time()}
            group['couples'].append(new_couples_state)

        eid = elliptics.Id(keys.MM_ISTRUCT_GROUP % group_id)
        logger.info('Updating state for group %s' % group_id)
        self.meta_session.update_indexes(eid, [keys.MM_GROUPS_IDX],
                                              [self._serialize(group)])
Example #3
def combine_logs(remotes, groups, min_write, keys, new_key):
    elog = elliptics.Logger("/dev/stderr", 0)
    cfg = elliptics.Config()
    cfg.config.wait_timeout = 60
    cfg.config.check_timeout = 60
    cfg.config.io_thread_num = 16
    cfg.config.nonblocking_io_thread_num = 16
    cfg.config.net_thread_num = 16

    node = elliptics.Node(elog, cfg)

    for r in remotes:
        try:
            node.add_remote(addr=r[0], port=r[1], family=r[2])
        except Exception as e:
            print "Coudn't connect to elliptics node: {0}: {1}".format(r, e)

    log_s = elliptics.Session(node)
    log_s.set_groups(groups)
    log_s.set_ioflags(elliptics.io_flags.append)

    index_s = elliptics.Session(node)
    index_s.set_groups(groups)
    index_s.set_ioflags(elliptics.io_flags.cache)

    users = Set()

    print "Keys: {0}".format(keys)

    for key in keys:
        try:
            users.update(process_key(key, log_s, index_s, new_key))
        except Exception as e:
            print "Process key failed: {0}".format(e)

    print "Users: {0}".format(users)

    for u in users:
        try:
            index_s.update_indexes(elliptics.Id(u), [new_key + ".0"], [u])
        except Exception as e:
            print "Update_indexes failed: {0}".format(e)
Example #4
File: merge.py Project: s-mx/elliptics
    def _remove_bad_keys(self, responses):
        '''
        Removes invalid keys: keys with an older timestamp or an invalid checksum.
        '''
        bad_keys = []
        for key, responses in responses.iteritems():
            for response in responses:
                if self._check_bad_key(response):
                    bad_keys.append((key, ) + response)
                    status, address, backend_id = response
                    if status == -errno.EILSEQ:
                        self.ctx.corrupted_keys.write(
                            '{key} {group} {address}/{backend_id}\n'.format(
                                key=key,
                                group=self.group,
                                address=address,
                                backend_id=backend_id))

        for attempt in range(self.ctx.attempts):
            if not bad_keys:
                break

            results = []
            for key, _, addr, backend_id in bad_keys:
                self.remove_session.set_direct_id(addr, backend_id)
                result = self.remove_session.remove(elliptics.Id(key))
                results.append(result)

            timeouted_keys = []
            is_last_attempt = (attempt == self.ctx.attempts - 1)
            for i, r in enumerate(results):
                status = r.get()[0].status
                log.info(
                    "Removing key: {0}, status: {1}, last attempt: {2}".format(
                        bad_keys[i], status, is_last_attempt))
                if status:
                    self.stats_cmd.counter("remove.{0}".format(status), 1)
                    self.stats_cmd_groups.counter(
                        "remove.{0}.{1}".format(self.group, status), 1)
                if status == -errno.ETIMEDOUT:
                    timeouted_keys.append(bad_keys[i])
            bad_keys = timeouted_keys
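Example #4 retries the direct removes only for the keys that previously failed with -errno.ETIMEDOUT, shrinking the work list on every pass. A minimal sketch of that retry-until-stable pattern in isolation; remove_one() and the attempt count are hypothetical stand-ins.

import errno

def remove_with_retries(keys, remove_one, attempts=3):
    # keep retrying only the keys whose previous attempt timed out
    pending = list(keys)
    for attempt in range(attempts):
        if not pending:
            break
        timeouted = []
        for key in pending:
            status = remove_one(key)
            if status == -errno.ETIMEDOUT:
                timeouted.append(key)
        pending = timeouted
    return pending  # keys that still timed out after all attempts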
Example #5
File: merge.py Project: zmyer/elliptics
def dump_process_group((ctx, group)):
    log.debug("Processing group: {0}".format(group))
    stats = ctx.stats['group_{0}'.format(group)]
    stats.timer('process', 'started')
    if group not in ctx.routes.groups():
        log.error("Group: {0} is not presented in route list".format(group))
        return False
    elog = elliptics.Logger(ctx.log_file, int(ctx.log_level))
    node = elliptics_create_node(address=ctx.address,
                                 elog=elog,
                                 wait_timeout=ctx.wait_timeout,
                                 net_thread_num=1,
                                 io_thread_num=1,
                                 remotes=ctx.remotes)
    ret = True
    with open(ctx.dump_file, 'r') as dump:
        ss_rec = ServerSendRecovery(ctx, node, group)
        # splits ids from the dump file into batches and recovers them
        for batch_id, batch in groupby(enumerate(dump),
                                       key=lambda x: x[0] / ctx.batch_size):
            recovers = []
            rs = RecoverStat()
            keys = [elliptics.Id(val) for _, val in batch]
            keys = ss_rec.recover(keys)
            for k in keys:
                rec = DumpRecover(routes=ctx.routes,
                                  node=node,
                                  id=k,
                                  group=group,
                                  ctx=ctx)
                recovers.append(rec)
                rec.run()
            for r in recovers:
                r.wait()
                ret &= r.succeeded()
                rs += r.stats
            rs.apply(stats)
    stats.timer('process', 'finished')
    return ret
Example #6
    def _parallel_read(self, requests):
        results = {}
        for session, result_key, key in requests:
            if callable(session):
                session = session()

            # read_latest_async exists in elliptics 2.24.14.15; in later
            # versions read_data returns an AsyncResult object
            self.__logging.debug('Request to read {0} for groups {1}'.format(
                key.replace('\0', '\\0'), result_key))
            read = getattr(session, 'read_latest_async', session.read_data)

            try:
                results[result_key] = read(elliptics.Id(key))
            except Exception as e:
                self.__logging.error('Failed to read {0} for groups '
                                     '{1}: {2}, {3}'.format(
                                         key, result_key, str(e),
                                         traceback.format_exc()))
                pass

        return results
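The comment in Example #6 explains the compatibility shim: older clients (elliptics 2.24.14.15) expose read_latest_async, while newer ones return an AsyncResult from read_data. A minimal sketch of that getattr-based fallback on its own; only the method names and version come from the example, the rest is illustrative.

import elliptics

def issue_read(session, key):
    # prefer the legacy async call when it exists, otherwise fall back to read_data
    read = getattr(session, 'read_latest_async', session.read_data)
    return read(elliptics.Id(key))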
Example #7
    def _check_server_send_results(self, iterator, key_infos_map, group_id):
        '''
        Checks the result of server-send for every key.
        Returns lists of timed-out keys and corrupted keys.
        '''
        start_time = time.time()
        recovers_in_progress = len(key_infos_map)

        timeouted_keys = []
        corrupted_keys = []
        succeeded_keys = set()
        index = 0
        for index, result in enumerate(iterator, 1):
            status = result.status
            self._update_stats(start_time, index, recovers_in_progress, status)

            key = result.key
            if status < 0:
                key_infos = key_infos_map[str(key)]
                self._on_server_send_fail(status, key, key_infos,
                                          timeouted_keys, corrupted_keys,
                                          group_id)
                continue
            elif status == 0:
                succeeded_keys.add(str(key))
                log.debug("Recovered key: %s, status %d", result.key, status)

        if index < recovers_in_progress:
            log.error(
                "Server-send operation failed: group_id: %d, received results: %d, expected: %d, error: %s",
                group_id, index, recovers_in_progress, iterator.error())
            self._update_stats(start_time, index, recovers_in_progress,
                               iterator.error().code)
            timeouted_keys = [
                elliptics.Id(k) for k in key_infos_map.iterkeys()
                if k not in succeeded_keys
            ]

        return timeouted_keys, corrupted_keys
Example #8
def process_key(key, log_s, index_s, new_key):
    res = index_s.find_any_indexes([key + str(x) for x in range(1000)])
    users = Set()
    for r in res:
        for ind, data in r.indexes:
            users.add(data)

    print "Users: {0}".format(users)

    async_reads = []

    for u in users:
        try:
            k = u + "." + key
            print "Read latest: {0}".format(k)
            async_reads.append((log_s.read_latest(k), u))
        except Exception as e:
            print "Read latest async failed: {0}".format(e)

    for r, u in async_reads:
        try:
            k = u + "." + new_key
            r.wait()
            result = r.get()[0]
            print "Write: {0}".format(k)
            io = elliptics.IoAttr()
            io.id = elliptics.Id(k)
            io.timestamp = elliptics.Time(0, 0)
            io.user_flags = 0
            write_result = log_s.write_data(io, result.data)
            write_result.wait()
            print "Write is {0}".format(write_result.successful())
        except Exception as e:
            print "Write data async failed: {0}".format(e)

    return users
Example #9
    def _history_entry_update(self, state):
        try:
            uid = state['uid']
            logger.debug('Started updating minion history entry '
                         'for command {0}'.format(uid))
            update_history_entry = False
            try:
                eid = elliptics.Id(keys.MINION_HISTORY_ENTRY_KEY %
                                   uid.encode('utf-8'))
                # switch to read_latest once it raises NotFoundError on non-existent keys
                r = self.meta_session.read_data(eid).get()[0]
                history_state = self._unserialize(r.data)
                self.history[uid] = history_state['progress']
                # updating if process is finished
                if int(self.history[uid]) != int(state['progress']):
                    update_history_entry = True
            except elliptics.NotFoundError:
                logger.debug('History state is not found')
                update_history_entry = True

            if update_history_entry:
                logger.info(
                    'Updating minion history entry for command {0}'.format(
                        uid))
                start = datetime.datetime.fromtimestamp(state['start_ts'])
                key = keys.MINION_HISTORY_KEY % start.strftime('%Y-%m')
                self.meta_session.update_indexes(eid, [key],
                                                 [self._serialize(state)])
                self.history[uid] = state['progress']
            else:
                logger.debug('Update for minion history entry '
                             'of command {0} is not required'.format(uid))
        except Exception as e:
            logger.error('Failed to update minion history entry for '
                         'uid {0}: {1}\n{2}'.format(uid, str(e),
                                                    traceback.format_exc()))
Example #10
File: test.py Project: zmyer/elliptics
    n.add_remotes("localhost:1025:2")

    s = elliptics.Session(n)

    s.add_groups([1, 2, 3])

    group = 1
    try:
        obj = "qwerty.xml"
        addr = s.lookup_address(obj, group)
        print "object", obj, "should live at", addr, "in group", group
    except Exception as e:
        print "Failed to lookup in group", group, ":", e

    id = elliptics.Id([1, 2, 3, 4], 1)

    # write data by ID into specified group (# 2)
    # read data from the same group (# 2)
    try:
        data = '1234567890qwertyuio'
        s.write_data(id, data, 0).wait()
        print "WRITE:", data
    except Exception as e:
        print "Failed to write data by id:", e

    try:
        res = s.read_data(id, 0, 0).get()[0]
        print " READ:", res.data
    except Exception as e:
        print "Failed to read data by id:", e
Example #11
def range(key_begin, key_end):
    ret = elliptics.IteratorRange()
    ret.key_begin = elliptics.Id(key_begin, 0)
    ret.key_end = elliptics.Id(key_end, 0)
    return ret
Example #12

if __name__ == '__main__':
    log = elliptics.Logger("/dev/stderr", 1)
    cfg = elliptics.Config()
    cfg.cookie = "0123456789012345678901234567890123456789"
    cfg.config.wait_timeout = 60

    n = elliptics.Node(log, cfg)
    n.add_remote("localhost", 1025)

    s = elliptics.Session(n)
    s.add_groups([2])

    ranges = [
        range(elliptics.Id([0] * 64, 0), elliptics.Id([100] + [255] * 63, 0)),
        range(elliptics.Id([200] + [0] * 63, 0),
              elliptics.Id([220] + [255] * 63, 0))
    ]

    eid = elliptics.Id([0] * 64, 2)
    iterator = s.start_iterator(eid, ranges, \
                                elliptics.iterator_types.network, \
                                elliptics.iterator_flags.key_range \
                                    | elliptics.iterator_flags.ts_range \
                                    | elliptics.iterator_flags.data, \
                                elliptics.Time(0, 0), \
                                elliptics.Time(2**64-1, 2**64-1))

    for i, result in enumerate(iterator):
        if result.status != 0:
Example #13
def parse_args():
    from optparse import OptionParser
    ctx = Ctx()

    parser = OptionParser()
    parser.usage = "%prog type [options]"
    parser.description = __doc__
    parser.add_option("-g",
                      "--groups",
                      action="store",
                      dest="groups",
                      default=None,
                      help="Comma separated list of groups [default: all]")
    parser.add_option(
        "-l",
        "--log",
        dest="log",
        default='/dev/stderr',
        metavar="FILE",
        help="Output log messages from library to file [default: %default]")
    parser.add_option("-L",
                      "--log-level",
                      action="store",
                      dest="log_level",
                      default="1",
                      help="Elliptics client verbosity [default: %default]")
    parser.add_option("-r",
                      "--remote",
                      action="append",
                      dest="remote",
                      help="Elliptics node address [default: %default]")
    parser.add_option(
        "-d",
        "--data",
        action="store_true",
        dest="data",
        default=False,
        help="Requests object's data with other info [default: %default]")
    parser.add_option("-k",
                      "--key-begin",
                      action="store",
                      dest="key_begin",
                      default=None,
                      help="Begin key of range for iterating")
    parser.add_option("-K",
                      "--key-end",
                      action="store",
                      dest="key_end",
                      default=None,
                      help="End key of range for iterating")
    parser.add_option("-t",
                      "--time-begin",
                      action="store",
                      dest="time_begin",
                      default=None,
                      help="Begin timestamp of time range for iterating")
    parser.add_option("-T",
                      "--time-end",
                      action="store",
                      dest="time_end",
                      default=None,
                      help="End timestamp of time range for iterating")
    parser.add_option(
        "-M",
        "--no-meta",
        action="store_true",
        dest="no_meta",
        default=False,
        help=
        "Run iterator without metadata (timestamp and user flags). This option conflicts with --data and --time-*, if one of them is specified, --no-meta will have no effect."
    )
    parser.add_option(
        "-A",
        "--addr",
        action="store",
        dest="route_addr",
        default=None,
        help=
        "Address to lookup in route file. This address will be used to determine iterator ranges - ranges which DO NOT belong to selected node."
    )
    parser.add_option(
        "-R",
        "--route-file",
        action="store",
        dest="route_file",
        default=None,
        help=
        "Route file contains 'dnet_balance' tool's output - route table dump, which will be parsed to find out ranges which DO NOT belong to selected address."
    )

    (options, args) = parser.parse_args()

    if len(args) > 1:
        raise ValueError("Too many arguments passed: {0}, expected: 1".format(
            len(args)))
    elif len(args) == 0:
        raise ValueError(
            "Please specify one of following modes: {0}".format(ALLOWED_MODES))

    if args[0].lower() not in ALLOWED_MODES:
        raise ValueError("Unknown mode: '{0}', allowed: {1}".format(
            args[0], ALLOWED_MODES))
    ctx.iterate_mode = args[0].lower()

    try:
        if options.groups:
            ctx.groups = map(int, options.groups.split(','))
        else:
            ctx.groups = []
    except Exception as e:
        raise ValueError("Can't parse grouplist: '{0}': {1}".format(
            options.groups, repr(e)))
    print("Using group list: {0}".format(ctx.groups))

    try:
        ctx.log_file = options.log
        ctx.log_level = int(options.log_level)
    except Exception as e:
        raise ValueError("Can't parse log_level: '{0}': {1}".format(
            options.log_level, repr(e)))
    print("Using elliptics client log level: {0}".format(ctx.log_level))

    if not options.remote:
        raise ValueError(
            "Please specify at least one remote address (-r option)")
    try:
        ctx.remotes = []
        for r in options.remote:
            ctx.remotes.append(elliptics.Address.from_host_port_family(r))
            print("Using remote host:port:family: {0}".format(ctx.remotes[-1]))
    except Exception as e:
        raise ValueError("Can't parse host:port:family: '{0}': {1}".format(
            options.remote, repr(e)))

    ctx.no_meta = options.no_meta

    try:
        if options.time_begin:
            ctx.time_begin = Time.from_epoch(options.time_begin)
        else:
            ctx.time_begin = None
    except Exception as e:
        raise ValueError("Can't parse timestamp: '{0}': {1}".format(
            options.time_begin, repr(e)))
    print("Using time_begin: {0}".format(ctx.time_begin))

    try:
        if options.time_end:
            ctx.time_end = Time.from_epoch(options.time_end)
        else:
            ctx.time_end = None
    except Exception as e:
        raise ValueError("Can't parse timestamp: '{0}': {1}".format(
            options.time_end, repr(e)))
    print("Using time_end: {0}".format(ctx.time_end))

    ctx.data = options.data

    key_range = elliptics.IteratorRange()

    try:
        if options.key_begin == '-1':
            key_range.key_begin = elliptics.Id([255] * 64, 0)
        elif options.key_begin:
            key_range.key_begin = elliptics.Id(transf(options.key_begin), 0)
    except Exception as e:
        raise ValueError("Can't parse key_begin: '{0}': {1}".format(
            options.key_begin, repr(e)))

    try:
        if options.key_end == '-1':
            key_range.key_end = elliptics.Id([255] * 64, 0)
        elif options.key_end:
            key_range.key_end = elliptics.Id(transf(options.key_end), 0)
    except Exception as e:
        raise ValueError("Can't parse key_end: '{0}': {1}".format(
            options.key_end, repr(e)))

    ctx.ranges = []
    if options.key_begin or options.key_end:
        ctx.ranges = [key_range]

    try:
        if options.route_file and options.route_addr:
            ranges = parse_route_ranges(options.route_file, options.route_addr)
            ctx.ranges += ranges
    except Exception as e:
        raise ValueError(
            "Can't parse route_file '{0}' and route_addr '{1}' options: {2}".
            format(options.route_file, options.route_addr, repr(e)))

    return ctx
Example #14
if __name__ == '__main__':
    ctx = parse_args()

    ctx.elog = elliptics.Logger(ctx.log_file, ctx.log_level)
    ctx.node = elliptics.Node(ctx.elog)
    ctx.node.add_remotes(ctx.remotes)

    ctx.session = elliptics.Session(ctx.node)
    ctx.session.set_timeout(60)

    if ctx.groups:
        ctx.session.groups = ctx.groups
    else:
        ctx.session.groups = ctx.session.routes.groups()
    #print ctx.session.routes

    if ctx.iterate_mode == MODE_NODES:
        for r in ctx.remotes:
            eid = elliptics.Id([0] * 64, 0)
            if len(ctx.ranges) != 0:
                eid = ctx.ranges[0].key_begin

            iterate_node(ctx, r, eid)
    elif ctx.iterate_mode == MODE_GROUP:
        iterate_groups(ctx)
    else:
        raise RuntimeError("Unknown iteration mode '{0}' ".format(
            ctx.iterate_mode))

    exit(0)
Example #15
test_ind = "test_ind_" + str(rnd)
test_data = "test_data_" + str(rnd)

sys.path.insert(0, "bindings/python/")
import elliptics

elog = elliptics.Logger("/dev/stderr", 0)
cfg = elliptics.Config()

node = elliptics.Node(elog, cfg)
node.add_remote("localhost", 1025)

s = elliptics.Session(node)
s.set_groups([1])

r = s.set_indexes(elliptics.Id(test_id), [test_ind], [test_data])
r.wait()
assert r.successful()

r = s.find_any_indexes([test_ind])
r.wait()
assert r.successful()
assert len(r.get()) >= 1
assert r.get()[0].indexes[0].data == test_data

r = s.find_all_indexes([test_ind])
r.wait()
assert r.successful()
assert len(r.get()) >= 1
assert r.get()[0].indexes[0].data == test_data
Example #16
    def route(self, key):
        return elliptics.Route(id=elliptics.Id(key, self.group_id),
                               address=self.address,
                               backend_id=self.backend_id)
Example #17
    def get(self, shift):
        '''
        Returns the elliptics.Id obtained by shifting base_key
        '''
        return elliptics.Id('%x' % (self.__base_key__ + shift))
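Example #17 derives keys by adding an integer shift to a stored base key and formatting the sum as a hex string before wrapping it in elliptics.Id. A minimal self-contained sketch of the same idea; the class name and base value are made up.

import elliptics

class ShiftedKeys(object):
    def __init__(self, base_key):
        self.__base_key__ = base_key

    def get(self, shift):
        # same formatting as in Example #17: hex string of (base + shift)
        return elliptics.Id('%x' % (self.__base_key__ + shift))

keys = [ShiftedKeys(0x1000).get(i) for i in range(5)]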
Example #18
File: dc.py Project: sabramkin/elliptics
def lookup_keys(ctx):
    log.info("Start looking up keys")
    stats = ctx.stats["lookup"]
    stats_cmd = ctx.stats['commands']
    stats.timer('process', 'started')
    elog = elliptics.Logger(ctx.log_file, int(ctx.log_level), True)
    node = elliptics_create_node(address=ctx.address,
                                 elog=elog,
                                 wait_timeout=ctx.wait_timeout,
                                 flags=elliptics.config_flags.no_route_list,
                                 net_thread_num=1,
                                 io_thread_num=1,
                                 remotes=ctx.remotes)
    session = elliptics.newapi.Session(node)
    session.trace_id = ctx.trace_id
    session.exceptions_policy = elliptics.exceptions_policy.no_exceptions
    session.set_filter(elliptics.filters.all_final)

    filename = os.path.join(ctx.tmp_dir, 'merged')
    uncommitted_filename = os.path.join(ctx.tmp_dir, 'uncommitted')
    merged_keys = MergedKeys(filename, uncommitted_filename, None,
                             ctx.prepare_timeout, ctx.safe, False)

    with open(ctx.dump_file, 'r') as dump_f, open(filename, 'w') as merged_file, \
         open(uncommitted_filename, 'w') as uncommitted_file:
        for str_id in dump_f:
            id = elliptics.Id(str_id)
            lookups = []
            for g in ctx.groups:
                session.groups = [g]
                lookups.append(session.lookup(id))
            key_infos = []

            for i, l in enumerate(lookups):
                result = l.get()[0]
                status = result.status
                if status == 0:
                    address = result.address
                    key_infos.append(
                        KeyInfo(address, ctx.groups[i],
                                result.record_info.data_timestamp,
                                result.record_info.data_size,
                                result.record_info.user_flags,
                                result.record_info.record_flags,
                                result.record_info.data_offset, 0))  # blob_id
                else:
                    log.debug(
                        "Failed to lookup key: {0} in group: {1}: {2}".format(
                            id, ctx.groups[i], status))
                    stats_cmd.counter('lookup.{0}'.format(status), 1)
                    stats.counter("lookups", -1)
            if len(key_infos) > 0:
                key_infos.sort(key=lambda x:
                               (x.timestamp, x.size, x.user_flags),
                               reverse=True)
                key_data = (id, key_infos)
                if not skip_key_data(ctx, key_data):
                    merged_keys.on_key_data(key_data, merged_file,
                                            uncommitted_file, None)
                stats.counter("lookups", len(key_infos))
            else:
                log.error(
                    "Key: {0} is missing in all specified groups: {1}. It won't be recovered."
                    .format(id, ctx.groups))

    stats.timer('process', 'finished')
    return merged_keys
Example #19
    def __getitem__(self, key):
        eid = elliptics.Id(self.key_tpl % key)
        return self.meta_session.list_indexes(eid).get()[0].data
Example #20
    def __setitem__(self, key, val):
        eid = elliptics.Id(self.key_tpl % key)
        self.meta_session.set_indexes(eid, [self.idx], [val])
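Examples #19 and #20 are the read and write halves of a small dict-like wrapper over secondary indexes, addressed through a key_tpl template. A minimal sketch of how the two methods could sit together; the constructor and its arguments are assumptions based on the two snippets.

import elliptics

class IndexedMapping(object):
    def __init__(self, meta_session, key_tpl, idx):
        self.meta_session = meta_session
        self.key_tpl = key_tpl  # e.g. 'mastermind:%s' (hypothetical template)
        self.idx = idx

    def __getitem__(self, key):
        eid = elliptics.Id(self.key_tpl % key)
        return self.meta_session.list_indexes(eid).get()[0].data

    def __setitem__(self, key, val):
        eid = elliptics.Id(self.key_tpl % key)
        self.meta_session.set_indexes(eid, [self.idx], [val])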
Example #21
File: iterate.py Project: kod3r/elliptics
def parse_args():
    from optparse import OptionParser
    ctx = Ctx()

    parser = OptionParser()
    parser.usage = "%prog type [options]"
    parser.description = __doc__
    parser.add_option("-g", "--groups", action="store", dest="groups", default=None,
                      help="Comma separated list of groups [default: all]")
    parser.add_option("-l", "--log", dest="log", default='/dev/stderr', metavar="FILE",
                      help="Output log messages from library to file [default: %default]")
    parser.add_option("-L", "--log-level", action="store", dest="log_level", default="1",
                      help="Elliptics client verbosity [default: %default]")
    parser.add_option("-r", "--remote", action="append", dest="remote",
                      help="Elliptics node address [default: %default]")
    parser.add_option("-d", "--data", action="store_true", dest="data", default=False,
                      help="Requests object's data with other info [default: %default]")
    parser.add_option("-k", "--key-begin", action="store", dest="key_begin", default="0",
                      help="Begin key of range for iterating")
    parser.add_option("-K", "--key-end", action="store", dest="key_end", default="-1",
                      help="End key of range for iterating")
    parser.add_option("-t", "--time-begin", action="store", dest="time_begin", default=None,
                      help="Begin timestamp of time range for iterating")
    parser.add_option("-T", "--time-end", action="store", dest="time_end", default=None,
                      help="End timestamp of time range for iterating")

    (options, args) = parser.parse_args()

    if len(args) > 1:
        raise ValueError("Too many arguments passed: {0}, expected: 1"
                         .format(len(args)))
    elif len(args) == 0:
        raise ValueError("Please specify one of following modes: {0}"
                         .format(ALLOWED_MODES))

    if args[0].lower() not in ALLOWED_MODES:
        raise ValueError("Unknown mode: '{0}', allowed: {1}"
                         .format(args[0], ALLOWED_MODES))
    ctx.iterate_mode = args[0].lower()

    try:
        if options.groups:
            ctx.groups = map(int, options.groups.split(','))
        else:
            ctx.groups = []
    except Exception as e:
        raise ValueError("Can't parse grouplist: '{0}': {1}".format(
            options.groups, repr(e)))
    print("Using group list: {0}".format(ctx.groups))

    try:
        ctx.log_file = options.log
        ctx.log_level = int(options.log_level)
    except Exception as e:
        raise ValueError("Can't parse log_level: '{0}': {1}"
                         .format(options.log_level, repr(e)))
    print("Using elliptics client log level: {0}".format(ctx.log_level))

    if not options.remote:
        raise ValueError("Please specify at least one remote address (-r option)")
    try:
        ctx.remotes = []
        for r in options.remote:
            ctx.remotes.append(elliptics.Address.from_host_port_family(r))
            print("Using remote host:port:family: {0}".format(ctx.remotes[-1]))
    except Exception as e:
        raise ValueError("Can't parse host:port:family: '{0}': {1}"
                         .format(options.remote, repr(e)))

    try:
        if options.time_begin:
            ctx.time_begin = Time.from_epoch(options.time_begin)
        else:
            ctx.time_begin = None
    except Exception as e:
        raise ValueError("Can't parse timestamp: '{0}': {1}"
                         .format(options.time_begin, repr(e)))
    print("Using time_begin: {0}".format(ctx.time_begin))

    try:
        if options.time_end:
            ctx.time_end = Time.from_epoch(options.time_end)
        else:
            ctx.time_end = None
    except Exception as e:
        raise ValueError("Can't parse timestamp: '{0}': {1}"
                         .format(options.time_end, repr(e)))
    print("Using time_end: {0}".format(ctx.time_end))

    ctx.data = options.data

    key_range = elliptics.IteratorRange()

    try:
        if options.key_begin == '-1':
            key_range.key_begin = elliptics.Id([255] * 64, 0)
        elif options.key_begin:
            key_range.key_begin = elliptics.Id(transf(options.key_begin), 0)
        else:
            key_range.key_begin = elliptics.Id([0] * 64, 0)
    except Exception as e:
        raise ValueError("Can't parse key_begin: '{0}': {1}"
                         .format(options.key_begin, repr(e)))

    try:
        if options.key_end == '-1':
            key_range.key_end = elliptics.Id([255] * 64, 0)
        elif options.key_end:
            key_range.key_end = elliptics.Id(transf(options.key_end), 0)
        else:
            key_range.key_end = elliptics.Id([255] * 64, 0)
    except Exception as e:
        raise ValueError("Can't parse key_end: '{0}': {1}"
                         .format(options.key_end, repr(e)))

    ctx.ranges = [key_range]

    return ctx
Example #22
    def distribute(self, top):
        """ Distributes top keys among available cache groups.
        Parameter "top" is a map of key id to key top statistics:

        {('123', '42:69'): {'groups': [42, 69],
                            'couple': '42:69',
                            'ns': 'magic',
                            'id': 123,
                            'size': 1024,    # approximate size of key traffic
                            'frequency': 2,  # approximate number of key events
                            'period': 1      # statistics collection period
                           }, ...
        }"""

        self._update_cache_groups()

        top = self._filter_by_bandwidth(top)
        logger.info('Keys after applying bandwidth filter: {0}'.format(
            [elliptics.Id(key_k[0].encode('utf-8')) for key_k in top]))

        # update currently distributed keys
        logger.info('Updating already distributed keys')
        for key in self._get_distributed_keys():
            copies_diff, key_stat = self._key_copies_diff(key, top)
            top.pop((key['id'], key['couple']), None)

            if copies_diff <= 0:
                logger.info(
                    'Key {}, couple {}, bandwidth {}; '
                    'cached, extra copies: {}, skipped'.format(
                        key['id'], key['couple'],
                        mb_per_s(_key_bw(key_stat)), -copies_diff))
                continue

            logger.info(
                'Key {}, couple {}, bandwidth {}; '
                'cached, expanding to {} more '
                'copies'.format(
                    key['id'], key['couple'],
                    mb_per_s(_key_bw(key_stat)), copies_diff))
            with self._cache_groups_lock:
                try:
                    self._update_key(key, key_stat, copies_diff)
                except Exception:
                    logger.exception(
                        'Key {}, couple {}: failed to expand'.format(
                            key['id'], key['couple']))
                    continue

        # process new keys
        logger.info('Distributing new keys')
        for (key_id, key_couple), key_stat in top.iteritems():
            try:
                key = self._new_key(key_stat)
            except Exception as e:
                logger.exception(
                    'Key {}, couple {}: failed to create new key record, '
                    '{}:'.format(key_id, key_couple, e))
                continue
            copies_diff, key_stat = self._key_copies_diff(key, top)
            if copies_diff == 0:
                logger.info(
                    'Key {}, couple {}, bandwidth {}; not cached, '
                    'does not require cache copies, skipped'.format(
                        key['id'], key['couple'], mb_per_s(_key_bw(key_stat))))
                continue
            logger.info(
                'Key {}, couple {}, bandwidth {}; not cached, '
                'expanding to {} copies'.format(
                    key['id'], key['couple'],
                    mb_per_s(_key_bw(key_stat)), copies_diff))
            with self._cache_groups_lock:
                try:
                    self._update_key(key, key_stat, copies_diff)
                except Exception:
                    logger.exception(
                        'Key {}, couple {}: failed to expand'.format(
                            key['id'], key['couple']))
                    continue
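The docstring of Example #22 spells out the layout of the top argument. Below is a minimal illustrative map following that layout; all values are made up, and distributor is a hypothetical instance of the enclosing class.

top = {
    ('123', '42:69'): {
        'groups': [42, 69],
        'couple': '42:69',
        'ns': 'magic',
        'id': 123,
        'size': 1024,     # approximate size of key traffic
        'frequency': 2,   # approximate number of key events
        'period': 1,      # statistics collection period
    },
}

# distributor.distribute(top)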
Example #23
File: misc.py Project: zmyer/elliptics
def load_key_data_from_file(keys_file):
    import msgpack
    unpacker = msgpack.Unpacker(keys_file)
    for data in unpacker:
        yield (elliptics.Id(data[0], 0), tuple(KeyInfo.load(d) for d in data[1]))
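Example #23 streams (id, key_infos) records out of a msgpack file. A minimal sketch of the writing side, under the assumption that each record is packed as (raw_id, [serialized KeyInfo, ...]); key_info.dump() is a hypothetical serializer matching KeyInfo.load().

import msgpack

def save_key_data_to_file(key_data_iter, keys_file):
    # key_data_iter yields (raw_id, key_infos): raw_id must be whatever
    # elliptics.Id() accepts on the reading side, key_infos hold KeyInfo objects
    packer = msgpack.Packer()
    for raw_id, key_infos in key_data_iter:
        record = (raw_id, [key_info.dump() for key_info in key_infos])
        keys_file.write(packer.pack(record))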