Example #1
    def test_clone(self, server, simple_node):
        orig_s = make_session(node=simple_node,
                              test_name='TestSession.test_clone')

        orig_s.groups = [1, 2, 3]
        orig_s.timeout = 13
        orig_s.exceptions_policy = elliptics.exceptions_policy.throw_at_wait
        orig_s.cflags = elliptics.command_flags.direct
        orig_s.ioflags = elliptics.io_flags.overwrite
        orig_s.timestamp = elliptics.Time(213, 415)
        orig_s.trace_id = 731
        orig_s.user_flags = 19731

        clone_s = orig_s.clone()

        assert clone_s.groups == orig_s.groups == [1, 2, 3]
        assert clone_s.timeout == orig_s.timeout == 13
        assert clone_s.exceptions_policy == orig_s.exceptions_policy == \
            elliptics.exceptions_policy.throw_at_wait
        assert clone_s.cflags == orig_s.cflags == elliptics.command_flags.direct
        assert clone_s.ioflags == orig_s.ioflags == elliptics.io_flags.overwrite
        assert clone_s.timestamp == orig_s.timestamp == elliptics.Time(
            213, 415)
        assert clone_s.trace_id == orig_s.trace_id == 731
        assert clone_s.user_flags == orig_s.user_flags == 19731
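
The assertions above also exercise elliptics.Time value semantics: two Time objects built from the same (tsec, tnsec) pair compare equal, and ordering follows seconds first, then nanoseconds. A minimal sketch of that assumed behaviour:

import elliptics

# elliptics.Time appears to compare by value, not identity:
# equal (tsec, tnsec) pairs are equal Times.
a = elliptics.Time(213, 415)
b = elliptics.Time(213, 415)
assert a == b
assert elliptics.Time(213, 416) > a  # larger tnsec -> later time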
Example #2
    def test_iterate_one_range(self, simple_node):
        '''
        Runs an iterator on the first node/backend from the route list, using only its first range.
        Checks the iterated keys via check_iterator_results.
        '''
        session = make_session(node=simple_node,
                               test_name='TestSession.test_iterate_one_range')
        session.groups = session.routes.groups()
        node_id, node, backend = iter(session.routes.get_unique_routes()[0])
        ranges = convert_ranges(
            (session.routes.get_address_backend_ranges(node, backend)[0], ))

        # We could set flags=elliptics.iterator_flags.key_range,
        # but it should be done automatically if there is at least one range.
        # So this test also checks that behaviour.
        flags = 0
        iterator = session.start_iterator(
            id=node_id,
            ranges=ranges,
            type=elliptics.iterator_types.network,
            flags=flags,
            time_begin=elliptics.Time(0, 0),
            time_end=elliptics.Time(2**64 - 1, 2**64 - 1))

        check_iterator_results(node, backend, iterator, session, node_id)
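
The time_begin=elliptics.Time(0, 0) / time_end=elliptics.Time(2**64 - 1, 2**64 - 1) pair recurs in most iterator examples below as the "match any timestamp" range. A small hypothetical helper (full_time_range is not part of the elliptics API) makes the intent explicit:

import elliptics

def full_time_range():
    # Hypothetical helper: the widest representable timestamp interval,
    # i.e. iterate over keys regardless of when they were written.
    return elliptics.Time(0, 0), elliptics.Time(2**64 - 1, 2**64 - 1)

time_begin, time_end = full_time_range()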
Example #3
def iterate_node(ctx, node):
    eid = ctx.session.routes.get_address_eid(node)
    print eid

    iflags = elliptics.iterator_flags.key_range
    if ctx.time_begin or ctx.time_end:
        iflags |= elliptics.iterator_flags.ts_range
    if ctx.data:
        iflags |= elliptics.iterator_flags.data

    if not ctx.time_begin:
        ctx.time_begin = elliptics.Time(0, 0)
    if not ctx.time_end:
        ctx.time_end = elliptics.Time(2**64-1, 2**64-1)

    print ctx.session.groups, eid, ctx.ranges, iflags, ctx.time_begin, ctx.time_end

    ranges = ctx.ranges

    for r in ranges:
        print repr(r.key_begin), repr(r.key_end)

    print bin(iflags)

    ctx.session.groups = [ctx.session.routes.get_address_group_id(node)]

    iterator = ctx.session.start_iterator(eid,
                                          ranges,
                                          elliptics.iterator_types.network,
                                          iflags,
                                          ctx.time_begin,
                                          ctx.time_end)

    for r in ranges:
        print repr(r.key_begin), repr(r.key_end)

    for result in iterator:
        if result.status != 0:
            raise AssertionError("Wrong status: {0}".format(result.status))

        print ("node: {0}, key: {1}, flags: {2}, ts: {3}/{4}, keys: {5}/{6}, status: {7}, size: {8}, data: {9}"
               .format(node,
                       result.response.key,
                       result.response.user_flags,
                       result.response.timestamp.tsec,
                       result.response.timestamp.tnsec,
                       result.response.iterated_keys,
                       result.response.total_keys,
                       result.response.status,
                       result.response.size,
                       result.response_data))
Example #4
def iterate_node(ctx, node, eid):
    iflags = elliptics.iterator_flags.key_range
    if ctx.time_begin or ctx.time_end:
        iflags |= elliptics.iterator_flags.ts_range
    if ctx.data:
        iflags |= elliptics.iterator_flags.data

    if ctx.no_meta:
        # no-meta only has an effect if we do not request data or timestamp information,
        # since meta lives with the data and a no-meta iterator only runs over indexes
        if not ctx.data:
            if not ctx.time_begin and not ctx.time_end:
                iflags |= elliptics.iterator_flags.no_meta

    if not ctx.time_begin:
        ctx.time_begin = elliptics.Time(0, 0)
    if not ctx.time_end:
        ctx.time_end = elliptics.Time(2**64 - 1, 2**64 - 1)

    print ctx.session.groups, eid, ctx.ranges, iflags, ctx.time_begin, ctx.time_end

    ranges = ctx.ranges

    for r in ranges:
        print repr(r.key_begin), repr(r.key_end)

    print bin(iflags)

    ctx.session.groups = ctx.session.routes.get_address_groups(node)

    iterator = ctx.session.start_iterator(eid, ranges,
                                          elliptics.iterator_types.network,
                                          iflags, ctx.time_begin, ctx.time_end)

    for r in ranges:
        print repr(r.key_begin), repr(r.key_end)

    for result in iterator:
        if result.status != 0:
            raise AssertionError("Wrong status: {0}".format(result.status))

        print(
            "node: {0}, key: {1}, flags: {2}, ts: {3}/{4}, keys: {5}/{6}, status: {7}, size: {8}, data: {9}"
            .format(node, result.response.key, result.response.user_flags,
                    result.response.timestamp.tsec,
                    result.response.timestamp.tnsec,
                    result.response.iterated_keys, result.response.total_keys,
                    result.response.status, result.response.size,
                    result.response_data))
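
The flag computation at the top of this function encodes a real constraint: no_meta only helps when neither data nor a timestamp filter is requested, since metadata lives with the data while a no-meta iterator walks only the indexes. The same decision table as a standalone sketch (build_iterator_flags is a hypothetical name; the flag values come from the example above):

import elliptics

def build_iterator_flags(want_data, time_begin, time_end, no_meta):
    # Sketch of the flag logic from iterate_node() above.
    iflags = elliptics.iterator_flags.key_range
    if time_begin or time_end:
        iflags |= elliptics.iterator_flags.ts_range
    if want_data:
        iflags |= elliptics.iterator_flags.data
    # no_meta only makes sense without data and timestamp filtering.
    if no_meta and not want_data and not time_begin and not time_end:
        iflags |= elliptics.iterator_flags.no_meta
    return iflags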
Example #5
def check_iterator_results(node,
                           backend,
                           iterator,
                           session,
                           node_id,
                           no_meta=False):
    '''
    Checks the iteration result: validates status, user_flags and timestamp;
    pauses and continues the iterator once;
    prints the iteration result and progress.
    '''
    counter = 0
    for result in iterator:
        assert result.status == 0, "if the iterator is ok, the result status should be 0"
        if result.response.status != 0:
            # this is a keepalive response; skip it because it contains only iterator statistics
            print format_stat(node, backend, result, counter)
            continue

        if no_meta:
            assert result.response.user_flags == 0
            assert result.response.timestamp == elliptics.Time(0, 0)

        # Test flow control after the first result
        if counter == 0:
            print "Pause iterator"
            session.pause_iterator(node_id, result.id)
            print "Continue iterator"
            session.continue_iterator(node_id, result.id)

        counter += 1
        print format_result(node, backend, result)
Example #6
 def load(cls, data):
     return cls(elliptics.Address(data[0][0], data[0][1], data[0][2]),
                data[1],
                elliptics.Time(data[2][0], data[2][1]),
                data[3],
                data[4],
                data[5])
Example #7
def process_users(users, key, new_key, log_session, activity_session):
    users_len = len(users)
    log.debug("Processing users: {0} for key: {1}".format(users_len, key))

    if not users:
        log.debug("No users specified, skipping")
        return

    log.debug("Async updating indexes for {0} users for index: {1}".format(
        users_len, new_key))
    async_indexes = []
    for u in users:
        async_indexes.append(
            activity_session.update_indexes_internal(elliptics.Id(u),
                                                     [new_key], [u]))

    log.debug("Async reading logs for {0} users".format(users_len))
    async_read = log_session.bulk_read_async(
        [elliptics.Id(u + '.' + key) for u in users])

    log.debug("Async writing read logs")
    async_writes = []
    it = iter(async_read)
    failed = 0
    while True:
        try:
            r = next(it)
            async_writes.append((log_session.write_data_async(
                (r.id, elliptics.Time(2**64 - 1, 2**64 - 1), 0),
                r.data), len(r.data)))
        except StopIteration:
            break
        except Exception as e:
            failed += 1
            log.debug("Write failed: {0}".format(e))

    written_bytes = 0
    for r, size in async_writes:
        r.wait()
        if r.successful():
            written_bytes += size
        else:
            failed += 1

    log.info("Aggregated bytes: {0}".format(written_bytes))
    log.info("Succeeded: {0}".format(users_len - failed))
    log.info("Failures: {0}".format(failed))

    successes, failures = (0, 0)
    for a in async_indexes:
        a.wait()
        if a.successful():
            successes += 1
        else:
            failures += 1
    log.info("Activtiy updates successes: {0}".format(users_len))
    log.info("Activtiy updates failures: {0}".format(failed))
Example #8
    def test_iterate_all_node_ranges(self, server, simple_node):
        '''
        Runs an iterator on the first node/backend from the route list, using all ranges covered by it.
        '''
        session = make_session(node=simple_node,
                               test_name='TestSession.test_iterate_all_node_ranges')
        session.groups = session.routes.groups()
        node_id, node, backend = iter(session.routes.get_unique_routes()[0])
        ranges = convert_ranges(session.routes.get_address_backend_ranges(node, backend))

        iterator = session.start_iterator(
            id=node_id,
            ranges=ranges,
            type=elliptics.iterator_types.network,
            flags=elliptics.iterator_flags.key_range,
            time_begin=elliptics.Time(0, 0),
            time_end=elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1))

        check_iterator_results(node, backend, iterator, session, node_id)
Example #9
    def test_iterate_default(self, server, simple_node):
        '''
        Runs an iterator on the first node/backend from the route list without specifying ranges or special flags.
        '''
        session = make_session(node=simple_node,
                               test_name='TestSession.test_iterate_default')
        session.groups = session.routes.groups()
        routes = session.routes
        addresses_with_backends = routes.addresses_with_backends()
        first_node, first_backend = addresses_with_backends[0]
        node_id = routes.get_address_backend_route_id(first_node, first_backend)

        iterator = session.start_iterator(
            id=node_id,
            ranges=[],
            type=elliptics.iterator_types.network,
            flags=elliptics.iterator_flags.default,
            time_begin=elliptics.Time(0, 0),
            time_end=elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1))

        check_iterator_results(first_node, first_backend, iterator, session, node_id)
Example #10
class TestDC:
    '''
        Description:
            checks that dc correctly recovers keys with different combinations of replicas in 3 groups
        Steps:
        setup:
            disable all backends
            remove all blobs
            enable all backends
            prepare keys in groups for recovery
        recover:
            run dc recovery
        check:
            check accessibility and data correctness by reading all keys
        teardown:
            disable enabled backends
            remove all blobs
            enable all backends
    '''
    data = os.urandom(1024)
    cur_ts = elliptics.Time.now()
    old_ts = elliptics.Time(cur_ts.tsec - 24 * 60 * 60, cur_ts.tnsec)
    new_ts = elliptics.Time(cur_ts.tsec + 24 * 60 * 60, cur_ts.tnsec)

    def get_result(self, case):
        '''
        Estimates result for the case
        '''
        timestamps = [c.timestamp for c in case if c]
        if len(timestamps) < 1:
            return [None] * len(case)
        max_ts = max(timestamps)
        if all(c.action == elliptics.Session.write_prepare for c in case
               if c and c.timestamp == max_ts):
            if max_ts == self.old_ts:
                return [None] * len(case)
            else:
                get_ts = lambda c: c.timestamp if c and c.action == elliptics.Session.write_data else None
                return map(get_ts, case)
        else:
Example #11
def test_session_timestamps(simple_node):
    """Test session.timestamp and session.json_timestamp."""
    session = elliptics.newapi.Session(simple_node)
    session.trace_id = make_trace_id('test_lookup_read_existent_key')
    session.groups = session.routes.groups()

    key = 'test_lookup_read_existent_key'
    json_string = json.dumps({'some': 'field'})
    data = 'some data'

    data_ts = elliptics.Time.now()
    json_ts = elliptics.Time(data_ts.tsec, data_ts.tnsec + 1)
    assert json_ts > data_ts

    assert session.timestamp is None
    assert session.json_timestamp is None
    # write and check timestamps from result
    result = session.write(key, json_string, len(json_string), data,
                           len(data)).get()[0]
    assert elliptics.Time.now() > result.record_info.data_timestamp > data_ts
    assert elliptics.Time.now() > result.record_info.json_timestamp > data_ts

    session.timestamp = data_ts
    assert session.timestamp == data_ts
    assert session.json_timestamp is None
    # write and check timestamps from result
    result = session.write(key, json_string, len(json_string), data,
                           len(data)).get()[0]
    assert result.record_info.data_timestamp == data_ts
    assert result.record_info.json_timestamp == data_ts

    session.json_timestamp = json_ts
    assert session.timestamp == data_ts
    assert session.json_timestamp == json_ts
    # write and check timestamps from result
    result = session.write(key, json_string, len(json_string), data,
                           len(data)).get()[0]
    assert result.record_info.data_timestamp == data_ts
    assert result.record_info.json_timestamp == json_ts

    session.timestamp = None
    assert session.timestamp is None
    assert session.json_timestamp == json_ts
    # write and check timestamps from result
    result = session.write(key, json_string, len(json_string), data,
                           len(data)).get()[0]
    assert elliptics.Time.now() > result.record_info.data_timestamp > json_ts
    assert result.record_info.json_timestamp == json_ts
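
Note the trick near the top of the test: a timestamp strictly later than data_ts is produced by bumping the nanosecond component by one, the smallest representable step. As a sketch (just_after is a hypothetical name, and tnsec overflow at one second is ignored for brevity):

import elliptics

def just_after(ts):
    # The smallest Time strictly greater than ts.
    return elliptics.Time(ts.tsec, ts.tnsec + 1)

now = elliptics.Time.now()
assert just_after(now) > now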
Example #12
 def __init__(self,
              key,
              user_flags=100500,
              data_timestamp=elliptics.Time(100, 500),
              data_size=100,
              status=0,
              record_flags=0,
              blob_id=0,
              data_offset=0):
     self.key = elliptics.Id.from_hex(hashlib.sha512(key).hexdigest())
     self.user_flags = user_flags
     self.data_timestamp = data_timestamp
     self.data_size = data_size
     self.status = status
     self.record_flags = record_flags
     self.blob_id = blob_id
     self.data_offset = data_offset
Example #13
    def test_write_and_corrupt_data(self, server, simple_node):
        '''
        Writes the key to groups 1, 2, 3 one by one, with different data and
        an increasing timestamp, then corrupts the data in group #3.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_write_and_corrupt_data',
            test_namespace=self.namespace)

        timestamp3 = elliptics.Time(self.corrupted_timestamp.tsec + 7200,
                                    self.corrupted_timestamp.tnsec)

        session.groups = [scope.test_group]
        session.timestamp = self.corrupted_timestamp
        write_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.1'])
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.1'], self.corrupted_timestamp)

        session.groups = [scope.test_group2]
        session.timestamp = self.corrupted_timestamp2
        write_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.2'])
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.2'], self.corrupted_timestamp2)

        session.groups = [scope.test_group3]
        session.timestamp = timestamp3
        write_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.3'])
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.3'], timestamp3)

        res = session.lookup(self.corrupted_key).get()[0]

        with open(res.filepath, 'r+b') as f:
            f.seek(res.offset, 0)
            tmp = '123' + f.read()[3:]
            f.seek(res.offset, 0)
            f.write(tmp)
            f.flush()
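
The final block corrupts the record on disk: lookup() exposes the blob path and the record offset, and the first three bytes at that offset are overwritten in place. The same step as a reusable sketch (corrupt_record is a hypothetical name; it assumes a lookup result with filepath and offset attributes, as above):

def corrupt_record(lookup_result, garbage='123'):
    # Overwrite the first len(garbage) bytes of the record inside the blob.
    with open(lookup_result.filepath, 'r+b') as f:
        f.seek(lookup_result.offset, 0)
        tail = f.read()[len(garbage):]
        f.seek(lookup_result.offset, 0)
        f.write(garbage + tail)
        f.flush()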
Example #14
def process_key(key, log_s, index_s, new_key):
    res = index_s.find_any_indexes([key + str(x) for x in range(1000)])
    users = set()
    for r in res:
        for ind, data in r.indexes:
            users.add(data)

    print "Users: {0}".format(users)

    async_reads = []

    for u in users:
        try:
            k = u + "." + key
            print "Read latest: {0}".format(k)
            async_reads.append((log_s.read_latest(k), u))
        except Exception as e:
            print "Read latest async failed: {0}".format(e)

    for r, u in async_reads:
        try:
            k = u + "." + new_key
            r.wait()
            result = r.get()[0]
            print "Write: {0}".format(k)
            io = elliptics.IoAttr()
            io.id = elliptics.Id(k)
            io.timestamp = elliptics.Time(0, 0)
            io.user_flags = 0
            write_result = log_s.write_data(io, result.data)
            write_result.wait()
            print "Write is {0}".format(write_result.successful())
        except Exception as e:
            print "Write data async failed: {0}".format(e)

    return users
Example #15
class TestMerge:
    '''
        Description:
            checks that merge correctly recovers keys with different combinations of replicas in both
            hidden and real backends.
        Steps:
        setup:
            disable all backends
            remove all blobs
            enable 2 backends from group 1
            prepare keys on both backend for recovery
        recover:
            run merge recovery
        check:
            check accessibility and data correctness by reading all keys
        teardown:
            disable enabled backends
            remove all blobs
            enable all backends
    '''
    data = os.urandom(1024)
    cur_ts = elliptics.Time.now()
    old_ts = elliptics.Time(cur_ts.tsec - 24 * 60 * 60, cur_ts.tnsec)
    new_ts = elliptics.Time(cur_ts.tsec + 24 * 60 * 60, cur_ts.tnsec)

    def get_result(self, case):
        '''
        Estimates result for the case
        '''
        assert len(case) == 2
        if case[0] is None or \
           case[0].action == elliptics.Session.write_prepare:
            if case[1] is None or \
               case[1].action == elliptics.Session.write_prepare:
                return None
            else:
                return case[1].timestamp
        else:
            assert case[0].action == elliptics.Session.write_data
            if case[1] is None or \
               case[1].action == elliptics.Session.write_prepare or \
               case[1].timestamp < case[0].timestamp:
                return case[0].timestamp
            else:
                return case[1].timestamp

    def make_action(self, key, session, (method, ts), backend):
        '''
        Makes action `method` against session for key, ts, self.data and backend.
        Returns AsyncResult with ts and backend that will be used for checking results
        '''
        args = {'data': self.data, 'key': key}

        if method == elliptics.Session.write_prepare:
            args['remote_offset'] = 0
            args['psize'] = len(self.data)
        elif method == elliptics.Session.write_data:
            args['offset'] = 0

        session.timestamp = ts
        return (method(session, **args), ts, backend)
Example #16
                    key=
                    'key with different replicas in two groups and missed in third'
                ),
                DummyRecord(key='key missed in second group #1'),
                DummyRecord(key='key missed in second group #2'),
                DummyRecord(key='key uncommitted in third group'),
            ]),
        Backend(
            address='121.0.0.2:1',
            backend_id=1,
            group_id=2,
            records=[
                DummyRecord(
                    key=
                    'key with different replicas in two groups and missed in third',
                    data_timestamp=elliptics.Time(101, 500)),
            ]),
        Backend(address='121.0.0.3:1',
                backend_id=1,
                group_id=3,
                records=[
                    DummyRecord(key='key missed in second group #1'),
                    DummyRecord(key='key missed in second group #2'),
                    DummyRecord(
                        key='key uncommitted in third group',
                        record_flags=elliptics.record_flags.uncommitted),
                ])
    ])
])
@pytest.mark.usefixtures('mock_route_list', 'mock_iterator', 'mock_pool')
def test_specific_case_with_readonly_groups(cluster):
Example #17
 def __init__(self, tsec, tnsec):
     self.time = elliptics.Time(tsec, tnsec)
Example #18
class TestSession:
    def test_flags(self):
        assert set(elliptics.io_flags.values.values()) == io_flags
        assert set(elliptics.command_flags.values.values()) == command_flags
        assert set(
            elliptics.exceptions_policy.values.values()) == exceptions_policy
        assert set(elliptics.filters.values.values()) == filters
        assert set(elliptics.checkers.values.values()) == checkers

    @pytest.mark.parametrize(
        "prop, value",
        [('timeout', 5), ('groups', []),
         ('exceptions_policy', elliptics.exceptions_policy.default_exceptions),
         ('cflags', 0), ('ioflags', 0),
         ('timestamp', elliptics.Time(2**64 - 1, 2**64 - 1)), ('trace_id', 0),
         ('user_flags', 0)])
    def test_properties_default(self, server, simple_node, prop, value):
        session = elliptics.Session(node=simple_node)
        assert getattr(session, prop) == value

    @pytest.mark.parametrize(
        'prop, setter, getter, values',
        [('groups', 'set_groups', 'get_groups',
          ([], range(1, 100), range(1, 100000), range(10, 10000))),
         ('cflags', 'set_cflags', 'get_cflags', command_flags),
         ('ioflags', 'set_ioflags', 'get_ioflags', io_flags),
         ('exceptions_policy', 'set_exceptions_policy',
          'get_exceptions_policy', tuple(exceptions_policy) +
          (elliptics.exceptions_policy.throw_at_start
           | elliptics.exceptions_policy.throw_at_wait,
           elliptics.exceptions_policy.throw_at_start
           | elliptics.exceptions_policy.throw_at_wait
           | elliptics.exceptions_policy.throw_at_get
           | elliptics.exceptions_policy.throw_at_iterator_end)),
         ('timeout', 'set_timeout', 'get_timeout', (28376487, 2**63 - 1)),
         ('timestamp', 'set_timestamp', 'get_timestamp',
          (elliptics.Time(0, 0), elliptics.Time(2**64 - 1, 2**64 - 1),
           elliptics.Time(238689126897, 1723861827))),
         ('trace_id', None, None, (0, 32423946, 2**32 - 1)),
         ('user_flags', 'set_user_flags', 'get_user_flags',
          (0, 438975345, 2**64 - 1))])
    def test_properties(self, server, simple_node, prop, setter, getter,
                        values):
        session = elliptics.Session(node=simple_node)
        assert type(session) == elliptics.Session
        for value in values:
            set_property(session, prop, value, setter=setter, getter=getter)

    def test_resetting_timeout(self, server, simple_node):
        session = make_session(node=simple_node,
                               test_name='TestSession.test_resetting_timeout')
        assert session.timeout == 5  # check default timeout value
        session.timeout = 1  # set different value
        assert session.timeout == 1  # check that the value has been set
        session.timeout = 0  # set timeout to 0 which should reset to default
        assert session.timeout == 5  # check default timeout value

    @pytest.mark.parametrize("prop, value", [('cflags', 2**64),
                                             ('ioflags', 2**32),
                                             ('exceptions_policy', 2**32),
                                             ('timeout', 2**63),
                                             ('trace_id', 2**64),
                                             ('user_flags', 2**64)])
    def test_properties_out_of_limits(self, server, simple_node, prop, value):
        session = elliptics.Session(simple_node)
        pytest.raises(OverflowError,
                      "set_property(session, '{0}', {1})".format(prop, value))

    def test_clone(self, server, simple_node):
        orig_s = make_session(node=simple_node,
                              test_name='TestSession.test_clone')

        orig_s.groups = [1, 2, 3]
        orig_s.timeout = 13
        orig_s.exceptions_policy = elliptics.exceptions_policy.throw_at_wait
        orig_s.cflags = elliptics.command_flags.direct
        orig_s.ioflags = elliptics.io_flags.overwrite
        orig_s.timestamp = elliptics.Time(213, 415)
        orig_s.trace_id = 731
        orig_s.user_flags = 19731

        clone_s = orig_s.clone()

        assert clone_s.groups == orig_s.groups == [1, 2, 3]
        assert clone_s.timeout == orig_s.timeout == 13
        assert clone_s.exceptions_policy == orig_s.exceptions_policy == \
            elliptics.exceptions_policy.throw_at_wait
        assert clone_s.cflags == orig_s.cflags == elliptics.command_flags.direct
        assert clone_s.ioflags == orig_s.ioflags == elliptics.io_flags.overwrite
        assert clone_s.timestamp == orig_s.timestamp == elliptics.Time(
            213, 415)
        assert clone_s.trace_id == orig_s.trace_id == 731
        assert clone_s.user_flags == orig_s.user_flags == 19731
Example #19
class TestRecovery:
    '''
    Turns off all backends on all nodes except one.
    Makes a few writes to the backend's group. Checks written data availability.
    Turns on one backend from the same node and the same group.
    Runs dnet_recovery merge with --one-node and --backend-id.
    Checks written data availability.
    Enables one more backend from the same group.
    Runs dnet_recovery merge without --one-node and --backend-id, but with -f merge.dump.file.
    Checks written data availability.
    Turns on all backends from the same group on all nodes.
    Runs dnet_recovery merge without --one-node and --backend-id.
    Checks written data availability.
    Turns on one backend from another node and from the second group.
    Runs dnet_recovery dc with --one-node and --backend-id.
    Checks written data availability in both groups.
    Turns on all nodes from the second group.
    Runs dnet_recovery dc without --one-node and --backend-id, but with -f merge.dump.file.
    Checks written data availability in both groups.
    Turns on the third group's nodes.
    Writes new data to the same keys.
    Runs dnet_recovery without --one-node and --backend-id.
    Checks written data availability in all groups.
    Writes one key with different data and an incremented timestamp to groups 1, 2, 3.
    Corrupts the record in the third group.
    Runs dnet_recovery for all groups (1, 2, 3).
    Checks that all groups have the key with the same data and timestamp that was written to the second group.
    Runs defragmentation on all backends from all groups.
    Checks written data availability in all groups.
    '''
    namespace = "TestRecovery"
    count = 1024
    # keys which will be written, read, recovered and checked by the recovery tests
    keys = map('{0}'.format, range(count))
    # in the first steps, the data of every key written to the first and second groups equals the key itself
    datas = keys
    # to keep it simple, all keys in the first and second groups share the same timestamp
    timestamp = elliptics.Time.now()
    # this data will be written to the third group
    datas2 = map('{0}.{0}'.format, keys)
    # this timestamp will be used for writing data to the third group
    timestamp2 = elliptics.Time(timestamp.tsec + 3600, timestamp.tnsec)
    corrupted_key = 'corrupted_test.key'
    corrupted_data = 'corrupted_test.data'
    # timestamp of corrupted_key from first group
    corrupted_timestamp = elliptics.Time.now()
    # timestamp of corrupted_key from second group which should be recovered to first and third group
    corrupted_timestamp2 = elliptics.Time(corrupted_timestamp.tsec + 3600,
                                          corrupted_timestamp.tnsec)

    def test_disable_backends(self, server, simple_node):
        '''
        Turns off all backends from all node except one.
        '''
        session = make_session(node=simple_node,
                               test_name='TestRecovery.test_disable_backends',
                               test_namespace=self.namespace)
        session.set_timeout(10)
        groups = session.routes.groups()
        scope.test_group = groups[0]
        scope.test_group2 = groups[1]
        scope.test_group3 = groups[2]
        scope.test_other_groups = groups[3:]
        scope.disabled_backends = []
        scope.init_routes = session.routes.filter_by_groups(groups)

        # disables, on all nodes, the backends from every group other than scope.test_group
        res = []
        for group in session.routes.groups()[1:]:
            addr_back = session.routes.filter_by_group(
                group).addresses_with_backends()
            for address, backend in addr_back:
                res.append((disable_backend(scope, session, group, address,
                                            backend), backend))

        routes = session.routes.filter_by_group(scope.test_group)

        # chooses one backend from one node to leave it enabled
        scope.test_address = routes[0].address
        scope.test_backend = routes[0].backend_id

        # disables all other backends from that group.
        addr_back = routes.addresses_with_backends()
        for address, backend in addr_back:
            if (address, backend) != (scope.test_address, scope.test_backend):
                res.append((disable_backend(scope, session, scope.test_group,
                                            address, backend), backend))

        for r, backend in res:
            check_backend_status(r.get(), backend, state=0)

        # checks that routes contains only chosen backend address.
        assert session.routes.addresses_with_backends() == ((
            scope.test_address, scope.test_backend), )
        # checks that routes contains only chosen backend group
        assert session.routes.groups() == (scope.test_group, )

    def test_prepare_data(self, server, simple_node):
        '''
        Writes self.keys to chosen group and checks their availability.
        '''
        session = make_session(node=simple_node,
                               test_name='TestRecovery.test_prepare_data',
                               test_namespace=self.namespace)
        session.groups = [scope.test_group]
        session.timestamp = self.timestamp

        write_data(scope, session, self.keys, self.datas)
        check_data(scope, session, self.keys, self.datas, self.timestamp)

    def test_enable_group_one_backend(self, server, simple_node):
        '''
        Turns on one backend from the same group.
        '''
        assert scope.disabled_backends[-1][0] == scope.test_group
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_enable_group_one_backend',
            test_namespace=self.namespace)
        group, address, backend = scope.disabled_backends[-1]
        r = enable_backend(scope, session, group, address, backend)
        check_backend_status(r.get(), backend, state=1)
        wait_backends_in_route(session, ((address, backend), ))

    def test_merge_two_backends(self, server, simple_node):
        '''
        Runs dnet_recovery merge with --one-node=scope.test_address and --backend-id=scope.test_backend.
        Checks self.keys availability after recovering.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_merge_two_backends',
            test_namespace=self.namespace)

        recovery(one_node=True,
                 remotes=map(elliptics.Address.from_host_port_family,
                             server.remotes),
                 backend_id=scope.test_backend,
                 address=scope.test_address,
                 groups=(scope.test_group, ),
                 session=session.clone(),
                 rtype=RECOVERY.MERGE,
                 no_meta=True,
                 log_file='merge_2_backends.log',
                 tmp_dir='merge_2_backends')

        session.groups = (scope.test_group, )
        check_data(scope, session, self.keys, self.datas, self.timestamp)
        check_keys_absence(scope, session, self.keys)

    def test_enable_another_one_backend(self, server, simple_node):
        '''
        Enables one more backend from the same group.
        '''
        assert scope.disabled_backends[-2][0] == scope.test_group
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_enable_another_one_backend',
            test_namespace=self.namespace)
        group, address, backend = scope.disabled_backends[-1]
        r = enable_backend(scope, session, group, address, backend)
        check_backend_status(r.get(), backend, state=1)
        wait_backends_in_route(session, ((address, backend), ))

    def test_merge_from_dump_3_backends(self, server, simple_node):
        '''
        Writes all keys to dump file: 'merge.dump.file'.
        Runs dnet_recovery merge without --one-node and without --backend-id and with -f merge.dump.file.
        Checks that all keys are available and have correct data.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_merge_from_dump_3_backends',
            test_namespace=self.namespace)

        dump_filename = 'merge.dump.file'
        with open(dump_filename, 'w') as dump_file:
            for key in self.keys + ['unknown_key']:
                dump_file.write('{0}\n'.format(str(session.transform(key))))

        recovery(one_node=False,
                 remotes=map(elliptics.Address.from_host_port_family,
                             server.remotes),
                 backend_id=None,
                 address=scope.test_address,
                 groups=(scope.test_group, ),
                 session=session.clone(),
                 rtype=RECOVERY.MERGE,
                 log_file='merge_from_dump_3_backends.log',
                 tmp_dir='merge_from_dump_3_backends',
                 dump_file=dump_filename)

        session.groups = (scope.test_group, )
        check_data(scope, session, self.keys, self.datas, self.timestamp)
        check_keys_absence(scope, session, self.keys)

    def test_enable_all_group_backends(self, server, simple_node):
        '''
        Enables all backends on all nodes from the first group.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_enable_all_group_backends',
            test_namespace=self.namespace)
        enable_group(scope, session, scope.test_group)

    def test_merge_one_group(self, server, simple_node):
        '''
        Runs dnet_recovery merge without --one-node and without --backend-id.
        Checks self.keys availability after recovering.
        '''
        session = make_session(node=simple_node,
                               test_name='TestRecovery.test_merge_one_group',
                               test_namespace=self.namespace)

        recovery(one_node=False,
                 remotes=map(elliptics.Address.from_host_port_family,
                             server.remotes),
                 backend_id=None,
                 address=scope.test_address,
                 groups=(scope.test_group, ),
                 session=session.clone(),
                 rtype=RECOVERY.MERGE,
                 log_file='merge_one_group.log',
                 tmp_dir='merge_one_group')

        session.groups = (scope.test_group, )
        check_data(scope, session, self.keys, self.datas, self.timestamp)
        check_keys_absence(scope, session, self.keys)

    def test_enable_second_group_one_backend(self, server, simple_node):
        '''
        Enables one backend on one node from the second group.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_enable_second_group_one_backend',
            test_namespace=self.namespace)
        group, address, backend = next(((g, a, b)
                                        for g, a, b in scope.disabled_backends
                                        if g == scope.test_group2))
        scope.test_address2 = address
        scope.test_backend2 = backend

        r = enable_backend(scope, session, group, address, backend)
        check_backend_status(r.get(), backend, state=1)
        wait_backends_in_route(session, ((address, backend), ))

    def test_dc_one_backend_and_one_group(self, server, simple_node):
        '''
        Runs dnet_recovery dc with --one-node=scope.test_address2,
        --backend-id=scope.test_backend2 and against both groups.
        Checks self.keys availability after recovering in both groups.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_dc_one_backend_and_one_group',
            test_namespace=self.namespace)

        recovery(one_node=True,
                 remotes=map(elliptics.Address.from_host_port_family,
                             server.remotes),
                 backend_id=scope.test_backend2,
                 address=scope.test_address2,
                 groups=(
                     scope.test_group,
                     scope.test_group2,
                 ),
                 session=session.clone(),
                 rtype=RECOVERY.DC,
                 log_file='dc_one_backend.log',
                 tmp_dir='dc_one_backend',
                 no_meta=True)

        session.groups = (scope.test_group2, )
        check_data(scope, session, self.keys, self.datas, self.timestamp)

    def test_enable_all_second_group_backends(self, server, simple_node):
        '''
        Enables all backends on all nodes in the second group.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_enable_all_second_group_backends',
            test_namespace=self.namespace)
        enable_group(scope, session, scope.test_group2)

    def test_dc_from_dump_two_groups(self, server, simple_node):
        '''
        Runs dnet_recovery dc without --one-node and
        without --backend-id against both groups and with -f merge.dump.file.
        Checks self.keys availability after recovering in both groups.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_dc_from_dump_two_groups',
            test_namespace=self.namespace)

        dump_filename = 'dc.dump.file'
        with open(dump_filename, 'w') as dump_file:
            for key in self.keys + ['unknown_key']:
                dump_file.write('{0}\n'.format(str(session.transform(key))))

        recovery(one_node=False,
                 remotes=map(elliptics.Address.from_host_port_family,
                             server.remotes),
                 backend_id=None,
                 address=scope.test_address2,
                 groups=(
                     scope.test_group,
                     scope.test_group2,
                 ),
                 session=session.clone(),
                 rtype=RECOVERY.DC,
                 log_file='dc_from_dump_two_groups.log',
                 tmp_dir='dc_from_dump_two_groups',
                 dump_file=dump_filename)

        session.groups = (scope.test_group, )
        check_data(scope, session, self.keys, self.datas, self.timestamp)

        session.groups = (scope.test_group2, )
        check_data(scope, session, self.keys, self.datas, self.timestamp)

    def test_enable_all_third_group_backends(self, server, simple_node):
        '''
        Enables all backends on all nodes from the third group.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_enable_all_third_group_backends',
            test_namespace=self.namespace)
        enable_group(scope, session, scope.test_group3)

    def test_write_data_to_third_group(self, server, simple_node):
        '''
        Writes different data for self.keys in the third group.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_write_data_to_third_group',
            test_namespace=self.namespace)
        session.groups = [scope.test_group3]
        session.timestamp = self.timestamp2

        write_data(scope, session, self.keys, self.datas2)
        check_data(scope, session, self.keys, self.datas2, self.timestamp2)

    def test_dc_three_groups(self, server, simple_node):
        '''
        Runs dc recovery without --one-node and without --backend-id against all three groups.
        Checks that all three groups contain data from third group.
        '''
        session = make_session(node=simple_node,
                               test_name='TestRecovery.test_dc_three_groups',
                               test_namespace=self.namespace)

        recovery(one_node=False,
                 remotes=map(elliptics.Address.from_host_port_family,
                             server.remotes),
                 backend_id=None,
                 address=scope.test_address2,
                 groups=(scope.test_group, scope.test_group2,
                         scope.test_group3),
                 session=session.clone(),
                 rtype=RECOVERY.DC,
                 log_file='dc_three_groups.log',
                 tmp_dir='dc_three_groups')

        for group in (scope.test_group, scope.test_group2, scope.test_group3):
            session.groups = [group]
            check_data(scope, session, self.keys, self.datas2, self.timestamp2)

    def test_write_and_corrupt_data(self, server, simple_node):
        '''
        Writes the key to groups 1, 2, 3 one by one, with different data and
        an increasing timestamp, then corrupts the data in group #3.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_write_and_corrupt_data',
            test_namespace=self.namespace)

        timestamp3 = elliptics.Time(self.corrupted_timestamp.tsec + 7200,
                                    self.corrupted_timestamp.tnsec)

        session.groups = [scope.test_group]
        session.timestamp = self.corrupted_timestamp
        write_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.1'])
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.1'], self.corrupted_timestamp)

        session.groups = [scope.test_group2]
        session.timestamp = self.corrupted_timestamp2
        write_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.2'])
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.2'], self.corrupted_timestamp2)

        session.groups = [scope.test_group3]
        session.timestamp = timestamp3
        write_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.3'])
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.3'], timestamp3)

        res = session.lookup(self.corrupted_key).get()[0]

        with open(res.filepath, 'r+b') as f:
            f.seek(res.offset, 0)
            tmp = '123' + f.read()[3:]
            f.seek(res.offset, 0)
            f.write(tmp)
            f.flush()

    def test_dc_corrupted_data(self, server, simple_node):
        '''
        Runs dc recovery and checks that the second version of the data is recovered to all groups.
        This test checks that dc recovery correctly handles a corrupted key on its way:
        group #3, whose key was corrupted, has the newest timestamp, so recovery tries to use it first.
        But the read fails, so recovery switches to group #2 and recovers the data from it to groups #1 and #3.
        '''
        session = make_session(node=simple_node,
                               test_name='TestRecovery.test_dc_corrupted_data',
                               test_namespace=self.namespace)

        recovery(one_node=False,
                 remotes=map(elliptics.Address.from_host_port_family,
                             server.remotes),
                 backend_id=None,
                 address=scope.test_address2,
                 groups=(scope.test_group, scope.test_group2,
                         scope.test_group3),
                 session=session.clone(),
                 rtype=RECOVERY.DC,
                 log_file='dc_corrupted_data.log',
                 tmp_dir='dc_corrupted_data')

        for group in (scope.test_group, scope.test_group2, scope.test_group3):
            session.groups = [group]
            check_data(scope, session, [self.corrupted_key],
                       [self.corrupted_data + '.2'], self.corrupted_timestamp2)

    def test_defragmentation(self, server, simple_node):
        '''
        Runs defragmentation on all backends from all nodes and groups.
        Waits until defragmentation finishes and checks the results.
        '''
        session = make_session(node=simple_node,
                               test_name='TestRecovery.test_defragmentation',
                               test_namespace=self.namespace)
        res = []
        for address, backend in session.routes.addresses_with_backends():
            res.append((session.start_defrag(address, backend), backend))
        cnt = 0
        for r, backend in res:
            assert len(r.get()) == 1
            assert len(r.get()[0].backends) == 1
            assert r.get()[0].backends[0].backend_id == backend
            assert r.get()[0].backends[0].state == 1
            assert r.get()[0].backends[0].defrag_state == 1
            assert r.get()[0].backends[0].last_start_err == 0
            cnt += r.get()[0].backends[0].defrag_state

        while cnt > 0:
            cnt = 0
            for address in session.routes.addresses():
                res = session.request_backends_status(address).get()
                backends = session.routes.get_address_backends(address)
                assert len(res) == 1
                assert len(res[0].backends) == len(backends)
                for r in res[0].backends:
                    assert r.backend_id in backends
                    assert r.state == 1
                    assert r.last_start_err == 0
                    cnt += r.defrag_state
            print "In defragmentation:", cnt

        for group in (scope.test_group, scope.test_group2, scope.test_group3):
            session.groups = [group]
            check_data(scope, session, self.keys, self.datas2, self.timestamp2)

    def test_enable_rest_backends(self, server, simple_node):
        '''
        Restores all groups with all nodes and all backends.
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_enable_rest_backends',
            test_namespace=self.namespace)
        for g in scope.test_other_groups:
            enable_group(scope, session, g)

    def test_checks_all_enabled(self, server, simple_node):
        '''
        Checks statuses of all backends from all nodes and groups
        '''
        session = make_session(
            node=simple_node,
            test_name='TestRecovery.test_checks_all_enabled',
            test_namespace=self.namespace)
        assert set(scope.init_routes.addresses()) == set(
            session.routes.addresses())
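
Throughout this class, "one hour later" and "two hours later" timestamps are derived by adding seconds to tsec while keeping tnsec, e.g. timestamp2 = elliptics.Time(timestamp.tsec + 3600, timestamp.tnsec). A hypothetical helper for that recurring arithmetic:

import elliptics

def time_shifted(ts, seconds):
    # Hypothetical helper: a copy of ts moved by the given number of seconds.
    return elliptics.Time(ts.tsec + seconds, ts.tnsec)

base = elliptics.Time.now()
assert time_shifted(base, 3600) > base  # one hour later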
Example #20
    s = elliptics.Session(n)
    s.add_groups([2])

    ranges = [
        range(elliptics.Id([0] * 64, 0), elliptics.Id([100] + [255] * 63, 0)),
        range(elliptics.Id([200] + [0] * 63, 0),
              elliptics.Id([220] + [255] * 63, 0))
    ]

    eid = elliptics.Id([0] * 64, 2)
    iterator = s.start_iterator(eid, ranges,
                                elliptics.iterator_types.network,
                                elliptics.iterator_flags.key_range
                                | elliptics.iterator_flags.ts_range
                                | elliptics.iterator_flags.data,
                                elliptics.Time(0, 0),
                                elliptics.Time(2**64 - 1, 2**64 - 1))

    for i, result in enumerate(iterator):
        if result.status != 0:
            raise AssertionError("Wrong status: {0}".format(result.status))

        print "key: {0}, flags: {1}, ts: {2}/{3}, data: {4}".format(
            result.response.key, result.response.user_flags,
            result.response.timestamp.tsec, result.response.timestamp.tnsec,
            result.response_data)

        # Test flow control
        if i % 10 == 0:
            print "Pause iterator"
            pause_it = s.pause_iterator(eid, result.id)
Example #21
class TestRecoveryUserFlags:
    '''
    Checks recovery with a specified user_flags_set: a key is recovered if at least one replica
    has user_flags from the specified user_flags_set.
    '''
    user_flags_set = [2]
    timestamp = elliptics.Time.now()
    timestamp_new = elliptics.Time(timestamp.tsec + 3600, timestamp.tnsec)
    test_key = 'skip_test.key'
    test_key2 = 'skip_test.key2'
    test_key3 = 'skip_test.key3'
    test_data = 'skip_test.data'
    namespace = 'TestRecoveryUserFlags'

    def prepare_test_data(self):
        '''
        Writes test keys with specific user_flags and checks that the operation was successful:
        1. Write test_key to test_groups with different user_flags that are not in user_flags_set.
        2. Write test_key2 with different user_flags including ones from user_flags_set.
        3. Write test_key3 with different user_flags. Replicas with user_flags from user_flags_set
           are written with an older timestamp.
        '''
        session = scope.session.clone()
        session.timestamp = self.timestamp

        for i, group_id in enumerate(scope.test_groups):
            session.groups = [group_id]
            session.user_flags = i
            assert i not in self.user_flags_set

            write_data(scope, session, [self.test_key], [self.test_data])
            check_data(scope, session, [self.test_key], [self.test_data],
                       self.timestamp)

        for i, group_id in enumerate(scope.groups):
            session.groups = [group_id]
            session.user_flags = i

            write_data(scope, session, [self.test_key2], [self.test_data])
            check_data(scope, session, [self.test_key2], [self.test_data],
                       self.timestamp)

        for i, group_id in enumerate(scope.groups):
            if i in self.user_flags_set:
                timestamp = self.timestamp
            else:
                timestamp = self.timestamp_new

            session.timestamp = timestamp
            session.groups = [group_id]
            session.user_flags = i

            write_data(scope, session, [self.test_key3], [self.test_data])
            check_data(scope, session, [self.test_key3], [self.test_data],
                       timestamp)

    def cleanup_backends(self):
        '''
        Cleanup that does the following:
        1. disables all backends
        2. removes all blobs
        3. enables all backends on all nodes
        '''
        disable_backends(scope.session,
                         scope.session.routes.addresses_with_backends())
        remove_all_blobs(scope.session)
        enable_backends(scope.session, scope.routes.addresses_with_backends())

    def test_setup(self, server, simple_node):
        '''
        Initial test case that prepares the test cluster before running recovery. It includes:
        1. preparing the whole test class scope: making a session, choosing nodes and backends, etc.
        2. initial cleanup: disabling all backends on all nodes and removing all blobs
        3. enabling the backends that will be used in the test
        4. preparing the test keys
        '''
        self.scope = scope
        self.scope.session = make_session(node=simple_node,
                                          test_name='TestRecoveryUserFlags')
        self.scope.routes = self.scope.session.routes
        self.scope.groups = self.scope.routes.groups()[:3]
        self.scope.test_groups = self.scope.groups[1:]

        self.cleanup_backends()
        self.prepare_test_data()

    def test_recovery(self, server, simple_node):
        '''
        Runs recovery with key filtering via user_flags_set and checks that:
        1. test_key shouldn't be recovered
        2. test_key2 replicas shouldn't contain user_flags that are not in user_flags_set
        3. test_key3 replicas shouldn't contain user_flags that are in user_flags_set
        '''
        recovery(one_node=False,
                 remotes=scope.routes.addresses(),
                 backend_id=None,
                 address=scope.routes.addresses()[0],
                 groups=scope.groups,
                 rtype=RECOVERY.DC,
                 log_file='dc_recovery_user_flags.log',
                 tmp_dir='dc_recovery_user_flags',
                 user_flags_set=self.user_flags_set)

        session = scope.session.clone()
        session.exceptions_policy = elliptics.core.exceptions_policy.no_exceptions
        session.set_filter(elliptics.filters.all)

        for group_id in scope.groups:
            session.groups = [group_id]

            results = session.lookup(self.test_key).get()
            if group_id in scope.test_groups:
                assert all(r.status == 0 for r in results)
            else:
                assert all(r.status == -errno.ENOENT for r in results)

            results = session.read_data(self.test_key2).get()
            assert all(r.user_flags in self.user_flags_set for r in results)

            results = session.read_data(self.test_key3).get()
            assert all(r.user_flags not in self.user_flags_set
                       for r in results)

    def test_teardown(self, server, simple_node):
        self.cleanup_backends()