Example #1
    def test_maintenance_failure_recovery_with_capacity(self):
        backend = RedisBackend(capacity=10, truncation_chance=0.0)

        t = time.time()

        # Add 10 items to the timeline.
        for i in xrange(10):
            backend.add('timeline', Record('record:{}'.format(i), '{}'.format(i), t + i))

        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # The 10 existing items should now be in the digest set (the exception
        # prevented the close operation from occurring, so they were never
        # deleted from Redis or removed from the digest set.) If we add 10 more
        # items, they should be added to the timeline set (not the digest set.)
        for i in xrange(10, 20):
            backend.add('timeline', Record('record:{}'.format(i), '{}'.format(i), t + i))

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # The schedule should now contain the timeline.
        assert set(entry.key for entry in backend.schedule(time.time())) == set(['timeline'])

        # Only the new records should exist -- the older ones should have been
        # trimmed to avoid the digest growing beyond the timeline capacity.
        with backend.digest('timeline', 0) as records:
            expected_keys = set('record:{}'.format(i) for i in xrange(10, 20))
            assert set(record.key for record in records) == expected_keys
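
The snippets on this page are methods taken from a larger test class, so their imports are omitted. A plausible preamble, assuming the names come from Sentry's digests package (the exact module paths are an assumption, not shown in the examples), might look like the following; the Python 2 era snippets, which use xrange, would use import mock instead of unittest.mock:

# Hypothetical preamble for these snippets; module paths are assumed from
# Sentry's digests package layout. The test methods shown on this page are
# defined on a test class, not at module level.
import time
from unittest import mock

import pytest

from sentry.digests import Record
from sentry.digests.backends.base import InvalidState
from sentry.digests.backends.redis import (
    RedisBackend,
    SCHEDULE_STATE_READY,
    SCHEDULE_STATE_WAITING,
    make_digest_key,
    make_record_key,
    make_schedule_key,
    make_timeline_key,
)
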
Example #2
    def test_maintenance_failure_recovery(self):
        backend = RedisBackend()

        record_1 = Record('record:1', 'value', time.time())
        backend.add('timeline', record_1)

        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # ...and you can't send a digest in the waiting state.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                pass

        record_2 = Record('record:2', 'value', time.time())
        backend.add('timeline', record_2)

        # The schedule should now contain the timeline.
        assert set(entry.key for entry in backend.schedule(time.time())) == set(['timeline'])

        # The existing and new record should be there because the timeline
        # contents were merged back into the digest.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_1, record_2])
Example #3
    def test_maintenance_failure_recovery(self):
        backend = RedisBackend()

        record_1 = Record('record:1', 'value', time.time())
        backend.add('timeline', record_1)

        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # ...and you can't send a digest in the waiting state.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                pass

        record_2 = Record('record:2', 'value', time.time())
        backend.add('timeline', record_2)

        # The schedule should now contain the timeline.
        assert set(entry.key
                   for entry in backend.schedule(time.time())) == set(
                       ['timeline'])

        # The existing and new record should be there because the timeline
        # contents were merged back into the digest.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_1, record_2])
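
Examples #2 and #3 exercise the recovery path: an exception inside the digest context manager leaves the records in Redis, and a later maintenance() call merges them back and reschedules the timeline. A minimal sketch of the scheduling loop these primitives are built for, with deliver as a hypothetical callable standing in for whatever actually sends the digest:

# Sketch only; not part of the original examples. It relies on the API
# surface shown above: add(), maintenance(), schedule(), and the digest()
# context manager.
def flush_ready_timelines(backend, deliver, minimum_delay=0):
    now = time.time()
    # Requeue timelines that a crashed worker left mid-digest.
    backend.maintenance(now)
    # schedule() yields the timelines whose schedule time has passed.
    for entry in backend.schedule(now):
        with backend.digest(entry.key, minimum_delay) as records:
            # Raising here leaves the records in Redis, which is exactly the
            # situation the failure-recovery tests above start from.
            deliver(entry.key, records)
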
Example #4
    def test_maintenance_failure_recovery_with_capacity(self):
        backend = RedisBackend(capacity=10, truncation_chance=0.0)

        t = time.time()

        # Add 10 items to the timeline.
        for i in range(10):
            backend.add("timeline", Record(f"record:{i}", f"{i}", t + i))

        try:
            with backend.digest("timeline", 0) as records:
                raise Exception("This causes the digest to not be closed.")
        except Exception:
            pass

        # The 10 existing items should now be in the digest set (the exception
        # prevented the close operation from occurring, so they were never
        # deleted from Redis or removed from the digest set.) If we add 10 more
        # items, they should be added to the timeline set (not the digest set.)
        for i in range(10, 20):
            backend.add("timeline", Record(f"record:{i}", f"{i}", t + i))

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # The schedule should now contain the timeline.
        assert {entry.key for entry in backend.schedule(time.time())} == {"timeline"}

        # Only the new records should exist -- the older ones should have been
        # trimmed to avoid the digest growing beyond the timeline capacity.
        with backend.digest("timeline", 0) as records:
            expected_keys = {f"record:{i}" for i in range(10, 20)}
            assert {record.key for record in records} == expected_keys
Example #5
    def test_maintenance(self):
        timeline = 'timeline'
        backend = RedisBackend(ttl=3600)

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace,
                                            SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace,
                                          SCHEDULE_STATE_READY)

        now = time.time()

        connection = backend.cluster.get_local_client_for_key(timeline_key)
        schedule_time = now - 60
        connection.zadd(ready_set_key, schedule_time, timeline)
        connection.zadd(timeline_key, 0, '1')
        connection.set(make_record_key(timeline_key, '1'), 'data')
        connection.zadd(digest_key, 0, '2')
        connection.set(make_record_key(timeline_key, '2'), 'data')

        # Move the digest from the ready set to the waiting set.
        backend.maintenance(now)
        assert connection.zcard(ready_set_key) == 0
        assert connection.zrange(waiting_set_key, 0, -1, withscores=True) == [
            (timeline, schedule_time)
        ]

        connection.zrem(waiting_set_key, timeline)
        connection.zadd(ready_set_key, schedule_time, timeline)

        # Delete the digest from the ready set.
        with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
            backend.maintenance(now)

        keys = (
            ready_set_key,
            waiting_set_key,
            timeline_key,
            digest_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2'),
        )
        for key in keys:
            assert connection.exists(key) is False
Example #6
    def test_maintenance(self):
        timeline = 'timeline'
        backend = RedisBackend(ttl=3600)

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        now = time.time()

        connection = backend.cluster.get_local_client_for_key(timeline_key)
        schedule_time = now - 60
        connection.zadd(ready_set_key, schedule_time, timeline)
        connection.zadd(timeline_key, 0, '1')
        connection.set(make_record_key(timeline_key, '1'), 'data')
        connection.zadd(digest_key, 0, '2')
        connection.set(make_record_key(timeline_key, '2'), 'data')

        # Move the digest from the ready set to the waiting set.
        backend.maintenance(now)
        assert connection.zcard(ready_set_key) == 0
        assert connection.zrange(waiting_set_key, 0, -1, withscores=True) == [(timeline, schedule_time)]

        connection.zrem(waiting_set_key, timeline)
        connection.zadd(ready_set_key, schedule_time, timeline)

        # Delete the digest from the ready set.
        with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
            backend.maintenance(now)

        keys = (
            ready_set_key,
            waiting_set_key,
            timeline_key,
            digest_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2'),
        )
        for key in keys:
            assert connection.exists(key) is False
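
Examples #5 and #6 drive the Redis client with pre-3.0 redis-py conventions: positional score/member arguments to zadd() and a boolean return from exists(). Against redis-py 3.0 or later, the equivalent calls and the final assertion would change roughly as follows (a compatibility note, not part of the original tests):

# redis-py >= 3.0 takes a {member: score} mapping for zadd(), and exists()
# returns an int count rather than a bool.
connection.zadd(ready_set_key, {timeline: schedule_time})
connection.zadd(timeline_key, {'1': 0})
connection.zadd(digest_key, {'2': 0})

for key in keys:
    assert not connection.exists(key)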