def test_basic(self):
    backend = RedisBackend()

    # Adding the first record reports True: the timeline may be
    # dispatched for digestion right away.
    first = Record('record:1', 'value', time.time())
    assert backend.add('timeline', first) is True

    # A second add reports False -- the timeline is already pending
    # dispatch, and dispatching again would send the digest twice.
    second = Record('record:2', 'value', time.time())
    assert backend.add('timeline', second) is False

    # Nothing needs to migrate between schedule sets yet.
    assert set(backend.schedule(time.time())) == set()

    with backend.digest('timeline', 0) as digested:
        assert set(digested) == {first, second}

    # Digesting reschedules the timeline, so it shows up now.
    scheduled_keys = {entry.key for entry in backend.schedule(time.time())}
    assert scheduled_keys == {'timeline'}

    # No records were added in the meantime; the digest is empty.
    with backend.digest('timeline', 0) as digested:
        assert set(digested) == set()

    # The timeline contents are gone, so there is nothing left to move.
    assert set(backend.schedule(time.time())) == set()
    # Example 2
    def test_basic(self):
        backend = RedisBackend()

        # Adding to an empty timeline returns True: the timeline is
        # immediately eligible for digest dispatch.
        record_1 = Record('record:1', 'value', time.time())
        assert backend.add('timeline', record_1) is True

        # Adding again returns False; the timeline is already awaiting
        # dispatch, and dispatching twice would duplicate delivery.
        record_2 = Record('record:2', 'value', time.time())
        assert backend.add('timeline', record_2) is False

        # No timelines need to move between schedule sets yet.
        assert set(backend.schedule(time.time())) == set()

        with backend.digest('timeline', 0) as records:
            assert set(records) == {record_1, record_2}

        # Digesting put the timeline back on the schedule.
        assert {entry.key for entry in backend.schedule(time.time())} == {'timeline'}

        # Nothing new was added, so another digest yields nothing.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set()

        # With the timeline contents gone, scheduling has nothing to move.
        assert set(backend.schedule(time.time())) == set()
    # Example 3
    def test_digesting(self):
        backend = RedisBackend()

        # XXX: This test relies on record addition and scheduling behaving
        # correctly in order to build up the state it needs!

        timeline = 'timeline'
        count = 10
        records = list(itertools.islice(self.records, count))
        for record in records:
            backend.add(timeline, record)

        # Drain the scheduler so the timeline lands in the ready set.
        list(backend.schedule(time.time()))

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        def get_timeline_size():
            return client.zcard(timeline_key)

        def get_waiting_set_size():
            return get_set_size(backend.cluster, waiting_set_key)

        def get_ready_set_size():
            return get_set_size(backend.cluster, ready_set_key)

        # A successful digest empties the timeline and moves it from the
        # ready set back to the waiting set.
        with self.assertChanges(get_timeline_size, before=count, after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):

            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                # Records are yielded newest first.
                assert list(entries) == records[::-1]

            next_scheduled_delivery = timestamp + backend.minimum_delay
            assert client.zscore(waiting_set_key, timeline) == next_scheduled_delivery
            last_processed = client.get(make_last_processed_timestamp_key(timeline_key))
            assert int(last_processed) == int(timestamp)

        # Move the timeline back to the ready set.
        list(backend.schedule(next_scheduled_delivery))

        # An empty digest should drop the timeline from the schedule entirely.
        with self.assertDoesNotChange(get_waiting_set_size), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            with backend.digest(timeline) as entries:
                assert list(entries) == []

        assert client.get(make_last_processed_timestamp_key(timeline_key)) is None
    # Example 4
    def test_digesting(self):
        backend = RedisBackend()

        # XXX: This assumes that adding records and scheduling work correctly,
        # since both are used to set up the state needed for this test!

        timeline = 'timeline'
        n = 10
        records = list(itertools.islice(self.records, n))
        for record in records:
            backend.add(timeline, record)

        # Consume the scheduler so the timeline moves into the ready set.
        for _ in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)

        # A successful digest drains the timeline and flips it from the ready
        # set back to the waiting set.
        with self.assertChanges(get_timeline_size, before=n, after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):

            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                batch = list(entries)
                # Records come back newest first.
                assert batch == list(reversed(records))

            next_scheduled_delivery = timestamp + backend.minimum_delay
            assert client.zscore(waiting_set_key, timeline) == next_scheduled_delivery
            last_processed = client.get(make_last_processed_timestamp_key(timeline_key))
            assert int(last_processed) == int(timestamp)

        # Move the timeline back to the ready set.
        for _ in backend.schedule(next_scheduled_delivery):
            pass

        # An empty digest drops the timeline from the schedule entirely.
        with self.assertDoesNotChange(get_waiting_set_size), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            with backend.digest(timeline) as entries:
                assert list(entries) == []

        assert client.get(make_last_processed_timestamp_key(timeline_key)) is None
    # Example 5
    def test_scheduling(self):
        """Scheduling moves every due timeline from waiting to ready, earliest first."""
        backend = RedisBackend()

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        n = 10

        # Seed n timelines (scores 0..n-1) into the waiting set...
        for i in range(n):
            with backend.cluster.map() as client:
                client.zadd(waiting_set_key, i, 'timelines:{0}'.format(i))

        # ...and n more (scores n..2n-1) directly into the ready set.
        for i in range(n, n * 2):
            with backend.cluster.map() as client:
                client.zadd(ready_set_key, i, 'timelines:{0}'.format(i))

        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)

        with self.assertChanges(get_waiting_set_size, before=n, after=0), \
                self.assertChanges(get_ready_set_size, before=n, after=n * 2):
            results = list(zip(range(n), backend.schedule(n, chunk=5)))
            # BUG FIX: the original asserted ``len(results) is n`` -- ``is``
            # compares object identity and only "works" for small integers
            # because CPython interns them. Equality is the correct check.
            assert len(results) == n

            # Ensure scheduled entries are returned earliest first.
            for i, entry in results:
                assert entry.key == 'timelines:{0}'.format(i)
                assert entry.timestamp == float(i)
    # Example 6
    def test_maintenance_failure_recovery_with_capacity(self):
        """Recovery after a failed digest still honors the timeline capacity.

        BUG FIX: the original used ``xrange``, which exists only on Python 2;
        sibling tests in this file use f-strings (Python 3), where ``xrange``
        raises ``NameError``. Replaced with ``range``.
        """
        backend = RedisBackend(capacity=10, truncation_chance=0.0)

        t = time.time()

        # Add 10 items to the timeline.
        for i in range(10):
            backend.add('timeline', Record('record:{}'.format(i), '{}'.format(i), t + i))

        # Abort mid-digest so the digest is never closed.
        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # The 10 existing items should now be in the digest set (the exception
        # prevented the close operation from occurring, so they were never
        # deleted from Redis or removed from the digest set.) If we add 10 more
        # items, they should be added to the timeline set (not the digest set.)
        for i in range(10, 20):
            backend.add('timeline', Record('record:{}'.format(i), '{}'.format(i), t + i))

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # The schedule should now contain the timeline.
        assert set(entry.key for entry in backend.schedule(time.time())) == set(['timeline'])

        # Only the new records should exist -- the older ones should have been
        # trimmed to avoid the digest growing beyond the timeline capacity.
        with backend.digest('timeline', 0) as records:
            expected_keys = set('record:{}'.format(i) for i in range(10, 20))
            assert set(record.key for record in records) == expected_keys
    # Example 7
    def test_maintenance_failure_recovery(self):
        backend = RedisBackend()

        first = Record('record:1', 'value', time.time())
        backend.add('timeline', first)

        # Abort a digest mid-flight so it is never closed.
        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # ...and digesting is invalid while the timeline is waiting.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                pass

        second = Record('record:2', 'value', time.time())
        backend.add('timeline', second)

        # The timeline should be back on the schedule.
        assert {entry.key for entry in backend.schedule(time.time())} == {'timeline'}

        # Both the old and the new record survive, since the timeline
        # contents were merged back into the digest.
        with backend.digest('timeline', 0) as records:
            assert set(records) == {first, second}
    def test_maintenance_failure_recovery(self):
        backend = RedisBackend()

        record_1 = Record('record:1', 'value', time.time())
        backend.add('timeline', record_1)

        # Simulate a crash while a digest is open; the context manager never
        # gets the chance to close the digest cleanly.
        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # Maintenance returns the timeline to the waiting state, ...
        backend.maintenance(time.time())

        # ...where digesting is not permitted.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                pass

        record_2 = Record('record:2', 'value', time.time())
        backend.add('timeline', record_2)

        # Scheduling should surface the timeline again.
        scheduled = set(entry.key for entry in backend.schedule(time.time()))
        assert scheduled == set(['timeline'])

        # The digest holds both records: the timeline contents were merged
        # back into the digest during recovery.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_1, record_2])
    # Example 9
    def test_maintenance_failure_recovery_with_capacity(self):
        backend = RedisBackend(capacity=10, truncation_chance=0.0)

        start = time.time()

        # Seed the timeline with 10 records.
        for i in range(10):
            backend.add("timeline", Record(f"record:{i}", f"{i}", start + i))

        # Abort a digest so it is never closed.
        try:
            with backend.digest("timeline", 0) as records:
                raise Exception("This causes the digest to not be closed.")
        except Exception:
            pass

        # Because the failed digest was never closed, the first 10 records
        # stay in the digest set (never deleted from Redis). These 10 extra
        # records therefore land in the timeline set, not the digest set.
        for i in range(10, 20):
            backend.add("timeline", Record(f"record:{i}", f"{i}", start + i))

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # The schedule should now contain the timeline.
        scheduled = {entry.key for entry in backend.schedule(time.time())}
        assert scheduled == {"timeline"}

        # Only the newest records remain -- the older ones were trimmed so
        # the digest stays within the configured capacity.
        with backend.digest("timeline", 0) as records:
            observed = {record.key for record in records}
            assert observed == {f"record:{i}" for i in range(10, 20)}
    # Example 10
    def test_digesting_failure_recovery(self):
        backend = RedisBackend()

        # XXX: This test relies on adding records and scheduling working
        # correctly to build up the state it needs!

        timeline = 'timeline'
        count = 10
        records = list(itertools.islice(self.records, count))
        for record in records:
            backend.add(timeline, record)

        # Drain the scheduler so the timeline lands in the ready set.
        list(backend.schedule(time.time()))

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        def get_waiting_set_size():
            return get_set_size(backend.cluster, waiting_set_key)

        def get_ready_set_size():
            return get_set_size(backend.cluster, ready_set_key)

        def get_timeline_size():
            return client.zcard(timeline_key)

        def get_digest_size():
            return client.zcard(make_digest_key(timeline_key))

        # A digest that blows up mid-iteration moves the records into the
        # digest set but leaves both schedule sets untouched.
        with self.assertChanges(get_timeline_size, before=count, after=0), \
                self.assertChanges(get_digest_size, before=0, after=count), \
                self.assertDoesNotChange(get_waiting_set_size), \
                self.assertDoesNotChange(get_ready_set_size):
            try:
                with backend.digest(timeline) as entries:
                    raise ExpectedError
            except ExpectedError:
                pass

        # Add another few records to the timeline to ensure they end up in the digest.
        extra = list(itertools.islice(self.records, 5))
        for record in extra:
            backend.add(timeline, record)

        # A successful retry drains both sets and reschedules the timeline.
        with self.assertChanges(get_timeline_size, before=len(extra), after=0), \
                self.assertChanges(get_digest_size, before=len(records), after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                # All records come back, newest first, failed batch included.
                assert list(entries) == (records + extra)[::-1]

            assert client.zscore(waiting_set_key, timeline) == timestamp + backend.minimum_delay
    # Example 11
    def test_delete(self):
        backend = RedisBackend()
        backend.add('timeline', Record('record:1', 'value', time.time()))
        backend.delete('timeline')

        # A deleted timeline can no longer be digested.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                assert set(records) == set()

        # Nothing is left to schedule, and no digest keys linger in Redis.
        assert set(backend.schedule(time.time())) == set()
        assert len(backend._get_connection('timeline').keys('d:*')) == 0
    def test_delete(self):
        backend = RedisBackend()
        record = Record('record:1', 'value', time.time())
        backend.add('timeline', record)
        backend.delete('timeline')

        # Digesting a deleted timeline raises, since it is no longer ready.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                assert set(records) == set([])

        # The schedule is empty and every digest key was cleaned up.
        assert set(backend.schedule(time.time())) == set()
        connection = backend._get_connection('timeline')
        assert len(connection.keys('d:*')) == 0
    # Example 13
    def test_delete(self):
        backend = RedisBackend()
        backend.add("timeline", Record("record:1", "value", time.time()))
        backend.delete("timeline")

        # Once deleted, the timeline is no longer in a digestable state.
        with pytest.raises(InvalidState):
            with backend.digest("timeline", 0) as records:
                assert set(records) == set()

        # Scheduling finds nothing, and no digest keys remain behind.
        assert set(backend.schedule(time.time())) == set()
        connection = backend._get_connection("timeline")
        assert len(connection.keys("d:*")) == 0
    # Example 14
    def test_digesting_failure_recovery(self):
        backend = RedisBackend()

        # XXX: This assumes that adding records and scheduling work correctly,
        # since both are used to set up the state needed for this test!

        timeline = 'timeline'
        n = 10
        records = list(itertools.islice(self.records, n))
        for record in records:
            backend.add(timeline, record)

        # Consume the scheduler so the timeline moves into the ready set.
        for _ in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)
        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_digest_size = functools.partial(client.zcard, make_digest_key(timeline_key))

        # Failing mid-digest shifts records from the timeline into the digest
        # set without touching either schedule set.
        with self.assertChanges(get_timeline_size, before=n, after=0), \
                self.assertChanges(get_digest_size, before=0, after=n), \
                self.assertDoesNotChange(get_waiting_set_size), \
                self.assertDoesNotChange(get_ready_set_size):
            try:
                with backend.digest(timeline) as entries:
                    raise ExpectedError
            except ExpectedError:
                pass

        # Add another few records to the timeline to ensure they end up in the digest.
        extra = list(itertools.islice(self.records, 5))
        for record in extra:
            backend.add(timeline, record)

        # The retry drains the timeline and digest sets, reschedules the
        # timeline, and yields every record (newest first).
        with self.assertChanges(get_timeline_size, before=len(extra), after=0), \
                self.assertChanges(get_digest_size, before=len(records), after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                batch = list(entries)
                assert batch == list(reversed(records + extra))

            next_delivery = timestamp + backend.minimum_delay
            assert client.zscore(waiting_set_key, timeline) == next_delivery