Example 1
    def test_delete(self):
        """Deleting a timeline removes every Redis key associated with it:
        both schedule set memberships, the timeline and digest sorted sets,
        and all record payload keys.
        """
        timeline = 'timeline'
        backend = RedisBackend()

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        # Populate both schedule sets, the timeline, the digest, and one
        # record payload for each entry so delete() has state to clean up.
        connection = backend.cluster.get_local_client_for_key(timeline_key)
        connection.zadd(waiting_set_key, 0, timeline)
        connection.zadd(ready_set_key, 0, timeline)
        connection.zadd(timeline_key, 0, '1')
        connection.set(make_record_key(timeline_key, '1'), 'data')
        connection.zadd(digest_key, 0, '2')
        connection.set(make_record_key(timeline_key, '2'), 'data')

        keys = (
            waiting_set_key,
            ready_set_key,
            digest_key,
            timeline_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2'),
        )

        def check_keys_exist():
            # Materialize the results as a list: on Python 3, ``map`` returns
            # a lazy iterator which would never compare equal to the
            # ``before``/``after`` lists passed to ``assertChanges``.
            return [connection.exists(key) for key in keys]

        with self.assertChanges(check_keys_exist, before=[True] * len(keys), after=[False] * len(keys)):
            backend.delete(timeline)
Example 2
    def test_digesting_failure_recovery(self):
        """A digest that raises mid-iteration must leave the records
        recoverable: they move into the digest set and are delivered by the
        next successful digest along with any newly added records.
        """
        # NOTE: this relies on record addition and scheduling behaving
        # correctly to build up the state exercised below.
        backend = self.get_backend()

        timeline = 'timeline'
        count = 10
        initial = list(itertools.islice(self.records, count))
        for record in initial:
            backend.add(timeline, record)

        # Drain the scheduler so the timeline lands in the ready set.
        for _ in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)
        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_digest_size = functools.partial(client.zcard, make_digest_key(timeline_key))

        # A failure mid-digest should still move the records from the
        # timeline into the digest, leaving both schedule sets untouched.
        with self.assertChanges(get_timeline_size, before=count, after=0):
            with self.assertChanges(get_digest_size, before=0, after=count):
                with self.assertDoesNotChange(get_waiting_set_size):
                    with self.assertDoesNotChange(get_ready_set_size):
                        try:
                            with backend.digest(timeline) as entries:
                                raise ExpectedError
                        except ExpectedError:
                            pass

        # Add another few records to the timeline to ensure they end up in the digest.
        extra = list(itertools.islice(self.records, 5))
        for record in extra:
            backend.add(timeline, record)

        # A successful digest consumes both the timeline and the previously
        # stranded digest contents, and reschedules the timeline.
        with self.assertChanges(get_timeline_size, before=len(extra), after=0):
            with self.assertChanges(get_digest_size, before=len(initial), after=0):
                with self.assertChanges(get_waiting_set_size, before=0, after=1):
                    with self.assertChanges(get_ready_set_size, before=1, after=0):
                        timestamp = time.time()
                        with mock.patch('time.time', return_value=timestamp):
                            with backend.digest(timeline) as entries:
                                entries = list(entries)
                                assert entries == (initial + extra)[::-1]

                        assert client.zscore(waiting_set_key, timeline) == timestamp + backend.minimum_delay
Example 3
    def test_digesting_failure_recovery(self):
        """A digest that raises mid-iteration must not lose records or bump
        the iteration counter; the next successful digest delivers the
        stranded records plus any new ones and increments the counter.
        """
        # NOTE: this depends on record addition and scheduling working
        # correctly to arrange the state exercised below.
        backend = self.get_backend()

        timeline = 'timeline'
        total = 10
        first_batch = list(itertools.islice(self.records, total))
        for record in first_batch:
            backend.add(timeline, record)

        # Consume the schedule generator to move the timeline to "ready".
        for _ in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)
        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_digest_size = functools.partial(client.zcard, make_digest_key(timeline_key))
        get_iteration_counter = functools.partial(client.get, make_iteration_key(timeline_key))

        # An error raised while digesting should shift records from the
        # timeline into the digest without touching the schedule sets or
        # the iteration counter.
        with self.assertChanges(get_timeline_size, before=total, after=0), \
                self.assertChanges(get_digest_size, before=0, after=total):
            with self.assertDoesNotChange(get_waiting_set_size), \
                    self.assertDoesNotChange(get_ready_set_size), \
                    self.assertDoesNotChange(get_iteration_counter):
                try:
                    with backend.digest(timeline) as entries:
                        raise ExpectedError
                except ExpectedError:
                    pass

        # Add another few records to the timeline to ensure they end up in the digest.
        second_batch = list(itertools.islice(self.records, 5))
        for record in second_batch:
            backend.add(timeline, record)

        # A clean digest drains both the timeline and the stranded digest
        # contents, reschedules the timeline, and bumps the iteration count.
        with self.assertChanges(get_timeline_size, before=len(second_batch), after=0), \
                self.assertChanges(get_digest_size, before=len(first_batch), after=0):
            with self.assertChanges(get_waiting_set_size, before=0, after=1), \
                    self.assertChanges(get_ready_set_size, before=1, after=0), \
                    self.assertChanges(get_iteration_counter, before='0', after='1'):
                timestamp = time.time()
                with mock.patch('time.time', return_value=timestamp):
                    with backend.digest(timeline) as entries:
                        entries = list(entries)
                        assert entries == (first_batch + second_batch)[::-1]

                assert client.zscore(waiting_set_key, timeline) == timestamp + backend.backoff(1)
Example 4
    def test_maintenance(self):
        """Exercise maintenance(): a stale "ready" digest is first moved
        back to the waiting set, and once it is older than the TTL it is
        removed entirely along with every associated key.
        """
        timeline = 'timeline'
        backend = RedisBackend(ttl=3600)

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        now = time.time()
        schedule_time = now - 60

        # Seed a timeline that was scheduled a minute ago and never
        # digested: ready set membership, one timeline record, one digest
        # record, and both payload keys.
        client = backend.cluster.get_local_client_for_key(timeline_key)
        client.zadd(ready_set_key, schedule_time, timeline)
        client.zadd(timeline_key, 0, '1')
        client.set(make_record_key(timeline_key, '1'), 'data')
        client.zadd(digest_key, 0, '2')
        client.set(make_record_key(timeline_key, '2'), 'data')

        # Move the digest from the ready set to the waiting set.
        backend.maintenance(now)
        assert client.zcard(ready_set_key) == 0
        assert client.zrange(waiting_set_key, 0, -1, withscores=True) == [(timeline, schedule_time)]

        # Put the timeline back into the ready set for the expiry phase.
        client.zrem(waiting_set_key, timeline)
        client.zadd(ready_set_key, schedule_time, timeline)

        # Delete the digest from the ready set.
        with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
            backend.maintenance(now)

        # Nothing related to the timeline should remain.
        for key in (
            ready_set_key,
            waiting_set_key,
            timeline_key,
            digest_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2'),
        ):
            assert client.exists(key) is False
Example 5
    def test_maintenance(self):
        """Verify backend.maintenance() first rescues a stale "ready"
        digest back to the waiting set, then (once past the TTL) deletes
        it and all associated keys outright.
        """
        timeline = 'timeline'
        backend = RedisBackend(ttl=3600)

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        now = time.time()

        connection = backend.cluster.get_local_client_for_key(timeline_key)
        # Simulate a timeline scheduled 60s ago that was never digested:
        # ready set membership, one timeline record, one digest record, and
        # their payload keys.
        schedule_time = now - 60
        connection.zadd(ready_set_key, schedule_time, timeline)
        connection.zadd(timeline_key, 0, '1')
        connection.set(make_record_key(timeline_key, '1'), 'data')
        connection.zadd(digest_key, 0, '2')
        connection.set(make_record_key(timeline_key, '2'), 'data')

        # Move the digest from the ready set to the waiting set.
        backend.maintenance(now)
        assert connection.zcard(ready_set_key) == 0
        assert connection.zrange(waiting_set_key, 0, -1, withscores=True) == [(timeline, schedule_time)]

        # Reset: place the timeline back in the ready set for the TTL case.
        connection.zrem(waiting_set_key, timeline)
        connection.zadd(ready_set_key, schedule_time, timeline)

        # Delete the digest from the ready set.
        # NOTE(review): time.time is patched past the TTL while the original
        # ``now`` is still passed in -- presumably maintenance() consults
        # time.time() internally for expiry; confirm against the backend.
        with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
            backend.maintenance(now)

        keys = (
            ready_set_key,
            waiting_set_key,
            timeline_key,
            digest_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2'),
        )
        # Everything associated with the timeline should now be gone.
        for key in keys:
            assert connection.exists(key) is False