def test_truncation(self):
    """Adding a record may truncate the timeline down to ``capacity``,
    gated on ``truncation_chance`` via ``random.random()``."""
    timeline = 'timeline'
    capacity = 5
    backend = self.get_backend({
        'capacity': capacity,
        'truncation_chance': 0.5,
    })

    timeline_key = make_timeline_key(backend.namespace, timeline)
    connection = backend.cluster.get_local_client_for_key(timeline_key)
    get_timeline_size = functools.partial(connection.zcard, timeline_key)

    fill = 10
    # random() above the truncation chance: no truncation occurs, so the
    # timeline grows past its capacity.
    with mock.patch('random.random', return_value=1.0):
        with self.assertChanges(get_timeline_size, before=0, after=fill):
            # Was ``xrange`` (Python 2 only); ``range`` matches the sibling
            # test and works on both Python 2 and 3.
            for _ in range(fill):
                backend.add(timeline, next(self.records))

    # random() below the truncation chance: the next add truncates the
    # timeline back down to capacity.
    with mock.patch('random.random', return_value=0.0):
        with self.assertChanges(get_timeline_size, before=fill, after=capacity):
            backend.add(timeline, next(self.records))
def test_delete(self):
    """``delete`` removes every key associated with a timeline: both
    schedule entries, the timeline and digest sets, and record payloads."""
    timeline = 'timeline'
    backend = RedisBackend()
    timeline_key = make_timeline_key(backend.namespace, timeline)
    digest_key = make_digest_key(timeline_key)
    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

    connection = backend.cluster.get_local_client_for_key(timeline_key)

    # Seed both schedule sets, the timeline, the digest, and one record
    # payload for each.
    connection.zadd(waiting_set_key, 0, timeline)
    connection.zadd(ready_set_key, 0, timeline)
    connection.zadd(timeline_key, 0, '1')
    connection.set(make_record_key(timeline_key, '1'), 'data')
    connection.zadd(digest_key, 0, '2')
    connection.set(make_record_key(timeline_key, '2'), 'data')

    keys = (
        waiting_set_key,
        ready_set_key,
        digest_key,
        timeline_key,
        make_record_key(timeline_key, '1'),
        make_record_key(timeline_key, '2'),
    )

    def check_keys_exist():
        # List comprehension instead of ``map`` so the result is a list on
        # both Python 2 and 3 — on Python 3, ``map`` returns an iterator,
        # which would never compare equal to the expected lists below.
        return [connection.exists(key) for key in keys]

    with self.assertChanges(check_keys_exist,
                            before=[True] * len(keys),
                            after=[False] * len(keys)):
        backend.delete(timeline)
def test_add_record(self):
    """Adding a record stores its payload, scores it into the timeline
    set, and schedules the timeline into the ready set."""
    timeline = 'timeline'
    backend = self.get_backend()
    timeline_key = make_timeline_key(backend.namespace, timeline)
    connection = backend.cluster.get_local_client_for_key(timeline_key)

    record = next(self.records)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)
    record_key = make_record_key(timeline_key, record.key)

    get_timeline_score_in_ready_set = functools.partial(
        connection.zscore, ready_set_key, timeline)
    get_record_score_in_timeline_set = functools.partial(
        connection.zscore, timeline_key, record.key)

    def get_record_value():
        # Decode the stored payload, or None if the key is absent.
        value = connection.get(record_key)
        if value is None:
            return None
        return backend.codec.decode(value)

    with self.assertChanges(get_timeline_score_in_ready_set,
                            before=None, after=record.timestamp), \
            self.assertChanges(get_record_score_in_timeline_set,
                               before=None, after=record.timestamp), \
            self.assertChanges(get_record_value,
                               before=None, after=record.value):
        backend.add(timeline, record)
def test_digesting_failure_recovery(self):
    """A failure during digesting must not lose records: they move to the
    digest set and are delivered on the next (successful) attempt."""
    backend = self.get_backend()

    # XXX: This assumes that adding records and scheduling are working
    # correctly in order to set up the state needed for this test!
    timeline = 'timeline'
    n = 10
    records = list(itertools.islice(self.records, n))
    for record in records:
        backend.add(timeline, record)
    for entry in backend.schedule(time.time()):
        pass

    timeline_key = make_timeline_key(backend.namespace, timeline)
    client = backend.cluster.get_local_client_for_key(timeline_key)

    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

    get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
    get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)
    get_timeline_size = functools.partial(client.zcard, timeline_key)
    get_digest_size = functools.partial(client.zcard, make_digest_key(timeline_key))

    # An error raised mid-digest moves the records into the digest set but
    # leaves the schedule untouched, so the timeline can be retried.
    with self.assertChanges(get_timeline_size, before=n, after=0), \
            self.assertChanges(get_digest_size, before=0, after=n), \
            self.assertDoesNotChange(get_waiting_set_size), \
            self.assertDoesNotChange(get_ready_set_size):
        try:
            with backend.digest(timeline) as entries:
                raise ExpectedError
        except ExpectedError:
            pass

    # Add another few records to the timeline to ensure they end up in the digest.
    extra = list(itertools.islice(self.records, 5))
    for record in extra:
        backend.add(timeline, record)

    # The retry should deliver both the stalled digest contents and the
    # newly-added records, then reschedule the timeline into waiting.
    with self.assertChanges(get_timeline_size, before=len(extra), after=0), \
            self.assertChanges(get_digest_size, before=len(records), after=0), \
            self.assertChanges(get_waiting_set_size, before=0, after=1), \
            self.assertChanges(get_ready_set_size, before=1, after=0):
        timestamp = time.time()
        with mock.patch('time.time', return_value=timestamp), \
                backend.digest(timeline) as entries:
            entries = list(entries)
            assert entries == (records + extra)[::-1]

    assert client.zscore(waiting_set_key, timeline) == timestamp + backend.minimum_delay
def test_digesting(self):
    """A successful digest drains the timeline, reschedules the timeline
    into the waiting set, and records the last-processed timestamp; an
    empty digest drops the timeline from the schedule entirely."""
    backend = self.get_backend()

    # XXX: This assumes that adding records and scheduling are working
    # correctly in order to set up the state needed for this test!
    timeline = 'timeline'
    n = 10
    records = list(itertools.islice(self.records, n))
    for record in records:
        backend.add(timeline, record)
    for entry in backend.schedule(time.time()):
        pass

    timeline_key = make_timeline_key(backend.namespace, timeline)
    client = backend.cluster.get_local_client_for_key(timeline_key)

    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

    get_timeline_size = functools.partial(client.zcard, timeline_key)
    get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
    get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)

    with self.assertChanges(get_timeline_size, before=n, after=0), \
            self.assertChanges(get_waiting_set_size, before=0, after=1), \
            self.assertChanges(get_ready_set_size, before=1, after=0):
        timestamp = time.time()
        with mock.patch('time.time', return_value=timestamp), \
                backend.digest(timeline) as entries:
            entries = list(entries)
            # Records are yielded in reverse (newest-first) order.
            assert entries == records[::-1]

    next_scheduled_delivery = timestamp + backend.minimum_delay
    assert client.zscore(waiting_set_key, timeline) == next_scheduled_delivery
    assert int(client.get(make_last_processed_timestamp_key(timeline_key))) == int(timestamp)

    # Move the timeline back to the ready set.
    for entry in backend.schedule(next_scheduled_delivery):
        pass

    # The digest should be removed from the schedule if it is empty.
    with self.assertDoesNotChange(get_waiting_set_size), \
            self.assertChanges(get_ready_set_size, before=1, after=0):
        with backend.digest(timeline) as entries:
            assert list(entries) == []

    assert client.get(make_last_processed_timestamp_key(timeline_key)) is None
def test_digesting_failure_recovery(self):
    """Records stranded by a failed digest attempt are delivered on the
    next attempt, which also bumps the iteration counter and reschedules
    the timeline using the backoff policy."""
    backend = self.get_backend()

    # XXX: This assumes that adding records and scheduling are working
    # correctly in order to set up the state needed for this test!
    timeline = 'timeline'
    n = 10
    records = list(itertools.islice(self.records, n))
    for record in records:
        backend.add(timeline, record)
    for entry in backend.schedule(time.time()):
        pass

    timeline_key = make_timeline_key(backend.namespace, timeline)
    client = backend.cluster.get_local_client_for_key(timeline_key)

    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

    get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
    get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)
    get_timeline_size = functools.partial(client.zcard, timeline_key)
    get_digest_size = functools.partial(client.zcard, make_digest_key(timeline_key))
    get_iteration_counter = functools.partial(client.get, make_iteration_key(timeline_key))

    # A failure mid-digest moves the records into the digest set but leaves
    # the schedule and the iteration counter untouched.
    with self.assertChanges(get_timeline_size, before=n, after=0), \
            self.assertChanges(get_digest_size, before=0, after=n), \
            self.assertDoesNotChange(get_waiting_set_size), \
            self.assertDoesNotChange(get_ready_set_size), \
            self.assertDoesNotChange(get_iteration_counter):
        try:
            with backend.digest(timeline) as entries:
                raise ExpectedError
        except ExpectedError:
            pass

    # Add another few records to the timeline to ensure they end up in the digest.
    extra = list(itertools.islice(self.records, 5))
    for record in extra:
        backend.add(timeline, record)

    # The retry delivers the stalled digest plus the new records, bumps the
    # iteration counter, and reschedules with the first backoff interval.
    with self.assertChanges(get_timeline_size, before=len(extra), after=0), \
            self.assertChanges(get_digest_size, before=len(records), after=0), \
            self.assertChanges(get_waiting_set_size, before=0, after=1), \
            self.assertChanges(get_ready_set_size, before=1, after=0), \
            self.assertChanges(get_iteration_counter, before='0', after='1'):
        timestamp = time.time()
        with mock.patch('time.time', return_value=timestamp), \
                backend.digest(timeline) as entries:
            entries = list(entries)
            assert entries == (records + extra)[::-1]

    assert client.zscore(waiting_set_key, timeline) == timestamp + backend.backoff(1)
def test_digesting(self):
    """Digesting drains the timeline and reschedules it into the waiting
    set; digesting an empty timeline removes it from the schedule."""
    backend = RedisBackend()

    # XXX: This assumes that adding records and scheduling are working
    # correctly in order to set up the state needed for this test!
    timeline = 'timeline'
    n = 10
    records = list(itertools.islice(self.records, n))
    for record in records:
        backend.add(timeline, record)
    for entry in backend.schedule(time.time()):
        pass

    timeline_key = make_timeline_key(backend.namespace, timeline)
    client = backend.cluster.get_local_client_for_key(timeline_key)

    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

    get_timeline_size = functools.partial(client.zcard, timeline_key)
    get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
    get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)

    with self.assertChanges(get_timeline_size, before=n, after=0), \
            self.assertChanges(get_waiting_set_size, before=0, after=1), \
            self.assertChanges(get_ready_set_size, before=1, after=0):
        timestamp = time.time()
        with mock.patch('time.time', return_value=timestamp), \
                backend.digest(timeline) as entries:
            entries = list(entries)
            # Entries come back newest-first.
            assert entries == records[::-1]

    next_scheduled_delivery = timestamp + backend.minimum_delay
    assert client.zscore(waiting_set_key, timeline) == next_scheduled_delivery
    assert int(client.get(make_last_processed_timestamp_key(timeline_key))) == int(timestamp)

    # Move the timeline back to the ready set.
    for entry in backend.schedule(next_scheduled_delivery):
        pass

    # The digest should be removed from the schedule if it is empty.
    with self.assertDoesNotChange(get_waiting_set_size), \
            self.assertChanges(get_ready_set_size, before=1, after=0):
        with backend.digest(timeline) as entries:
            assert list(entries) == []

    assert client.get(make_last_processed_timestamp_key(timeline_key)) is None
def test_maintenance(self):
    """Maintenance first recovers stale ready-set entries back into the
    waiting set; once past the TTL it deletes the digest entirely."""
    timeline = 'timeline'
    backend = RedisBackend(ttl=3600)
    timeline_key = make_timeline_key(backend.namespace, timeline)
    digest_key = make_digest_key(timeline_key)
    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

    now = time.time()
    connection = backend.cluster.get_local_client_for_key(timeline_key)

    # Seed a timeline that has been sitting in the ready set for a minute.
    schedule_time = now - 60
    connection.zadd(ready_set_key, schedule_time, timeline)
    connection.zadd(timeline_key, 0, '1')
    connection.set(make_record_key(timeline_key, '1'), 'data')
    connection.zadd(digest_key, 0, '2')
    connection.set(make_record_key(timeline_key, '2'), 'data')

    # Move the digest from the ready set to the waiting set.
    backend.maintenance(now)
    assert connection.zcard(ready_set_key) == 0
    assert connection.zrange(waiting_set_key, 0, -1, withscores=True) == [
        (timeline, schedule_time),
    ]

    # Reset: put the timeline back into the ready set.
    connection.zrem(waiting_set_key, timeline)
    connection.zadd(ready_set_key, schedule_time, timeline)

    # Delete the digest from the ready set.
    with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
        backend.maintenance(now)

    keys = (
        ready_set_key,
        waiting_set_key,
        timeline_key,
        digest_key,
        make_record_key(timeline_key, '1'),
        make_record_key(timeline_key, '2'),
    )
    for key in keys:
        assert connection.exists(key) is False
def test_truncation(self):
    """Truncation trims the timeline back to capacity, but only when the
    truncation chance check (driven by ``random.random()``) passes."""
    timeline = 'timeline'
    capacity = 5
    backend = RedisBackend(capacity=capacity, truncation_chance=0.5)
    timeline_key = make_timeline_key(backend.namespace, timeline)
    connection = backend.cluster.get_local_client_for_key(timeline_key)
    get_timeline_size = functools.partial(connection.zcard, timeline_key)

    fill = 10
    # With random() pinned above the chance threshold, no truncation
    # happens and the timeline overfills.
    with mock.patch('random.random', return_value=1.0):
        with self.assertChanges(get_timeline_size, before=0, after=fill):
            for _ in range(fill):
                backend.add(timeline, next(self.records))

    # With random() pinned below the threshold, the next add truncates the
    # timeline down to capacity.
    with mock.patch('random.random', return_value=0.0):
        with self.assertChanges(get_timeline_size, before=fill, after=capacity):
            backend.add(timeline, next(self.records))
def test_maintenance(self):
    """Pre-TTL maintenance requeues a stale ready-set timeline into the
    waiting set; post-TTL maintenance deletes every associated key."""
    timeline = 'timeline'
    backend = RedisBackend(ttl=3600)
    timeline_key = make_timeline_key(backend.namespace, timeline)
    digest_key = make_digest_key(timeline_key)
    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

    now = time.time()
    connection = backend.cluster.get_local_client_for_key(timeline_key)

    # Set up a timeline stuck in the ready set, with one record in the
    # timeline and one in the digest.
    schedule_time = now - 60
    connection.zadd(ready_set_key, schedule_time, timeline)
    connection.zadd(timeline_key, 0, '1')
    connection.set(make_record_key(timeline_key, '1'), 'data')
    connection.zadd(digest_key, 0, '2')
    connection.set(make_record_key(timeline_key, '2'), 'data')

    # Move the digest from the ready set to the waiting set.
    backend.maintenance(now)
    assert connection.zcard(ready_set_key) == 0
    expected_schedule = [(timeline, schedule_time)]
    assert connection.zrange(waiting_set_key, 0, -1, withscores=True) == expected_schedule

    # Restore the stuck state for the expiration pass.
    connection.zrem(waiting_set_key, timeline)
    connection.zadd(ready_set_key, schedule_time, timeline)

    # Delete the digest from the ready set.
    with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
        backend.maintenance(now)

    for key in (ready_set_key,
                waiting_set_key,
                timeline_key,
                digest_key,
                make_record_key(timeline_key, '1'),
                make_record_key(timeline_key, '2')):
        assert connection.exists(key) is False
def test_add_record(self):
    """Adding a record writes the payload, scores the record into the
    timeline, and schedules the timeline in the ready set."""
    timeline = 'timeline'
    backend = RedisBackend()
    timeline_key = make_timeline_key(backend.namespace, timeline)
    connection = backend.cluster.get_local_client_for_key(timeline_key)

    record = next(self.records)
    ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)
    record_key = make_record_key(timeline_key, record.key)

    get_timeline_score_in_ready_set = functools.partial(
        connection.zscore, ready_set_key, timeline)
    get_record_score_in_timeline_set = functools.partial(
        connection.zscore, timeline_key, record.key)

    def get_record_value():
        # Decode the stored payload, or None when nothing is stored yet.
        value = connection.get(record_key)
        if value is None:
            return None
        return backend.codec.decode(value)

    with self.assertChanges(get_timeline_score_in_ready_set,
                            before=None, after=record.timestamp), \
            self.assertChanges(get_record_score_in_timeline_set,
                               before=None, after=record.timestamp), \
            self.assertChanges(get_record_value,
                               before=None, after=record.value):
        backend.add(timeline, record)
def test_add_record(self):
    """Adding a record stores the payload, scores it into the timeline,
    initializes the iteration counter, and schedules the timeline into the
    waiting set using the base backoff interval."""
    timeline = 'timeline'
    backend = self.get_backend()
    timeline_key = make_timeline_key(backend.namespace, timeline)
    connection = backend.cluster.get_local_client_for_key(timeline_key)

    record = next(self.records)
    waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
    record_key = make_record_key(timeline_key, record.key)

    get_timeline_score_in_waiting_set = functools.partial(
        connection.zscore, waiting_set_key, timeline)
    get_timeline_iteration_counter = functools.partial(
        connection.get, make_iteration_key(timeline_key))
    get_record_score_in_timeline_set = functools.partial(
        connection.zscore, timeline_key, record.key)

    def get_record_value():
        # Decode the stored payload, or None when nothing is stored yet.
        value = connection.get(record_key)
        if value is None:
            return None
        return backend.codec.decode(value)

    with self.assertChanges(get_timeline_score_in_waiting_set,
                            before=None,
                            after=record.timestamp + backend.backoff(0)), \
            self.assertChanges(get_timeline_iteration_counter,
                               before=None, after='0'), \
            self.assertChanges(get_record_score_in_timeline_set,
                               before=None, after=record.timestamp), \
            self.assertChanges(get_record_value,
                               before=None, after=record.value):
        backend.add(timeline, record)