Example #1
    def test_maintenance_failure_recovery_with_capacity(self):
        backend = RedisBackend(capacity=10, truncation_chance=0.0)

        t = time.time()

        # Add 10 items to the timeline.
        for i in range(10):
            backend.add('timeline', Record('record:{}'.format(i), '{}'.format(i), t + i))

        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # The 10 existing items should now be in the digest set (the exception
        # prevented the close operation from occurring, so they were never
        # deleted from Redis or removed from the digest set). If we add 10 more
        # items, they should be added to the timeline set (not the digest set).
        for i in range(10, 20):
            backend.add('timeline', Record('record:{}'.format(i), '{}'.format(i), t + i))

        # Maintenance should move the timeline back to the waiting state.
        backend.maintenance(time.time())

        # The schedule should now contain the timeline.
        assert set(entry.key for entry in backend.schedule(time.time())) == set(['timeline'])

        # Only the new records should exist -- the older ones should have been
        # trimmed to avoid the digest growing beyond the timeline capacity.
        with backend.digest('timeline', 0) as records:
            expected_keys = set('record:{}'.format(i) for i in range(10, 20))
            assert set(record.key for record in records) == expected_keys
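
The test above leans on backend.digest acting like a transaction: records are
only deleted from Redis when the block exits cleanly. A minimal sketch of that
contract in use, assuming the import paths used by the module under test
(sentry.digests.Record, sentry.digests.backends.redis.RedisBackend) and a
hypothetical deliver callable:

    import time

    from sentry.digests import Record
    from sentry.digests.backends.redis import RedisBackend

    backend = RedisBackend()
    backend.add('timeline', Record('record:1', 'value', time.time()))

    try:
        with backend.digest('timeline', 0) as records:
            deliver(records)  # hypothetical delivery step; may raise
    except Exception:
        # The close step never ran, so the records stay in the digest set and
        # will be merged into the next digest rather than being lost.
        pass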
Example #2
    def test_digest_enabled(self, digests, mock_func):
        """
        Test that with digests enabled, but Slack notification settings
        (and not email settings), we send a Slack notification
        """
        backend = RedisBackend()
        digests.digest = backend.digest
        digests.enabled.return_value = True

        rule = Rule.objects.create(project=self.project, label="my rule")
        event = self.store_event(
            data={"message": "Hello world", "level": "error"}, project_id=self.project.id
        )
        key = f"mail:p:{self.project.id}"
        backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)

        with self.tasks():
            deliver_digest(key)

        assert digests.call_count == 0

        attachment, text = get_attachment()

        assert attachment["title"] == "Hello world"
        assert attachment["text"] == ""
Example #3
    def test_scheduling(self):
        backend = RedisBackend()

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        n = 10

        for i in range(n):
            with backend.cluster.map() as client:
                client.zadd(waiting_set_key, i, 'timelines:{0}'.format(i))

        for i in range(n, n * 2):
            with backend.cluster.map() as client:
                client.zadd(ready_set_key, i, 'timelines:{0}'.format(i))

        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)

        with self.assertChanges(get_waiting_set_size, before=n, after=0), \
                self.assertChanges(get_ready_set_size, before=n, after=n * 2):
            results = list(zip(range(n), backend.schedule(n, chunk=5)))
            assert len(results) == n

            # Ensure scheduled entries are returned earliest first.
            for i, entry in results:
                assert entry.key == 'timelines:{0}'.format(i)
                assert entry.timestamp == float(i)
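
As the assertions above suggest, schedule(deadline, chunk=...) both moves due
timelines from the waiting set to the ready set and yields the moved entries,
earliest first. A hedged consumption sketch with backend as above, reusing the
entry.key and entry.timestamp attributes the test relies on (dispatch_digest
is a hypothetical consumer):

    import time

    deadline = time.time()
    # chunk bounds how many entries are moved per batch, as in the test above.
    for entry in backend.schedule(deadline, chunk=5):
        dispatch_digest(entry.key)  # hypothetical: hand off a ready timeline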
Example #4
    def test_delete(self):
        timeline = 'timeline'
        backend = RedisBackend()

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        connection = backend.cluster.get_local_client_for_key(timeline_key)
        connection.zadd(waiting_set_key, 0, timeline)
        connection.zadd(ready_set_key, 0, timeline)
        connection.zadd(timeline_key, 0, '1')
        connection.set(make_record_key(timeline_key, '1'), 'data')
        connection.zadd(digest_key, 0, '2')
        connection.set(make_record_key(timeline_key, '2'), 'data')

        keys = (
            waiting_set_key,
            ready_set_key,
            digest_key,
            timeline_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2')
        )

        def check_keys_exist():
            return [connection.exists(key) for key in keys]

        with self.assertChanges(check_keys_exist, before=[True] * len(keys), after=[False] * len(keys)):
            backend.delete(timeline)
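
Every key above is derived from the backend namespace by the make_* helpers.
Judging from the raw key 'd:t:timeline:r:record:1' used in
test_missing_record_contents elsewhere in this collection, the default
namespace is 'd' and record keys nest under the timeline key; a sketch of that
inference (not documented behavior):

    timeline_key = make_timeline_key(backend.namespace, 'timeline')
    record_key = make_record_key(timeline_key, 'record:1')
    # With the default namespace these appear to evaluate to:
    #   timeline_key == 'd:t:timeline'
    #   record_key   == 'd:t:timeline:r:record:1'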
Example #5
    def test_add_record(self):
        timeline = 'timeline'
        backend = RedisBackend()

        timeline_key = make_timeline_key(backend.namespace, timeline)
        connection = backend.cluster.get_local_client_for_key(timeline_key)

        record = next(self.records)
        ready_set_key = make_schedule_key(backend.namespace,
                                          SCHEDULE_STATE_READY)
        record_key = make_record_key(timeline_key, record.key)

        get_timeline_score_in_ready_set = functools.partial(
            connection.zscore, ready_set_key, timeline)
        get_record_score_in_timeline_set = functools.partial(
            connection.zscore, timeline_key, record.key)

        def get_record_value():
            value = connection.get(record_key)
            return backend.codec.decode(value) if value is not None else None

        with self.assertChanges(get_timeline_score_in_ready_set, before=None, after=record.timestamp), \
                self.assertChanges(get_record_score_in_timeline_set, before=None, after=record.timestamp), \
                self.assertChanges(get_record_value, before=None, after=record.value):
            backend.add(timeline, record)
Example #6
    def test_truncation(self):
        backend = RedisBackend(capacity=2, truncation_chance=1.0)

        records = [Record(f"record:{i}", "value", time.time()) for i in range(4)]
        for record in records:
            backend.add("timeline", record)

        with backend.digest("timeline", 0) as records:
            assert set(records) == set(records[-2:])
Example #7
    def test_truncation(self):
        backend = RedisBackend(capacity=2, truncation_chance=1.0)

        records = [Record('record:{}'.format(i), 'value', time.time()) for i in range(4)]
        for record in records:
            backend.add('timeline', record)

        with backend.digest('timeline', 0) as digest_records:
            assert set(digest_records) == set(records[-2:])
Example #8
    def test_large_digest(self):
        backend = RedisBackend()

        n = 8192
        t = time.time()
        for i in range(n):
            backend.add("timeline", Record(f"record:{i}", f"{i}", t))

        with backend.digest("timeline", 0) as records:
            assert len(set(records)) == n
Example #9
    def test_large_digest(self):
        backend = RedisBackend()

        n = 8192
        t = time.time()
        for i in range(n):
            backend.add('timeline', Record('record:{}'.format(i), '{}'.format(i), t))

        with backend.digest('timeline', 0) as records:
            assert len(set(records)) == n
Example #10
    def test_basic(self):
        backend = RedisBackend()

        # The first item should return "true", indicating that this timeline
        # can be immediately dispatched to be digested.
        record_1 = Record('record:1', 'value', time.time())
        assert backend.add('timeline', record_1) is True

        # The second item should return "false", since it's ready to be
        # digested but dispatching again would cause it to be sent twice.
        record_2 = Record('record:2', 'value', time.time())
        assert backend.add('timeline', record_2) is False

        # There's nothing to move between sets, so scheduling should return nothing.
        assert set(backend.schedule(time.time())) == set()

        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_1, record_2])

        # The schedule should now contain the timeline.
        assert set(entry.key
                   for entry in backend.schedule(time.time())) == set(
                       ['timeline'])

        # We didn't add any new records so there's nothing to do here.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([])

        # There's nothing to move between sets since the timeline contents no
        # longer exist at this point.
        assert set(backend.schedule(time.time())) == set()
Example #11
    def test_large_digest(self):
        backend = RedisBackend()

        n = 8192
        t = time.time()
        for i in range(n):
            backend.add('timeline',
                        Record('record:{}'.format(i), '{}'.format(i), t))

        with backend.digest('timeline', 0) as records:
            assert len(set(records)) == n
Example #12
    def test_digesting_failure_recovery(self):
        backend = RedisBackend()

        # XXX: This assumes that adding records and scheduling are working
        # correctly to set up the state needed for this test!

        timeline = 'timeline'
        n = 10
        records = list(itertools.islice(self.records, n))
        for record in records:
            backend.add(timeline, record)

        for entry in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace,
                                            SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace,
                                          SCHEDULE_STATE_READY)

        get_waiting_set_size = functools.partial(get_set_size, backend.cluster,
                                                 waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster,
                                               ready_set_key)
        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_digest_size = functools.partial(client.zcard,
                                            make_digest_key(timeline_key))

        with self.assertChanges(get_timeline_size, before=n, after=0), \
                self.assertChanges(get_digest_size, before=0, after=n), \
                self.assertDoesNotChange(get_waiting_set_size), \
                self.assertDoesNotChange(get_ready_set_size):
            try:
                with backend.digest(timeline) as entries:
                    raise ExpectedError
            except ExpectedError:
                pass

        # Add another few records to the timeline to ensure they end up in the digest.
        extra = list(itertools.islice(self.records, 5))
        for record in extra:
            backend.add(timeline, record)

        with self.assertChanges(get_timeline_size, before=len(extra), after=0), \
                self.assertChanges(get_digest_size, before=len(records), after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                entries = list(entries)
                assert entries == (records + extra)[::-1]

            assert client.zscore(waiting_set_key,
                                 timeline) == timestamp + backend.minimum_delay
Example #13
    def test_truncation(self):
        backend = RedisBackend(capacity=2, truncation_chance=1.0)

        records = [
            Record('record:{}'.format(i), 'value', time.time())
            for i in range(4)
        ]
        for record in records:
            backend.add('timeline', record)

        with backend.digest('timeline', 0) as digest_records:
            assert set(digest_records) == set(records[-2:])
Example #14
    def test_basic(self):
        backend = RedisBackend()

        # The first item should return "true", indicating that this timeline
        # can be immediately dispatched to be digested.
        record_1 = Record('record:1', 'value', time.time())
        assert backend.add('timeline', record_1) is True

        # The second item should return "false", since it's ready to be
        # digested but dispatching again would cause it to be sent twice.
        record_2 = Record('record:2', 'value', time.time())
        assert backend.add('timeline', record_2) is False

        # There's nothing to move between sets, so scheduling should return nothing.
        assert set(backend.schedule(time.time())) == set()

        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_1, record_2])

        # The schedule should now contain the timeline.
        assert set(entry.key for entry in backend.schedule(time.time())) == set(['timeline'])

        # We didn't add any new records so there's nothing to do here.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([])

        # There's nothing to move between sets since the timeline contents no
        # longer exist at this point.
        assert set(backend.schedule(time.time())) == set()
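
The boolean returned by add drives dispatch: True means the timeline just
became ready and exactly one digest delivery should be scheduled, while False
means a delivery is already pending. A sketch of the calling pattern this
implies, with record a Record instance as in the tests above
(schedule_digest_delivery is a hypothetical task enqueue):

    if backend.add('timeline', record):
        # First record since the timeline was last digested: enqueue one
        # delivery task. Subsequent adds return False, avoiding double sends.
        schedule_digest_delivery('timeline')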
Example #15
    def test_delete(self):
        backend = RedisBackend()
        backend.add("timeline", Record("record:1", "value", time.time()))
        backend.delete("timeline")

        with pytest.raises(InvalidState):
            with backend.digest("timeline", 0) as records:
                assert set(records) == set()

        assert set(backend.schedule(time.time())) == set()
        assert len(backend._get_connection("timeline").keys("d:*")) == 0
Example #16
    def test_delete(self):
        backend = RedisBackend()
        backend.add('timeline', Record('record:1', 'value', time.time()))
        backend.delete('timeline')

        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                assert set(records) == set([])

        assert set(backend.schedule(time.time())) == set()
        assert len(backend._get_connection('timeline').keys('d:*')) == 0
Example #17
    def test_digesting(self):
        backend = RedisBackend()

        # XXX: This assumes that adding records and scheduling are working
        # correctly to set up the state needed for this test!

        timeline = 'timeline'
        n = 10
        records = list(itertools.islice(self.records, n))
        for record in records:
            backend.add(timeline, record)

        for entry in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace,
                                            SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace,
                                          SCHEDULE_STATE_READY)

        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_waiting_set_size = functools.partial(get_set_size, backend.cluster,
                                                 waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster,
                                               ready_set_key)

        with self.assertChanges(get_timeline_size, before=n, after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):

            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                entries = list(entries)
                assert entries == records[::-1]

            next_scheduled_delivery = timestamp + backend.minimum_delay
            assert client.zscore(waiting_set_key,
                                 timeline) == next_scheduled_delivery
            assert int(
                client.get(make_last_processed_timestamp_key(
                    timeline_key))) == int(timestamp)

        # Move the timeline back to the ready set.
        for entry in backend.schedule(next_scheduled_delivery):
            pass

        # The digest should be removed from the schedule if it is empty.
        with self.assertDoesNotChange(get_waiting_set_size), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            with backend.digest(timeline) as entries:
                assert list(entries) == []

        assert client.get(
            make_last_processed_timestamp_key(timeline_key)) is None
Example #18
    def test_missing_record_contents(self):
        backend = RedisBackend()

        record_1 = Record('record:1', 'value', time.time())
        backend.add('timeline', record_1)
        backend._get_connection('timeline').delete('d:t:timeline:r:record:1')

        record_2 = Record('record:2', 'value', time.time())
        backend.add('timeline', record_2)

        # Only the new record should be present; the first record is dropped
        # because its contents were deleted from Redis.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_2])
Example #19
    def test_missing_record_contents(self):
        backend = RedisBackend()

        record_1 = Record("record:1", "value", time.time())
        backend.add("timeline", record_1)
        backend._get_connection("timeline").delete("d:t:timeline:r:record:1")

        record_2 = Record("record:2", "value", time.time())
        backend.add("timeline", record_2)

        # Only the new record should be present; the first record is dropped
        # because its contents were deleted from Redis.
        with backend.digest("timeline", 0) as records:
            assert set(records) == {record_2}
Example #20
    def test_delete(self):
        backend = RedisBackend()
        backend.add('timeline', Record('record:1', 'value', time.time()))
        backend.delete('timeline')

        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                assert set(records) == set([])

        assert set(backend.schedule(time.time())) == set()
        assert len(backend._get_connection('timeline').keys('d:*')) == 0
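
delete drops every key belonging to the timeline, and digesting a deleted (or
never-scheduled) timeline raises InvalidState, which these tests appear to
import from the digests backend base module. A short usage sketch:

    backend.delete('timeline')
    try:
        with backend.digest('timeline', 0):
            pass
    except InvalidState:
        # Nothing to digest: the timeline no longer exists in any schedule set.
        pass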
Example #21
    def test_digesting_failure_recovery(self):
        backend = RedisBackend()

        # XXX: This assumes that adding records and scheduling are working
        # correctly to set up the state needed for this test!

        timeline = 'timeline'
        n = 10
        records = list(itertools.islice(self.records, n))
        for record in records:
            backend.add(timeline, record)

        for entry in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)
        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_digest_size = functools.partial(client.zcard, make_digest_key(timeline_key))

        with self.assertChanges(get_timeline_size, before=n, after=0), \
                self.assertChanges(get_digest_size, before=0, after=n), \
                self.assertDoesNotChange(get_waiting_set_size), \
                self.assertDoesNotChange(get_ready_set_size):
            try:
                with backend.digest(timeline) as entries:
                    raise ExpectedError
            except ExpectedError:
                pass

        # Add another few records to the timeline to ensure they end up in the digest.
        extra = list(itertools.islice(self.records, 5))
        for record in extra:
            backend.add(timeline, record)

        with self.assertChanges(get_timeline_size, before=len(extra), after=0), \
                self.assertChanges(get_digest_size, before=len(records), after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                entries = list(entries)
                assert entries == (records + extra)[::-1]

            assert client.zscore(waiting_set_key, timeline) == timestamp + backend.minimum_delay
Example #22
    def run_test(self, digests, event_count: int):
        backend = RedisBackend()
        digests.digest = backend.digest

        for i in range(event_count):
            self.add_event(f"group-{i}", backend)

        with self.tasks():
            deliver_digest(self.key)

        assert len(mail.outbox) == USER_COUNT
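
Tests like this one point the mocked digests service at a concrete
RedisBackend so that deliver_digest exercises real Redis state. A hedged
sketch of the wiring, with digests standing in for the patched service and
key, event, and rule defined as in the surrounding examples:

    backend = RedisBackend()
    digests.digest = backend.digest      # route digest reads to the test backend
    digests.enabled.return_value = True  # pretend the feature is on (mock)

    backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
    with self.tasks():
        deliver_digest(key)              # runs the delivery task inline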
Example #23
    def test_maintenance(self):
        timeline = 'timeline'
        backend = RedisBackend(ttl=3600)

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        now = time.time()

        connection = backend.cluster.get_local_client_for_key(timeline_key)
        schedule_time = now - 60
        connection.zadd(ready_set_key, schedule_time, timeline)
        connection.zadd(timeline_key, 0, '1')
        connection.set(make_record_key(timeline_key, '1'), 'data')
        connection.zadd(digest_key, 0, '2')
        connection.set(make_record_key(timeline_key, '2'), 'data')

        # Move the digest from the ready set to the waiting set.
        backend.maintenance(now)
        assert connection.zcard(ready_set_key) == 0
        assert connection.zrange(waiting_set_key, 0, -1, withscores=True) == [(timeline, schedule_time)]

        connection.zrem(waiting_set_key, timeline)
        connection.zadd(ready_set_key, schedule_time, timeline)

        # Delete the digest from the ready set.
        with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
            backend.maintenance(now)

        keys = (
            ready_set_key,
            waiting_set_key,
            timeline_key,
            digest_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2'),
        )
        for key in keys:
            assert not connection.exists(key)
Example #24
    def run_test(self, key, digests):
        """
        Simple integration test to make sure that digests are firing as expected.
        """
        backend = RedisBackend()
        rule = Rule.objects.create(project=self.project, label="Test Rule", data={})
        event = self.store_event(
            data={"timestamp": iso_format(before_now(days=1)), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        event_2 = self.store_event(
            data={"timestamp": iso_format(before_now(days=1)), "fingerprint": ["group-2"]},
            project_id=self.project.id,
        )
        key = f"mail:p:{self.project.id}"
        backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
        backend.add(key, event_to_record(event_2, [rule]), increment_delay=0, maximum_delay=0)
        digests.digest = backend.digest
        with self.tasks():
            deliver_digest(key)
        assert "2 new alerts since" in mail.outbox[0].subject
Example #25
    def test_missing_record_contents(self):
        backend = RedisBackend()

        record_1 = Record('record:1', 'value', time.time())
        backend.add('timeline', record_1)
        backend._get_connection('timeline').delete('d:t:timeline:r:record:1')

        record_2 = Record('record:2', 'value', time.time())
        backend.add('timeline', record_2)

        # Only the new record should be present; the first record is dropped
        # because its contents were deleted from Redis.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_2])
Example #26
    def test_add_record(self):
        timeline = 'timeline'
        backend = RedisBackend()

        timeline_key = make_timeline_key(backend.namespace, timeline)
        connection = backend.cluster.get_local_client_for_key(timeline_key)

        record = next(self.records)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)
        record_key = make_record_key(timeline_key, record.key)

        get_timeline_score_in_ready_set = functools.partial(connection.zscore, ready_set_key, timeline)
        get_record_score_in_timeline_set = functools.partial(connection.zscore, timeline_key, record.key)

        def get_record_value():
            value = connection.get(record_key)
            return backend.codec.decode(value) if value is not None else None

        with self.assertChanges(get_timeline_score_in_ready_set, before=None, after=record.timestamp), \
                self.assertChanges(get_record_score_in_timeline_set, before=None, after=record.timestamp), \
                self.assertChanges(get_record_value, before=None, after=record.value):
            backend.add(timeline, record)
Example #27
    def test_digesting(self):
        backend = RedisBackend()

        # XXX: This assumes that adding records and scheduling are working
        # correctly to set up the state needed for this test!

        timeline = 'timeline'
        n = 10
        records = list(itertools.islice(self.records, n))
        for record in records:
            backend.add(timeline, record)

        for entry in backend.schedule(time.time()):
            pass

        timeline_key = make_timeline_key(backend.namespace, timeline)
        client = backend.cluster.get_local_client_for_key(timeline_key)

        waiting_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace, SCHEDULE_STATE_READY)

        get_timeline_size = functools.partial(client.zcard, timeline_key)
        get_waiting_set_size = functools.partial(get_set_size, backend.cluster, waiting_set_key)
        get_ready_set_size = functools.partial(get_set_size, backend.cluster, ready_set_key)

        with self.assertChanges(get_timeline_size, before=n, after=0), \
                self.assertChanges(get_waiting_set_size, before=0, after=1), \
                self.assertChanges(get_ready_set_size, before=1, after=0):

            timestamp = time.time()
            with mock.patch('time.time', return_value=timestamp), \
                    backend.digest(timeline) as entries:
                entries = list(entries)
                assert entries == records[::-1]

            next_scheduled_delivery = timestamp + backend.minimum_delay
            assert client.zscore(waiting_set_key, timeline) == next_scheduled_delivery
            assert int(client.get(make_last_processed_timestamp_key(timeline_key))) == int(timestamp)

        # Move the timeline back to the ready set.
        for entry in backend.schedule(next_scheduled_delivery):
            pass

        # The digest should be removed from the schedule if it is empty.
        with self.assertDoesNotChange(get_waiting_set_size), \
                self.assertChanges(get_ready_set_size, before=1, after=0):
            with backend.digest(timeline) as entries:
                assert list(entries) == []

        assert client.get(make_last_processed_timestamp_key(timeline_key)) is None
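
Read together, the assertions above outline the digest lifecycle: entering
digest moves the timeline's records into the digest set and yields them
newest-first; a clean exit clears the digest set, stamps the last-processed
timestamp, and reschedules the timeline into the waiting set at
now + minimum_delay; an empty digest is simply dropped from the schedule. A
sketch of that flow as inferred from the tests (process is hypothetical):

    with backend.digest(timeline) as entries:  # timeline set -> digest set
        for entry in entries:                  # yielded newest-first
            process(entry)
    # On success: digest set cleared, last-processed timestamp written, and
    # the timeline rescheduled in the waiting set at now + minimum_delay.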
Example #28
    def test_maintenance(self):
        timeline = 'timeline'
        backend = RedisBackend(ttl=3600)

        timeline_key = make_timeline_key(backend.namespace, timeline)
        digest_key = make_digest_key(timeline_key)
        waiting_set_key = make_schedule_key(backend.namespace,
                                            SCHEDULE_STATE_WAITING)
        ready_set_key = make_schedule_key(backend.namespace,
                                          SCHEDULE_STATE_READY)

        now = time.time()

        connection = backend.cluster.get_local_client_for_key(timeline_key)
        schedule_time = now - 60
        connection.zadd(ready_set_key, schedule_time, timeline)
        connection.zadd(timeline_key, 0, '1')
        connection.set(make_record_key(timeline_key, '1'), 'data')
        connection.zadd(digest_key, 0, '2')
        connection.set(make_record_key(timeline_key, '2'), 'data')

        # Move the digest from the ready set to the waiting set.
        backend.maintenance(now)
        assert connection.zcard(ready_set_key) == 0
        assert connection.zrange(waiting_set_key, 0, -1, withscores=True) == [
            (timeline, schedule_time)
        ]

        connection.zrem(waiting_set_key, timeline)
        connection.zadd(ready_set_key, schedule_time, timeline)

        # Delete the digest from the ready set.
        with mock.patch('time.time', return_value=now + (backend.ttl + 1)):
            backend.maintenance(now)

        keys = (
            ready_set_key,
            waiting_set_key,
            timeline_key,
            digest_key,
            make_record_key(timeline_key, '1'),
            make_record_key(timeline_key, '2'),
        )
        for key in keys:
            assert not connection.exists(key)
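
maintenance appears to do two jobs in this test: move timelines stuck in the
ready set back to the waiting set (preserving their scores), and delete
timelines whose data has outlived the backend ttl. A hedged sketch of periodic
use, mirroring the behavior asserted above rather than a documented contract:

    import time

    backend = RedisBackend(ttl=3600)
    # Run periodically (e.g. from a cron task); the deadline bounds which
    # ready-set entries are considered stuck.
    backend.maintenance(time.time())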
Example #29
    def test_truncation(self):
        timeline = 'timeline'
        capacity = 5
        backend = RedisBackend(capacity=capacity, truncation_chance=0.5)

        timeline_key = make_timeline_key(backend.namespace, timeline)
        connection = backend.cluster.get_local_client_for_key(timeline_key)

        get_timeline_size = functools.partial(connection.zcard, timeline_key)

        fill = 10

        with mock.patch('random.random', return_value=1.0):
            with self.assertChanges(get_timeline_size, before=0, after=fill):
                for _ in range(fill):
                    backend.add(timeline, next(self.records))

        with mock.patch('random.random', return_value=0.0):
            with self.assertChanges(get_timeline_size, before=fill, after=capacity):
                backend.add(timeline, next(self.records))
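
The random.random mocks above work because truncation is probabilistic: each
add trims the timeline down to capacity only when the random draw falls below
truncation_chance, amortizing trim cost across writes. A sketch of tuning
those knobs, assuming the constructor arguments used throughout these tests:

    # Trim roughly once per hundred adds; capacity bounds the timeline length
    # whenever a trim does fire.
    backend = RedisBackend(capacity=1000, truncation_chance=1.0 / 100)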
Example #30
    def test_issue_alert_team_issue_owners_user_settings_off_digests(
            self, digests, mock_func):
        """Test that issue alerts are sent to a team in Slack via an Issue Owners rule action
        even when the users' issue alert notification settings are off and digests are triggered."""

        backend = RedisBackend()
        digests.digest = backend.digest
        digests.enabled.return_value = True

        # turn off the user's issue alert notification settings
        # there was a bug where issue alerts to a team's Slack channel
        # were only firing if this was set to ALWAYS
        NotificationSetting.objects.update_settings(
            ExternalProviders.SLACK,
            NotificationSettingTypes.ISSUE_ALERTS,
            NotificationSettingOptionValues.NEVER,
            user=self.user,
        )
        # add a second user to the team so we can be sure it's only
        # sent once (to the team, and not to each individual user)
        user2 = self.create_user(is_superuser=False)
        self.create_member(teams=[self.team],
                           user=user2,
                           organization=self.organization)
        self.idp = IdentityProvider.objects.create(type="slack",
                                                   external_id="TXXXXXXX2",
                                                   config={})
        self.identity = Identity.objects.create(
            external_id="UXXXXXXX2",
            idp=self.idp,
            user=user2,
            status=IdentityStatus.VALID,
            scopes=[],
        )
        NotificationSetting.objects.update_settings(
            ExternalProviders.SLACK,
            NotificationSettingTypes.ISSUE_ALERTS,
            NotificationSettingOptionValues.NEVER,
            user=user2,
        )
        # update the team's notification settings
        ExternalActor.objects.create(
            actor=self.team.actor,
            organization=self.organization,
            integration=self.integration,
            provider=ExternalProviders.SLACK.value,
            external_name="goma",
            external_id="CXXXXXXX2",
        )
        NotificationSetting.objects.update_settings(
            ExternalProviders.SLACK,
            NotificationSettingTypes.ISSUE_ALERTS,
            NotificationSettingOptionValues.ALWAYS,
            team=self.team,
        )

        rule = GrammarRule(Matcher("path", "*"),
                           [Owner("team", self.team.slug)])
        ProjectOwnership.objects.create(project_id=self.project.id,
                                        schema=dump_schema([rule]),
                                        fallthrough=True)

        event = self.store_event(
            data={
                "message": "Hello world",
                "level": "error",
                "stacktrace": {
                    "frames": [{
                        "filename": "foo.py"
                    }]
                },
            },
            project_id=self.project.id,
        )

        action_data = {
            "id": "sentry.mail.actions.NotifyEmailAction",
            "targetType": "IssueOwners",
            "targetIdentifier": "",
        }
        rule = Rule.objects.create(
            project=self.project,
            label="ja rule",
            data={
                "match": "all",
                "actions": [action_data],
            },
        )

        key = f"mail:p:{self.project.id}"
        backend.add(key,
                    event_to_record(event, [rule]),
                    increment_delay=0,
                    maximum_delay=0)

        with self.tasks():
            deliver_digest(key)

        # check that only one was sent out - more would mean each user is being notified
        # rather than the team
        assert len(responses.calls) == 1

        # check that the team got a notification
        data = parse_qs(responses.calls[0].request.body)
        assert data["channel"] == ["CXXXXXXX2"]
        assert "attachments" in data
        attachments = json.loads(data["attachments"][0])
        assert len(attachments) == 1
        assert attachments[0]["title"] == "Hello world"
        assert (
            attachments[0]["footer"] ==
            f"{self.project.slug} | <http://testserver/settings/{self.organization.slug}/teams/{self.team.slug}/notifications/?referrer=alert-rule-slack-team|Notification Settings>"
        )
Example #31
    def test_maintenance_failure_recovery(self):
        backend = RedisBackend()

        record_1 = Record('record:1', 'value', time.time())
        backend.add('timeline', record_1)

        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # ...and you can't send a digest in the waiting state.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                pass

        record_2 = Record('record:2', 'value', time.time())
        backend.add('timeline', record_2)

        # The schedule should now contain the timeline.
        assert set(entry.key for entry in backend.schedule(time.time())) == set(['timeline'])

        # The existing and new record should be there because the timeline
        # contents were merged back into the digest.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_1, record_2])
Example #32
    def get_backend(self, options=None):
        kwargs = self.DEFAULT_BACKEND_OPTIONS.copy()
        kwargs.update(options or {})
        return RedisBackend(**kwargs)
Example #33
    def test_maintenance_failure_recovery(self):
        backend = RedisBackend()

        record_1 = Record('record:1', 'value', time.time())
        backend.add('timeline', record_1)

        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # Maintenance should move the timeline back to the waiting state, ...
        backend.maintenance(time.time())

        # ...and you can't send a digest in the waiting state.
        with pytest.raises(InvalidState):
            with backend.digest('timeline', 0) as records:
                pass

        record_2 = Record('record:2', 'value', time.time())
        backend.add('timeline', record_2)

        # The schedule should now contain the timeline.
        assert set(entry.key
                   for entry in backend.schedule(time.time())) == set(
                       ['timeline'])

        # The existing and new record should be there because the timeline
        # contents were merged back into the digest.
        with backend.digest('timeline', 0) as records:
            assert set(records) == set([record_1, record_2])
Example #34
    def test_maintenance_failure_recovery_with_capacity(self):
        backend = RedisBackend(capacity=10, truncation_chance=0.0)

        t = time.time()

        # Add 10 items to the timeline.
        for i in range(10):
            backend.add('timeline',
                        Record('record:{}'.format(i), '{}'.format(i), t + i))

        try:
            with backend.digest('timeline', 0) as records:
                raise Exception('This causes the digest to not be closed.')
        except Exception:
            pass

        # The 10 existing items should now be in the digest set (the exception
        # prevented the close operation from occurring, so they were never
        # deleted from Redis or removed from the digest set). If we add 10 more
        # items, they should be added to the timeline set (not the digest set).
        for i in range(10, 20):
            backend.add('timeline',
                        Record('record:{}'.format(i), '{}'.format(i), t + i))

        # Maintenance should move the timeline back to the waiting state.
        backend.maintenance(time.time())

        # The schedule should now contain the timeline.
        assert set(entry.key
                   for entry in backend.schedule(time.time())) == set(
                       ['timeline'])

        # Only the new records should exist -- the older ones should have been
        # trimmed to avoid the digest growing beyond the timeline capacity.
        with backend.digest('timeline', 0) as records:
            expected_keys = set('record:{}'.format(i) for i in range(10, 20))
            assert set(record.key for record in records) == expected_keys
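
Taken together, these examples imply a small per-timeline state machine; the
comment sketch below summarizes the transitions the assertions exercise (an
inference from the tests, not a documented API):

    # add(record): timeline lands in the READY set (returns True on first add)
    # schedule(deadline): WAITING -> READY, yielding due entries earliest-first
    # digest(timeline):
    #     enter - timeline set merged into the digest set, records yielded
    #     exit  - success: records deleted, timeline rescheduled to WAITING at
    #             now + minimum_delay; failure: digest set kept for retry
    # maintenance(deadline): stuck READY -> WAITING; expired timelines deleted
    # delete(timeline): all keys removed; digest() then raises InvalidState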