Пример #1
0
def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.

    :param cache_key: key under which the event payload was stored in
        ``default_cache``; when given and ``data`` is not supplied, the
        payload is loaded from the cache and the key is deleted afterwards.
    :param data: the event payload dict (expected to contain ``event_id``,
        ``project`` and ``platform`` keys).
    :param start_time: wall-clock time the event entered the pipeline, used
        for the ``events.time-to-process`` timing metric.
    :param event_id: explicit event id; defaults to ``data['event_id']``.
    """
    from sentry.event_manager import EventManager

    # Only fall back to the cache when no payload was passed in directly;
    # unconditionally overwriting `data` would discard an explicit payload
    # on a cache miss (mirrors the guard used by _do_save_event).
    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    # Payload expired from the cache (or was never provided): count and bail.
    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project = data.pop('project')

    delete_raw_event(project, event_id)

    Raven.tags_context({
        'project': project,
    })

    try:
        manager = EventManager(data)
        manager.save(project)
    finally:
        # Always clean up the cache entry and record timing, even when
        # manager.save() raises.
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing('events.time-to-process', time() - start_time,
                           instance=data['platform'])
Пример #2
0
    def test_group_release_with_env(self):
        # Saving events for the same release under two different environments
        # must create one GroupRelease row per environment.
        for env_name, eid in (('prod', 'a' * 32), ('staging', 'b' * 32)):
            manager = EventManager(self.make_event(
                release='1.0', environment=env_name,
                event_id=eid))
            event = manager.save(1)

            release = Release.objects.get(version='1.0', projects=event.project_id)

            assert GroupRelease.objects.filter(
                release_id=release.id,
                group_id=event.group_id,
                environment=env_name,
            ).exists()
Пример #3
0
def test_long_culprit():
    # A culprit longer than MAX_CULPRIT_LENGTH is trimmed during normalization.
    overlong = 'x' * (MAX_CULPRIT_LENGTH + 1)
    manager = EventManager(make_event(culprit=overlong))
    manager.normalize()
    assert len(manager.get_data()['culprit']) == MAX_CULPRIT_LENGTH
Пример #4
0
    def test_ephemral_interfaces_removed_on_save(self):
        # The platform supplied on the raw event must survive the save on
        # both the event itself and its group.
        saved = EventManager(self.make_event(platform='python')).save(1)
        assert saved.group.platform == 'python'
        assert saved.platform == 'python'
Пример #5
0
def test_long_transaction():
    # Transactions longer than MAX_CULPRIT_LENGTH are trimmed on normalize.
    overlong = 'x' * (MAX_CULPRIT_LENGTH + 1)
    manager = EventManager(make_event(transaction=overlong))
    manager.normalize()
    assert len(manager.get_data()['transaction']) == MAX_CULPRIT_LENGTH
Пример #6
0
def test_moves_stacktrace_to_exception():
    # A top-level 'stacktrace' should be folded into the exception interface
    # during normalization and removed from the event root.
    payload = make_event(
        exception={'type': 'MyException'},
        stacktrace={
            'frames': [
                {'lineno': 1, 'filename': 'foo.py'},
                {'lineno': 1, 'filename': 'bar.py'},
            ]
        },
    )
    manager = EventManager(payload)
    manager.normalize()
    data = manager.get_data()

    frames = data['exception']['values'][0]['stacktrace']['frames']
    for frame, expected_file in zip(frames, ('foo.py', 'bar.py')):
        assert frame['lineno'] == 1
        assert frame['filename'] == expected_file
    assert 'stacktrace' not in data
    def test_does_not_unresolve_group(self, plugin_is_regression):
        # N.B. EventManager won't unresolve the group unless the event2 has a
        # later timestamp than event1. MySQL doesn't support microseconds.
        # With the regression plugin reporting False, a later event in the
        # same group must leave a manually-resolved group resolved.
        plugin_is_regression.return_value = False

        # First event creates the group.
        manager = EventManager(self.make_event(
            event_id='a' * 32, checksum='a' * 32,
            timestamp=1403007314,
        ))
        with self.tasks():
            event = manager.save(1)

        # Mark the group resolved by hand.
        group = Group.objects.get(id=event.group_id)
        group.status = GroupStatus.RESOLVED
        group.save()
        assert group.is_resolved()

        # Same checksum + later timestamp lands in the same group.
        manager = EventManager(self.make_event(
            event_id='b' * 32, checksum='a' * 32,
            timestamp=1403007315,
        ))
        event2 = manager.save(1)
        assert event.group_id == event2.group_id

        # Group must still be resolved since the plugin denied regression.
        group = Group.objects.get(id=group.id)
        assert group.is_resolved()
Пример #8
0
 def test_long_message(self):
     # Messages beyond SENTRY_MAX_MESSAGE_LENGTH are truncated on normalize.
     raw = 'x' * (settings.SENTRY_MAX_MESSAGE_LENGTH + 1)
     data = EventManager(self.make_event(message=raw)).normalize()
     stored = data['sentry.interfaces.Message']['message']
     assert len(stored) == settings.SENTRY_MAX_MESSAGE_LENGTH
Пример #9
0
    def test_saves_event_mapping_when_sampled(self, should_sample):
        """A brand-new event is stored as an Event (no EventMapping needed);
        a sampled duplicate stores only an EventMapping back to the group."""
        should_sample.return_value = True
        event_id = 'a' * 32

        manager = EventManager(self.make_event(event_id=event_id))
        event = manager.save(1)

        # This is a brand new event, so it is actually saved.
        # In this case, we don't need an EventMapping, but we
        # do need the Event.
        assert not EventMapping.objects.filter(
            group_id=event.group_id,
            event_id=event_id,
        ).exists()

        assert Event.objects.filter(
            event_id=event_id,
        ).exists()

        event_id = 'b' * 32

        manager = EventManager(self.make_event(event_id=event_id))
        event = manager.save(1)

        # This second is a dupe, so should be sampled
        # For a sample, we want to store the EventMapping,
        # but don't need to store the Event
        assert EventMapping.objects.filter(
            group_id=event.group_id,
            event_id=event_id,
        ).exists()

        assert not Event.objects.filter(
            event_id=event_id,
        ).exists()
Пример #10
0
def process_event_from_kafka(message):
    """Rebuild API context (project, auth, client info) from a consumed
    Kafka message and hand its event payload to process_event.

    Expects ``message`` to carry 'project_id', 'remote_addr', 'agent',
    'auth' (including an 'is_public' key) and 'data' (with 'version').
    """
    project = Project.objects.get_from_cache(pk=message['project_id'])

    remote_addr = message['remote_addr']
    helper = ClientApiHelper(
        agent=message['agent'],
        project_id=project.id,
        ip_address=remote_addr,
    )
    helper.context.bind_project(project)

    # NOTE: pops 'is_public' out of the message's auth dict in place.
    auth = Auth(message['auth'], message['auth'].pop('is_public'))
    helper.context.bind_auth(auth)

    key = helper.project_key_from_auth(auth)
    data = message['data']
    version = data['version']

    event_manager = EventManager(
        data,
        project=project,
        key=key,
        auth=auth,
        client_ip=remote_addr,
        user_agent=helper.context.agent,
        version=version,
    )
    # Mark as already normalized so EventManager skips re-normalization
    # (payload is presumably normalized before being written to Kafka --
    # TODO confirm against the producer side).
    event_manager._normalized = True
    del data

    return process_event(event_manager, project, key,
                         remote_addr, helper, attachments=None)
Пример #11
0
    def send(self, **kwargs):
        # TODO(dcramer): this should respect rate limits/etc and use the normal
        # pipeline
        """Record an internally-generated event against the configured
        internal project (settings.SENTRY_PROJECT), bypassing the normal
        ingest pipeline. Errors are logged unless ``self.raise_send_errors``
        is set, in which case they propagate."""
        from sentry.app import tsdb
        from sentry.coreapi import insert_data_to_database
        from sentry.event_manager import EventManager
        from sentry.models import Project

        try:
            project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
        except Project.DoesNotExist:
            self.error_logger.error('Internal project (id=%s) does not exist',
                                    settings.SENTRY_PROJECT)
            return

        metrics.incr('events.total', 1)

        kwargs['project'] = project.id
        try:
            manager = EventManager(kwargs)
            data = manager.normalize()
            # Count the event toward project/organization received totals.
            tsdb.incr_multi([
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.organization_total_received, project.organization_id),
            ])
            insert_data_to_database(data)
        except Exception as e:
            if self.raise_send_errors:
                raise
            self.error_logger.error(
                'Unable to record event: %s\nEvent was: %r', e,
                kwargs['message'], exc_info=True)
Пример #12
0
    def test_platform_is_saved(self):
        # The platform tag must propagate to both the saved event and group.
        event = EventManager(self.make_event(platform='python')).save(1)
        assert event.platform == 'python'
        assert event.group.platform == 'python'
Пример #13
0
    def test_unresolves_group(self):
        """A later event arriving in a resolved group must unresolve it."""
        ts = time() - 300

        # N.B. EventManager won't unresolve the group unless the event2 has a
        # later timestamp than event1.
        manager = EventManager(
            make_event(
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=ts,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Resolve the group by hand.
        group = Group.objects.get(id=event.group_id)
        group.status = GroupStatus.RESOLVED
        group.save()
        assert group.is_resolved()

        # Same checksum, 50s later: lands in the same group...
        manager = EventManager(
            make_event(
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=ts + 50,
            )
        )
        event2 = manager.save(1)
        assert event.group_id == event2.group_id

        # ...and flips it back to unresolved.
        group = Group.objects.get(id=group.id)
        assert not group.is_resolved()
Пример #14
0
    def test_updates_group_with_fingerprint(self):
        """Two events sharing a fingerprint group together and update the
        group's counters and message."""
        ts = time() - 200
        manager = EventManager(
            make_event(
                message='foo',
                event_id='a' * 32,
                fingerprint=['a' * 32],
                timestamp=ts,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Second event: same fingerprint, different message/id.
        manager = EventManager(
            make_event(
                message='foo bar',
                event_id='b' * 32,
                fingerprint=['a' * 32],
                timestamp=ts,
            )
        )
        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        # Both events counted; message reflects the most recent event.
        assert group.times_seen == 2
        assert group.last_seen == event.datetime
        assert group.message == event2.message
Пример #15
0
 def test_invalid_environment(self):
     # An environment name containing '/' is invalid and must be dropped
     # rather than stored as a tag.
     manager = EventManager(make_event(environment='bad/name'))
     manager.normalize()
     event = manager.save(self.project.id)
     assert dict(event.tags).get('environment') is None
Пример #16
0
    def test_unresolves_group_with_auto_resolve(self, mock_is_resolved):
        """When auto-resolve considers the group resolved, a newer event
        must reset the group's active_at to the new event's time."""
        ts = time() - 100
        mock_is_resolved.return_value = False
        manager = EventManager(
            make_event(
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=ts,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Flip auto-resolve on, then send a later event into the same group.
        mock_is_resolved.return_value = True
        manager = EventManager(
            make_event(
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=ts + 100,
            )
        )
        with self.tasks():
            event2 = manager.save(1)
        assert event.group_id == event2.group_id

        # active_at tracks the second event (seconds zeroed to tolerate
        # sub-minute storage differences).
        group = Group.objects.get(id=event.group.id)
        assert group.active_at.replace(second=0) == event2.datetime.replace(second=0)
        assert group.active_at.replace(second=0) != event.datetime.replace(second=0)
Пример #17
0
    def test_updates_group(self):
        """Two events with the same checksum group together, updating the
        group's counters, message and metadata."""
        manager = EventManager(
            self.make_event(
                message='foo',
                event_id='a' * 32,
                checksum='a' * 32,
            )
        )
        event = manager.save(1)

        # Second event with the same checksum but different message.
        manager = EventManager(
            self.make_event(
                message='foo bar',
                event_id='b' * 32,
                checksum='a' * 32,
            )
        )
        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        # Counters/message updated; microseconds zeroed for DBs that
        # don't store them.
        assert group.times_seen == 2
        assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
        assert group.message == event2.message
        assert group.data.get('type') == 'default'
        assert group.data.get('metadata') == {
            'title': 'foo bar',
        }
Пример #18
0
    def test_updates_group_with_fingerprint(self):
        """Two events sharing an explicit fingerprint group together and
        update the group's counters and message."""
        manager = EventManager(
            self.make_event(
                message='foo',
                event_id='a' * 32,
                fingerprint=['a' * 32],
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Second event: same fingerprint, different message/id.
        manager = EventManager(
            self.make_event(
                message='foo bar',
                event_id='b' * 32,
                fingerprint=['a' * 32],
            )
        )
        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        # Microseconds zeroed for DBs that don't store them.
        assert group.times_seen == 2
        assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
        assert group.message == event2.message
Пример #19
0
    def test_unresolves_group_with_auto_resolve(self, mock_is_resolved):
        """When auto-resolve considers the group resolved, a newer event
        must reset the group's active_at to the new event's time."""
        mock_is_resolved.return_value = False
        manager = EventManager(
            self.make_event(
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=1403007314,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Flip auto-resolve on, then send a later event into the same group.
        mock_is_resolved.return_value = True
        manager = EventManager(
            self.make_event(
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=1403007414,
            )
        )
        with self.tasks():
            event2 = manager.save(1)
        assert event.group_id == event2.group_id

        # active_at tracks the second event, not the first.
        group = Group.objects.get(id=event.group.id)
        assert group.active_at == event2.datetime != event.datetime
Пример #20
0
    def test_user_report_gets_environment(self):
        """A pre-existing UserReport for an event id gets linked to the
        event's environment once the event is saved."""
        project = self.create_project()
        environment = Environment.objects.create(
            project_id=project.id,
            organization_id=project.organization_id,
            name='production',
        )
        environment.add_project(project)
        event_id = 'a' * 32

        # Create the user report BEFORE the event exists.
        group = self.create_group(project=project)
        UserReport.objects.create(
            group=group,
            project=project,
            event_id=event_id,
            name='foo',
            email='*****@*****.**',
            comments='It Broke!!!',
        )
        # Saving the event should backfill the report's environment.
        manager = EventManager(
            self.make_event(
                environment=environment.name,
                event_id=event_id,
                group=group))
        manager.normalize()
        manager.save(project.id)
        assert UserReport.objects.get(event_id=event_id).environment == environment
Пример #21
0
    def test(self, mock_delay_index_event_tags, mock_eventstream_insert):
        """End-to-end: EventManager.save calls eventstream.insert with the
        expected kwargs; replaying the resulting Kafka payload into Snuba
        makes the event queryable."""
        now = datetime.utcnow()

        # Count events for this project in a +/- 1 day window around `now`.
        def _get_event_count():
            return snuba.query(
                start=now - timedelta(days=1),
                end=now + timedelta(days=1),
                groupby=['project_id'],
                filter_keys={'project_id': [self.project.id]},
            ).get(self.project.id, 0)

        assert _get_event_count() == 0

        raw_event = {
            'event_id': 'a' * 32,
            'message': 'foo',
            'timestamp': time.mktime(now.timetuple()),
            'level': logging.ERROR,
            'logger': 'default',
            'tags': [],
        }

        manager = EventManager(raw_event)
        manager.normalize()
        event = manager.save(self.project.id)

        # verify eventstream was called by EventManager
        insert_args, insert_kwargs = list(mock_eventstream_insert.call_args)
        assert not insert_args
        assert insert_kwargs == {
            'event': event,
            'group': event.group,
            'is_new_group_environment': True,
            'is_new': True,
            'is_regression': False,
            'is_sample': False,
            'primary_hash': 'acbd18db4cc2f85cedef654fccc4a4d8',
            'skip_consume': False
        }

        assert mock_delay_index_event_tags.call_count == 1

        # pass arguments on to Kafka EventManager
        self.kafka_eventstream.insert(*insert_args, **insert_kwargs)

        # Inspect what would have been produced to the 'events' topic.
        produce_args, produce_kwargs = list(self.kafka_eventstream.producer.produce.call_args)
        assert not produce_args
        assert produce_kwargs['topic'] == 'events'
        assert produce_kwargs['key'] == six.text_type(self.project.id)

        version, type_, payload1, payload2 = json.loads(produce_kwargs['value'])
        assert version == 2
        assert type_ == 'insert'

        # insert what would have been the Kafka payload directly
        # into Snuba, expect an HTTP 200 and for the event to now exist
        snuba_eventstream = SnubaEventStream()
        snuba_eventstream._send(self.project.id, 'insert', (payload1, payload2))
        assert _get_event_count() == 1
Пример #22
0
 def test_does_default_ip_address_to_user(self):
     # REMOTE_ADDR from the HTTP interface is copied to the user interface
     # when no explicit user ip_address is present.
     http = {"url": "http://example.com", "env": {"REMOTE_ADDR": "127.0.0.1"}}
     manager = EventManager(self.make_event(**{"sentry.interfaces.Http": http}))
     data = manager.normalize()
     assert data["sentry.interfaces.User"]["ip_address"] == "127.0.0.1"
Пример #23
0
 def test_culprit_is_not_transaction(self):
     # Supplying only a culprit must not populate the transaction field.
     manager = EventManager(make_event(culprit='foobar'))
     manager.normalize()
     saved = manager.save(1)
     assert saved.transaction is None
     assert saved.culprit == 'foobar'
Пример #24
0
def test_removes_some_empty_containers(key, value):
    # Empty container values (parametrized as key/value) must be stripped
    # from the payload during normalization.
    payload = make_event()
    payload[key] = value

    manager = EventManager(payload)
    manager.normalize()
    assert key not in manager.get_data()
Пример #25
0
    def test_broken_regression_signal(self, send):
        # A failing regression signal handler must not prevent the event
        # itself from being saved.
        send.side_effect = Exception()
        event = EventManager(self.make_event()).save(1)
        assert event.message == 'foo'
        assert event.project_id == 1
    def test_environment(self):
        # A valid environment name ends up as the 'environment' tag.
        manager = EventManager(self.make_event(environment='beta'))
        manager.normalize()
        event = manager.save(self.project.id)

        assert dict(event.tags).get('environment') == 'beta'
Пример #27
0
 def save_event():
     # Build a uniquely-identified event (fresh uuid avoids deduplication)
     # bound to the enclosing release/environment, then persist it.
     payload = self.make_event(**{
         'event_id': uuid.uuid1().hex,  # don't deduplicate
         'environment': 'beta',
         'release': release_version,
     })
     manager = EventManager(payload)
     manager.normalize()
     return manager.save(self.project.id)
Пример #28
0
 def test_transaction_as_culprit(self):
     # With no explicit culprit, the transaction doubles as the culprit.
     manager = EventManager(make_event(transaction='foobar'))
     manager.normalize()
     saved = manager.save(1)
     assert saved.transaction == 'foobar'
     assert saved.culprit == 'foobar'
Пример #29
0
 def test_invalid_transaction(self):
     # A non-string transaction value is rejected during normalization.
     bogus = {'messages': 'foo'}
     manager = EventManager(self.make_event(transaction=bogus))
     manager.normalize()
     saved = manager.save(1)
     assert saved.transaction is None
Пример #30
0
 def inner(data):
     # Normalize an event carrying only an SDK payload, then snapshot both
     # the normalization errors and the serialized SDK interface.
     manager = EventManager(data={"sdk": data})
     manager.normalize()
     event = Event(data=manager.get_data())
     insta_snapshot({
         'errors': event.data.get('errors'),
         'to_json': event.interfaces.get('sdk').to_json()
     })
Пример #31
0
    def test_marks_as_unresolved_only_with_new_release(self, plugin_is_regression):
        """A group resolved "in release A" stays resolved for further events
        on release A, but regresses (unresolves) once an event arrives on a
        different release."""
        plugin_is_regression.return_value = True

        old_release = Release.objects.create(
            version='a',
            project=self.project,
            date_added=timezone.now() - timedelta(minutes=30),
        )

        # First event on the old release creates the group.
        manager = EventManager(self.make_event(
            event_id='a' * 32,
            checksum='a' * 32,
            timestamp=time() - 50000,  # need to work around active_at
            release=old_release.version,
        ))
        event = manager.save(1)

        group = event.group

        group.update(status=GroupStatus.RESOLVED)

        # Resolve the group "in" the old release.
        resolution = GroupResolution.objects.create(
            release=old_release,
            group=group,
        )
        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.SET_RESOLVED_IN_RELEASE,
            ident=resolution.id,
            data={'version': ''},
        )

        # Another event on the SAME release: must not regress the group.
        manager = EventManager(self.make_event(
            event_id='b' * 32,
            checksum='a' * 32,
            timestamp=time(),
            release=old_release.version,
        ))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.RESOLVED

        activity = Activity.objects.get(id=activity.id)
        assert activity.data['version'] == ''

        assert GroupResolution.objects.filter(group=group).exists()

        # An event on a NEW release ('b'): must regress the group.
        manager = EventManager(self.make_event(
            event_id='c' * 32,
            checksum='a' * 32,
            timestamp=time(),
            release='b',
        ))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.UNRESOLVED

        # Activity updated with the regressing release version...
        activity = Activity.objects.get(id=activity.id)
        assert activity.data['version'] == 'b'

        # ...the resolution is cleared, and a regression is recorded.
        assert not GroupResolution.objects.filter(group=group).exists()

        assert Activity.objects.filter(
            group=group,
            type=Activity.SET_REGRESSION,
        ).exists()
Пример #32
0
 def test_long_message(self):
     # Messages beyond SENTRY_MAX_MESSAGE_LENGTH are truncated on normalize.
     oversized = 'x' * (settings.SENTRY_MAX_MESSAGE_LENGTH + 1)
     data = EventManager(self.make_event(message=oversized)).normalize()
     assert len(data['message']) == settings.SENTRY_MAX_MESSAGE_LENGTH
Пример #33
0
    def test_event_user(self):
        """Events with a user interface create an EventUser, tag the event
        with 'sentry:user', and count distinct users in tsdb at both group
        and project scope (overall and per-environment)."""
        manager = EventManager(make_event(
            event_id='a',
            environment='totally unique environment',
            **{'sentry.interfaces.User': {
                'id': '1',
            }}
        ))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        environment_id = Environment.get_for_organization_id(
            event.project.organization_id,
            'totally unique environment',
        ).id

        # One distinct user affected, group scope, no environment filter.
        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id, ),
            event.datetime,
            event.datetime,
        ) == {
            event.group.id: 1,
        }

        # Same at project scope.
        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id, ),
            event.datetime,
            event.datetime,
        ) == {
            event.project.id: 1,
        }

        # And filtered down to the event's environment.
        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id, ),
            event.datetime,
            event.datetime,
            environment_id=environment_id,
        ) == {
            event.group.id: 1,
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id, ),
            event.datetime,
            event.datetime,
            environment_id=environment_id,
        ) == {
            event.project.id: 1,
        }

        euser = EventUser.objects.get(
            project_id=self.project.id,
            ident='1',
        )
        assert event.get_tag('sentry:user') == euser.tag_value

        # ensure event user is mapped to tags in second attempt
        manager = EventManager(
            make_event(
                event_id='b',
                **{'sentry.interfaces.User': {
                    'id': '1',
                    'name': 'jane',
                }}
            )
        )
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        # The existing EventUser is reused and updated, not duplicated.
        euser = EventUser.objects.get(id=euser.id)
        assert event.get_tag('sentry:user') == euser.tag_value
        assert euser.name == 'jane'
        assert euser.ident == '1'
Пример #34
0
 def validate_and_normalize(self, data, request_env=None):
     # Run the payload through API validation, then return the
     # EventManager-normalized form.
     validated = self.helper.validate_data(data)
     manager = EventManager(validated)
     return manager.normalize(request_env=request_env)
Пример #35
0
 def test_inferred_culprit_from_empty_stacktrace(self):
     # An empty stacktrace yields an empty-string culprit, not an error.
     manager = EventManager(make_event(stacktrace={"frames": []}))
     manager.normalize()
     saved = manager.save(1)
     assert saved.culprit == ''
Пример #36
0
    def send(self, **kwargs):
        # TODO(dcramer): this should respect rate limits/etc and use the normal
        # pipeline
        """Record an internal event: optionally forward it to a configured
        upstream Sentry, then store it into the local internal project
        (settings.SENTRY_PROJECT) when it is safe to do so."""

        # Report the issue to an upstream Sentry if active
        # NOTE: we don't want to check self.is_enabled() like normal, since
        # is_enabled behavior is overridden in this class. We explicitly
        # want to check if the remote is active.
        if self.remote.is_active():
            from sentry import options
            # Append some extra tags that are useful for remote reporting
            super_kwargs = copy.deepcopy(kwargs)
            super_kwargs['tags']['install-id'] = options.get('sentry:install-id')
            super(SentryInternalClient, self).send(**super_kwargs)

        # Avoid recursive error reporting loops.
        if not is_current_event_safe():
            return

        from sentry.app import tsdb
        from sentry.coreapi import ClientApiHelper
        from sentry.event_manager import EventManager
        from sentry.models import Project

        helper = ClientApiHelper(
            agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
            project_id=settings.SENTRY_PROJECT,
            version=self.protocol_version,
        )

        # Any failure to load the internal project means we silently drop
        # the event after logging -- reporting must never crash the caller.
        try:
            project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
        except DatabaseError:
            self.error_logger.error('Unable to fetch internal project',
                                    exc_info=True)
            return
        except Project.DoesNotExist:
            self.error_logger.error('Internal project (id=%s) does not exist',
                                    settings.SENTRY_PROJECT)
            return
        except Exception:
            self.error_logger.error(
                'Unable to fetch internal project for some unknown reason',
                exc_info=True)
            return

        helper.context.bind_project(project)

        metrics.incr('events.total')

        kwargs['project'] = project.id
        try:
            manager = EventManager(kwargs)
            data = manager.normalize()
            # Count the event toward project/organization received totals.
            tsdb.incr_multi([
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.organization_total_received, project.organization_id),
            ])
            helper.insert_data_to_database(data)
        except Exception as e:
            if self.raise_send_errors:
                raise
            self.error_logger.error(
                'Unable to record event: %s\nEvent was: %r', e,
                kwargs['message'], exc_info=True)
Пример #37
0
    def from_kwargs(self, project, **kwargs):
        """Build, normalize and persist an event from raw keyword data."""
        from sentry.event_manager import EventManager

        mgr = EventManager(kwargs)
        mgr.normalize()
        return mgr.save(project)
Пример #38
0
def digest(request):
    """Render a preview of the digest notification email using randomly
    generated groups, events and rules for a fake example project.

    :param request: the HTTP request (its seed drives `get_random`).
    :returns: rendered MailPreview response for the digest templates.
    """
    random = get_random(request)

    # TODO: Refactor all of these into something more manageable.
    org = Organization(id=1, slug="example", name="Example Organization")

    project = Project(id=1,
                      slug="example",
                      name="Example Project",
                      organization=org)

    rules = {
        i: Rule(id=i, project=project, label="Rule #%s" % (i, ))
        for i in range(1, random.randint(2, 4))
    }

    state = {
        "project": project,
        "groups": {},
        "rules": rules,
        "event_counts": {},
        "user_counts": {},
    }

    records = []

    group_generator = make_group_generator(random, project)

    for i in range(random.randint(1, 30)):
        group = next(group_generator)
        state["groups"][group.id] = group

        offset = timedelta(seconds=0)
        # Use a distinct loop variable here: the original shadowed the
        # outer `i`, which is error-prone if the outer index is ever used
        # below this loop.
        for _ in range(random.randint(1, 10)):
            offset += timedelta(seconds=random.random() * 120)

            data = dict(load_data("python"))
            data["message"] = group.message
            data.pop("logentry", None)

            # Normalize the synthetic payload the same way real ingestion
            # would before building the preview event.
            event_manager = EventManager(data)
            event_manager.normalize()
            data = event_manager.get_data()

            data["timestamp"] = random.randint(to_timestamp(group.first_seen),
                                               to_timestamp(group.last_seen))

            event = eventstore.create_event(event_id=uuid.uuid4().hex,
                                            group_id=group.id,
                                            project_id=project.id,
                                            data=data.data)

            records.append(
                Record(
                    event.event_id,
                    Notification(
                        event,
                        random.sample(list(state["rules"].keys()),
                                      random.randint(1, len(state["rules"]))),
                    ),
                    to_timestamp(event.datetime),
                ))

            # int(1e4): randint requires integer bounds on modern Python.
            state["event_counts"][group.id] = random.randint(10, int(1e4))
            state["user_counts"][group.id] = random.randint(10, int(1e4))

    digest = build_digest(project, records, state)
    start, end, counts = get_digest_metadata(digest)

    context = {
        "project": project,
        "counts": counts,
        "digest": digest,
        "start": start,
        "end": end,
        "referrer": "digest_email",
    }
    add_unsubscribe_link(context)

    return MailPreview(
        html_template="sentry/emails/digests/body.html",
        text_template="sentry/emails/digests/body.txt",
        context=context,
    ).render(request)
Пример #39
0
    def test_interface_is_relabeled(self):
        # A modern 'user' key is normalized to the legacy
        # 'sentry.interfaces.User' key, and the alias is removed.
        data = EventManager(self.make_event(user={'id': '1'})).normalize()
        assert data['sentry.interfaces.User'] == {'id': '1'}
        assert 'user' not in data
Пример #40
0
 def test_long_culprit(self):
     # Culprits beyond MAX_CULPRIT_LENGTH are trimmed during normalization.
     overlong = 'x' * (MAX_CULPRIT_LENGTH + 1)
     data = EventManager(self.make_event(culprit=overlong, )).normalize()
     assert len(data['culprit']) == MAX_CULPRIT_LENGTH
Пример #41
0
 def test_long_transaction(self):
     # Transactions beyond MAX_CULPRIT_LENGTH are trimmed on normalize.
     overlong = 'x' * (MAX_CULPRIT_LENGTH + 1)
     data = EventManager(self.make_event(transaction=overlong, )).normalize()
     assert len(data['transaction']) == MAX_CULPRIT_LENGTH
Пример #42
0
    def test_marks_as_unresolved_with_new_release_with_integration(
        self,
        plugin_is_regression,
        mock_send_activity_notifications_delay,
        mock_sync_status_outbound,
    ):
        """Like the plain resolved-in-release regression test, but with an
        issue-sync integration attached: a regression on a new release must
        also sync the unresolved status outbound and notify."""
        plugin_is_regression.return_value = True

        old_release = Release.objects.create(
            version="a",
            organization_id=self.project.organization_id,
            date_added=timezone.now() - timedelta(minutes=30),
        )
        old_release.add_project(self.project)

        # First event on the old release creates the group.
        manager = EventManager(
            make_event(
                event_id="a" * 32,
                checksum="a" * 32,
                timestamp=time() - 50000,  # need to work around active_at
                release=old_release.version,
            ))
        event = manager.save(1)

        group = event.group

        org = group.organization

        # Attach an example integration with full two-way sync enabled.
        integration = Integration.objects.create(provider="example",
                                                 name="Example")
        integration.add_organization(org, self.user)
        OrganizationIntegration.objects.filter(
            integration_id=integration.id,
            organization_id=group.organization.id).update(
                config={
                    "sync_comments": True,
                    "sync_status_outbound": True,
                    "sync_status_inbound": True,
                    "sync_assignee_outbound": True,
                    "sync_assignee_inbound": True,
                })

        # Link the group to an external issue so status changes sync out.
        external_issue = ExternalIssue.objects.get_or_create(
            organization_id=org.id,
            integration_id=integration.id,
            key="APP-%s" % group.id)[0]

        GroupLink.objects.get_or_create(
            group_id=group.id,
            project_id=group.project_id,
            linked_type=GroupLink.LinkedType.issue,
            linked_id=external_issue.id,
            relationship=GroupLink.Relationship.references,
        )[0]

        group.update(status=GroupStatus.RESOLVED)

        # Resolve the group "in" the old release.
        resolution = GroupResolution.objects.create(release=old_release,
                                                    group=group)
        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.SET_RESOLVED_IN_RELEASE,
            ident=resolution.id,
            data={"version": ""},
        )

        # Another event on the SAME release: must not regress the group.
        manager = EventManager(
            make_event(event_id="b" * 32,
                       checksum="a" * 32,
                       timestamp=time(),
                       release=old_release.version))

        with self.tasks():
            with self.feature({"organizations:integrations-issue-sync": True}):
                event = manager.save(1)
                assert event.group_id == group.id

                group = Group.objects.get(id=group.id)
                assert group.status == GroupStatus.RESOLVED

                activity = Activity.objects.get(id=activity.id)
                assert activity.data["version"] == ""

                assert GroupResolution.objects.filter(group=group).exists()

                # An event on a NEW release ('b'): regresses the group and
                # syncs the unresolved status to the external issue.
                manager = EventManager(
                    make_event(event_id="c" * 32,
                               checksum="a" * 32,
                               timestamp=time(),
                               release="b"))
                event = manager.save(1)
                mock_sync_status_outbound.assert_called_once_with(
                    external_issue, False, event.group.project_id)
                assert event.group_id == group.id

                group = Group.objects.get(id=group.id)
                assert group.status == GroupStatus.UNRESOLVED

                activity = Activity.objects.get(id=activity.id)
                assert activity.data["version"] == "b"

                assert not GroupResolution.objects.filter(group=group).exists()

                # Regression activity recorded and its notification queued.
                activity = Activity.objects.get(group=group,
                                                type=Activity.SET_REGRESSION)

                mock_send_activity_notifications_delay.assert_called_once_with(
                    activity.id)
Пример #43
0
 def test_default_version(self):
     # Events without an explicit protocol version default to '5'.
     data = EventManager(self.make_event()).normalize()
     assert data['version'] == '5'
Пример #44
0
def _do_save_event(
    cache_key=None, data=None, start_time=None, event_id=None, project_id=None, **kwargs
):
    """
    Saves an event to the database.

    :param cache_key: processing-store key; when set and ``data`` is None the
        payload is loaded from ``event_processing_store`` and both the event
        payload and any cached attachments are deleted afterwards.
    :param data: the event payload, if the caller already has it.
    :param start_time: unix timestamp the event was received at; drives the
        ``events.time-to-process`` metric.
    :param event_id: the event id; read from ``data`` when omitted.
    :param project_id: only provided when the task is invoked from
        reprocessing; otherwise it is popped out of ``data``.
    """

    set_current_project(project_id)

    # Imported lazily inside the task body (avoids import-time cycles with
    # the task module -- TODO confirm that is the reason here).
    from sentry.event_manager import EventManager, HashDiscarded

    event_type = "none"

    if cache_key and data is None:
        with metrics.timer("tasks.store.do_save_event.get_cache") as metric_tags:
            data = event_processing_store.get(cache_key)
            if data is not None:
                metric_tags["event_type"] = event_type = data.get("type") or "none"

    with metrics.global_tags(event_type=event_type):
        if data is not None:
            data = CanonicalKeyDict(data)

        if event_id is None and data is not None:
            event_id = data["event_id"]

        # only when we come from reprocessing we get a project_id sent into
        # the task.
        if project_id is None:
            project_id = data.pop("project")
            set_current_project(project_id)

        # We only need to delete raw events for events that support
        # reprocessing.  If the data cannot be found we want to assume
        # that we need to delete the raw event.
        if not data or reprocessing.event_supports_reprocessing(data):
            with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
                delete_raw_event(project_id, event_id, allow_hint_clear=True)

        # This covers two cases: where data is None because we did not manage
        # to fetch it from the default cache or the empty dictionary was
        # stored in the default cache.  The former happens if the event
        # expired while being on the queue, the second happens on reprocessing
        # if the raw event was deleted concurrently while we held on to
        # it.  This causes the node store to delete the data and we end up
        # fetching an empty dict.  We could in theory not invoke `save_event`
        # in those cases but it's important that we always clean up the
        # reprocessing reports correctly or they will screw up the UI.  So
        # to future proof this correctly we just handle this case here.
        if not data:
            metrics.incr(
                "events.failed", tags={"reason": "cache", "stage": "post"}, skip_internal=False
            )
            return

        try:
            with metrics.timer("tasks.store.do_save_event.event_manager.save"):
                manager = EventManager(data)
                # event.project.organization is populated after this statement.
                manager.save(
                    project_id, assume_normalized=True, start_time=start_time, cache_key=cache_key
                )

        except HashDiscarded:
            # Deliberately swallowed: a discarded hash means the event must
            # not be persisted, but the cache cleanup below still has to run.
            pass

        finally:
            # Cleanup runs whether the save succeeded, was discarded or raised.
            if cache_key:
                with metrics.timer("tasks.store.do_save_event.delete_cache"):
                    event_processing_store.delete_by_key(cache_key)

                with metrics.timer("tasks.store.do_save_event.delete_attachment_cache"):
                    attachment_cache.delete(cache_key)

            if start_time:
                metrics.timing(
                    "events.time-to-process", time() - start_time, instance=data["platform"]
                )
Пример #45
0
    def test_tags_as_dict(self):
        """Tags supplied as a mapping are normalized into a list of pairs."""
        event_manager = EventManager(self.make_event(tags={'foo': 'bar'}))
        normalized = event_manager.normalize()

        assert normalized['tags'] == [('foo', 'bar')]
Пример #46
0
    def test_default_fingerprint(self):
        """Saving without an explicit fingerprint falls back to '{{ default }}'."""
        event_manager = EventManager(make_event())
        event_manager.normalize()
        saved_event = event_manager.save(self.project.id)

        assert saved_event.data.get('fingerprint') == ['{{ default }}']
Пример #47
0
def alert(request):
    """Render a preview of the error alert email.

    Debug view: fabricates an organization, project, group and event for the
    platform given in ``?platform=`` (default ``python``), saves the event,
    and renders ``sentry/emails/error.(html|txt)`` with canned commit/author
    context via :class:`MailPreview`.
    """
    platform = request.GET.get("platform", "python")
    # In-memory fixtures; note that `event_manager.save` below does persist
    # the event -- presumably acceptable for a debug-only view (TODO confirm).
    org = Organization(id=1, slug="example", name="Example")
    project = Project(id=1, slug="example", name="Example", organization=org)

    random = get_random(request)
    group = next(make_group_generator(random, project))

    data = dict(load_data(platform))
    data["message"] = group.message
    data["event_id"] = "44f1419e73884cd2b45c79918f4b6dc4"
    data.pop("logentry", None)
    data["environment"] = "prod"
    data["tags"] = [
        ("logger", "javascript"),
        ("environment", "prod"),
        ("level", "error"),
        ("device", "Other"),
    ]

    event_manager = EventManager(data)
    event_manager.normalize()
    data = event_manager.get_data()
    event = event_manager.save(project.id)
    # Prevent CI screenshot from constantly changing
    event.data["timestamp"] = 1504656000.0  # datetime(2017, 9, 6, 0, 0)
    event_type = get_event_type(event.data)

    group.message = event.search_message
    group.data = {
        "type": event_type.key,
        "metadata": event_type.get_metadata(data)
    }

    rule = Rule(label="An example rule")

    # XXX: this interface_list code needs to be the same as in
    #      src/sentry/mail/adapter.py
    interface_list = []
    for interface in six.itervalues(event.interfaces):
        body = interface.to_email_html(event)
        if not body:
            continue
        text_body = interface.to_string(event)
        interface_list.append(
            (interface.get_title(), mark_safe(body), text_body))

    # Static commit/author payload so the rendered preview is deterministic.
    return MailPreview(
        html_template="sentry/emails/error.html",
        text_template="sentry/emails/error.txt",
        context={
            "rule":
            rule,
            "group":
            group,
            "event":
            event,
            "link":
            "http://example.com/link",
            "interfaces":
            interface_list,
            "tags":
            event.tags,
            "project_label":
            project.slug,
            "commits": [{
                # TODO(dcramer): change to use serializer
                "repository": {
                    "status": "active",
                    "name": "Example Repo",
                    "url": "https://github.com/example/example",
                    "dateCreated": "2018-02-28T23:39:22.402Z",
                    "provider": {
                        "id": "github",
                        "name": "GitHub"
                    },
                    "id": "1",
                },
                "score": 2,
                "subject": "feat: Do something to raven/base.py",
                "message":
                "feat: Do something to raven/base.py\naptent vivamus vehicula tempus volutpat hac tortor",
                "id": "1b17483ffc4a10609e7921ee21a8567bfe0ed006",
                "shortId": "1b17483",
                "author": {
                    "username":
                    "******",
                    "isManaged":
                    False,
                    "lastActive":
                    "2018-03-01T18:25:28.149Z",
                    "id":
                    "1",
                    "isActive":
                    True,
                    "has2fa":
                    False,
                    "name":
                    "*****@*****.**",
                    "avatarUrl":
                    "https://secure.gravatar.com/avatar/51567a4f786cd8a2c41c513b592de9f9?s=32&d=mm",
                    "dateJoined":
                    "2018-02-27T22:04:32.847Z",
                    "emails": [{
                        "is_verified": False,
                        "id": "1",
                        "email": "*****@*****.**"
                    }],
                    "avatar": {
                        "avatarUuid": None,
                        "avatarType": "letter_avatar"
                    },
                    "lastLogin":
                    "******",
                    "email":
                    "*****@*****.**",
                },
            }],
        },
    ).render(request)
Пример #48
0
    def process(self, request, project, auth, data, **kwargs):
        """Validate, normalize, filter and enqueue one raw event payload.

        Returns the event id on success.

        Raises:
            APIRateLimited: the project's quota says this request is limited.
            APIForbidden: a plugin denied event creation for this user/project.
            APIError: the payload failed ``validate_data``.
            InvalidRequest: an event with the same id was seen within the
                last 5 minutes (dedupe cache hit).
        """
        event_received.send_robust(ip=request.META['REMOTE_ADDR'], sender=type(self))

        # TODO: improve this API (e.g. make RateLimit act on __ne__)
        rate_limit = safe_execute(app.quotas.is_rate_limited, project=project)
        if isinstance(rate_limit, bool):
            rate_limit = RateLimit(is_limited=rate_limit, retry_after=None)

        if rate_limit is not None and rate_limit.is_limited:
            raise APIRateLimited(rate_limit.retry_after)

        result = plugins.first('has_perm', request.user, 'create_event', project)
        if result is False:
            raise APIForbidden('Creation of this event was blocked')

        content_encoding = request.META.get('HTTP_CONTENT_ENCODING', '')

        # Decode the raw body: explicit gzip/deflate, otherwise anything not
        # starting with '{' is assumed to be base64+compressed client payload.
        if content_encoding == 'gzip':
            data = decompress_gzip(data)
        elif content_encoding == 'deflate':
            data = decompress_deflate(data)
        elif not data.startswith('{'):
            data = decode_and_decompress_data(data)
        data = safely_load_json_string(data)

        try:
            # mutates data
            validate_data(project, data, auth.client)
        except InvalidData as e:
            raise APIError(u'Invalid data: %s (%s)' % (six.text_type(e), type(e)))

        # mutates data
        manager = EventManager(data, version=auth.version)
        data = manager.normalize()

        # insert IP address if not available
        if auth.is_public:
            ensure_has_ip(data, request.META['REMOTE_ADDR'])

        event_id = data['event_id']

        # TODO(dcramer): ideally we'd only validate this if the event_id was
        # supplied by the user
        cache_key = 'ev:%s:%s' % (project.id, event_id,)

        if cache.get(cache_key) is not None:
            logger.warning('Discarded recent duplicate event from project %s/%s (id=%s)', project.team.slug, project.slug, event_id)
            raise InvalidRequest('An event with the same ID already exists.')

        # We filter data immediately before it ever gets into the queue
        inst = SensitiveDataFilter()
        inst.apply(data)

        # mutates data (strips a lot of context if not queued)
        insert_data_to_database(data)

        # Remember this event id for 5 minutes to reject duplicates above.
        cache.set(cache_key, '', 60 * 5)

        logger.debug('New event from project %s/%s (id=%s)', project.team.slug, project.slug, event_id)

        return event_id
Пример #49
0
    def process(self,
                request,
                project,
                key,
                auth,
                helper,
                data,
                attachments=None,
                **kwargs):
        """Normalize an incoming event and hand it off for processing.

        A sampled fraction of events (never those with attachments) is also
        published to Kafka; when the 'store.process-in-kafka' option is set,
        the Kafka consumer takes over entirely and this method returns early
        with the event id.  Otherwise the event goes through
        ``process_event``.

        Raises APIError when no JSON payload was supplied.
        """
        metrics.incr('events.total')

        if not data:
            raise APIError('No JSON data was found')

        remote_addr = request.META['REMOTE_ADDR']

        event_manager = EventManager(
            data,
            project=project,
            key=key,
            auth=auth,
            client_ip=remote_addr,
            user_agent=helper.context.agent,
            version=auth.version,
            content_encoding=request.META.get('HTTP_CONTENT_ENCODING', ''),
        )
        # The manager owns the payload from here on; drop our reference.
        del data

        self.pre_normalize(event_manager, helper)
        event_manager.normalize()

        agent = request.META.get('HTTP_USER_AGENT')

        # TODO: Some form of coordination between the Kafka consumer
        # and this method (the 'relay') to decide whether a 429 should
        # be returned here.

        # Everything before this will eventually be done in the relay.
        if (kafka_publisher is not None and not attachments
                and random.random() < options.get('store.kafka-sample-rate')):

            process_in_kafka = options.get('store.process-in-kafka')

            try:
                kafka_publisher.publish(
                    channel=getattr(settings, 'KAFKA_EVENTS_PUBLISHER_TOPIC',
                                    'store-events'),
                    # Relay will (eventually) need to produce a Kafka message
                    # with this JSON format.
                    value=json.dumps({
                        'data': event_manager.get_data(),
                        'project_id': project.id,
                        'auth': {
                            'sentry_client': auth.client,
                            'sentry_version': auth.version,
                            'sentry_secret': auth.secret_key,
                            'sentry_key': auth.public_key,
                            'is_public': auth.is_public,
                        },
                        'remote_addr': remote_addr,
                        'agent': agent,
                        # Whether or not the Kafka consumer is in charge
                        # of actually processing this event.
                        'should_process': process_in_kafka,
                    }))
            except Exception as e:
                # Publishing is best-effort: a Kafka failure must not drop
                # the event, which still goes through process_event below.
                logger.exception("Cannot publish event to Kafka: {}".format(
                    e.message))
            else:
                if process_in_kafka:
                    # This event will be processed by the Kafka consumer, so we
                    # shouldn't double process it here.
                    return event_manager.get_data()['event_id']

        # Everything after this will eventually be done in a Kafka consumer.
        return process_event(event_manager, project, key, remote_addr, helper,
                             attachments)
Пример #50
0
def get_normalized_event(data, project):
    """Run *data* through EventManager normalization for *project* and return a plain dict."""
    manager = EventManager(data, project=project)
    manager.normalize()
    normalized = manager.get_data()
    return dict(normalized)
Пример #51
0
def validate_and_normalize(report, client_ip=None):
    """Normalize *report* (optionally tagged with *client_ip*) and return the result."""
    event_manager = EventManager(report, client_ip=client_ip)
    event_manager.normalize()
    return event_manager.get_data()
Пример #52
0
 def make_release_event(self, release_name, project_id):
     """Create, normalize and save an event tagged with *release_name* in *project_id*."""
     event_manager = EventManager(make_event(release=release_name))
     event_manager.normalize()
     return event_manager.save(project_id)
Пример #53
0
    def test_dupe_message_id(self):
        """Saving the same event id twice must not create a duplicate row."""
        duplicate_id = 'a' * 32

        first = EventManager(make_event(event_id=duplicate_id))
        first.normalize()
        first.save(1)

        assert Event.objects.count() == 1

        # ensure that calling it again doesn't raise a db error
        second = EventManager(make_event(event_id=duplicate_id))
        second.normalize()
        second.save(1)

        assert Event.objects.count() == 1
Пример #54
0
def validate_and_normalize(data):
    """Return *data* after EventManager normalization."""
    event_manager = EventManager(data)
    event_manager.normalize()
    return event_manager.get_data()
Пример #55
0
def _do_save_event(cache_key=None,
                   data=None,
                   start_time=None,
                   event_id=None,
                   project_id=None,
                   **kwargs):
    """
    Saves an event to the database.

    On success an ACCEPTED outcome is tracked; when the event's hash was
    discarded, the quota is refunded and a FILTERED outcome with reason
    DISCARDED_HASH is tracked instead.

    :param cache_key: default-cache key; when set and ``data`` is None the
        payload is loaded from there, and the key (plus any attachment cache
        entry, feature-flag permitting) is deleted in the ``finally`` block.
    :param data: the event payload, if the caller already has it.
    :param start_time: unix timestamp the event was received at; used both
        for outcome timestamps and the ``events.time-to-process`` metric.
    :param event_id: the event id; read from ``data`` when omitted.
    :param project_id: only provided when invoked from reprocessing;
        otherwise it is popped out of ``data``.
    """

    # Imported lazily inside the task body (avoids import-time cycles with
    # the task module -- TODO confirm that is the reason here).
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas
    from sentry.models import ProjectKey
    from sentry.utils.outcomes import Outcome, track_outcome
    from sentry.ingest.outcomes_consumer import mark_signal_sent

    event_type = "none"

    if cache_key and data is None:
        with metrics.timer(
                "tasks.store.do_save_event.get_cache") as metric_tags:
            data = default_cache.get(cache_key)
            if data is not None:
                metric_tags["event_type"] = event_type = data.get(
                    "type") or "none"

    with metrics.global_tags(event_type=event_type):
        if data is not None:
            data = CanonicalKeyDict(data)

        if event_id is None and data is not None:
            event_id = data["event_id"]

        # only when we come from reprocessing we get a project_id sent into
        # the task.
        if project_id is None:
            project_id = data.pop("project")

        key_id = None if data is None else data.get("key_id")
        if key_id is not None:
            key_id = int(key_id)
        timestamp = to_datetime(start_time) if start_time is not None else None

        # We only need to delete raw events for events that support
        # reprocessing.  If the data cannot be found we want to assume
        # that we need to delete the raw event.
        if not data or reprocessing.event_supports_reprocessing(data):
            with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
                delete_raw_event(project_id, event_id, allow_hint_clear=True)

        # This covers two cases: where data is None because we did not manage
        # to fetch it from the default cache or the empty dictionary was
        # stored in the default cache.  The former happens if the event
        # expired while being on the queue, the second happens on reprocessing
        # if the raw event was deleted concurrently while we held on to
        # it.  This causes the node store to delete the data and we end up
        # fetching an empty dict.  We could in theory not invoke `save_event`
        # in those cases but it's important that we always clean up the
        # reprocessing reports correctly or they will screw up the UI.  So
        # to future proof this correctly we just handle this case here.
        if not data:
            metrics.incr("events.failed",
                         tags={
                             "reason": "cache",
                             "stage": "post"
                         },
                         skip_internal=False)
            return

        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        event = None
        try:
            with metrics.timer("tasks.store.do_save_event.event_manager.save"):
                manager = EventManager(data)
                # event.project.organization is populated after this statement.
                event = manager.save(project_id,
                                     assume_normalized=True,
                                     cache_key=cache_key)

            with metrics.timer("tasks.store.do_save_event.track_outcome"):
                # This is where we can finally say that we have accepted the event.
                track_outcome(
                    event.project.organization_id,
                    event.project.id,
                    key_id,
                    Outcome.ACCEPTED,
                    None,
                    timestamp,
                    event_id,
                )

        except HashDiscarded:
            # The event was not saved; refund the quota and record a
            # FILTERED outcome instead of ACCEPTED.
            project = Project.objects.get_from_cache(id=project_id)
            reason = FilterStatKeys.DISCARDED_HASH
            project_key = None
            try:
                if key_id is not None:
                    project_key = ProjectKey.objects.get_from_cache(id=key_id)
            except ProjectKey.DoesNotExist:
                pass

            quotas.refund(project, key=project_key, timestamp=start_time)
            # There is no signal supposed to be sent for this particular
            # outcome-reason combination. Prevent the outcome consumer from
            # emitting it for now.
            #
            # XXX(markus): Revisit decision about signals once outcomes consumer is stable.
            mark_signal_sent(project_id, event_id)
            track_outcome(
                project.organization_id,
                project_id,
                key_id,
                Outcome.FILTERED,
                reason,
                timestamp,
                event_id,
            )

        finally:
            # Cleanup runs whether the save succeeded, was discarded or raised.
            if cache_key:
                with metrics.timer("tasks.store.do_save_event.delete_cache"):
                    default_cache.delete(cache_key)

                with metrics.timer(
                        "tasks.store.do_save_event.delete_attachment_cache"):
                    # For the unlikely case that we did not manage to persist the
                    # event we also delete the key always.
                    if event is None or features.has(
                            "organizations:event-attachments",
                            event.project.organization,
                            actor=None):
                        attachment_cache.delete(cache_key)

            if start_time:
                metrics.timing("events.time-to-process",
                               time() - start_time,
                               instance=data["platform"])
Пример #56
0
 def test_key_id_remains_in_data(self):
     """key_id must survive both normalization and saving."""
     event_manager = EventManager(make_event(key_id=12345))
     event_manager.normalize()
     assert event_manager.get_data()['key_id'] == 12345
     saved_event = event_manager.save(1)
     assert saved_event.data['key_id'] == 12345
Пример #57
0
def alert(request):
    """Render a preview of the error alert email.

    Debug view: fabricates an organization, project, group and event for the
    platform given in ``?platform=`` (default ``python``), saves the event,
    and renders ``sentry/emails/error.(html|txt)`` with canned commit/author
    context via :class:`MailPreview`.
    """
    platform = request.GET.get('platform', 'python')
    # In-memory fixtures; note that `event_manager.save` below does persist
    # the event -- presumably acceptable for a debug-only view (TODO confirm).
    org = Organization(
        id=1,
        slug='example',
        name='Example',
    )
    project = Project(
        id=1,
        slug='example',
        name='Example',
        organization=org,
    )

    random = get_random(request)
    group = next(make_group_generator(random, project), )

    data = dict(load_data(platform))
    data['message'] = group.message
    data['event_id'] = '44f1419e73884cd2b45c79918f4b6dc4'
    data.pop('logentry', None)
    data['environment'] = 'prod'
    data['tags'] = [('logger', 'javascript'), ('environment', 'prod'),
                    ('level', 'error'), ('device', 'Other')]

    event_manager = EventManager(data)
    event_manager.normalize()
    data = event_manager.get_data()
    event = event_manager.save(project.id)
    # Prevent Percy screenshot from constantly changing
    event.datetime = datetime(2017, 9, 6, 0, 0)
    event.save()
    event_type = event_manager.get_event_type()

    group.message = event_manager.get_search_message()
    group.data = {
        'type': event_type.key,
        'metadata': event_type.get_metadata(data),
    }

    rule = Rule(label="An example rule")

    interface_list = []
    for interface in six.itervalues(event.interfaces):
        body = interface.to_email_html(event)
        if not body:
            continue
        interface_list.append((interface.get_title(), mark_safe(body)))

    # Static commit/author payload so the rendered preview is deterministic.
    return MailPreview(
        html_template='sentry/emails/error.html',
        text_template='sentry/emails/error.txt',
        context={
            'rule':
            rule,
            'group':
            group,
            'event':
            event,
            'link':
            'http://example.com/link',
            'interfaces':
            interface_list,
            'tags':
            event.get_tags(),
            'project_label':
            project.slug,
            'commits': [{
                # TODO(dcramer): change to use serializer
                "repository": {
                    "status": "active",
                    "name": "Example Repo",
                    "url": "https://github.com/example/example",
                    "dateCreated": "2018-02-28T23:39:22.402Z",
                    "provider": {
                        "id": "github",
                        "name": "GitHub"
                    },
                    "id": "1"
                },
                "score": 2,
                "subject": "feat: Do something to raven/base.py",
                "message":
                "feat: Do something to raven/base.py\naptent vivamus vehicula tempus volutpat hac tortor",
                "id": "1b17483ffc4a10609e7921ee21a8567bfe0ed006",
                "shortId": "1b17483",
                "author": {
                    "username":
                    "******",
                    "isManaged":
                    False,
                    "lastActive":
                    "2018-03-01T18:25:28.149Z",
                    "id":
                    "1",
                    "isActive":
                    True,
                    "has2fa":
                    False,
                    "name":
                    "*****@*****.**",
                    "avatarUrl":
                    "https://secure.gravatar.com/avatar/51567a4f786cd8a2c41c513b592de9f9?s=32&d=mm",
                    "dateJoined":
                    "2018-02-27T22:04:32.847Z",
                    "emails": [{
                        "is_verified": False,
                        "id": "1",
                        "email": "*****@*****.**"
                    }],
                    "avatar": {
                        "avatarUuid": None,
                        "avatarType": "letter_avatar"
                    },
                    "lastLogin":
                    "******",
                    "email":
                    "*****@*****.**"
                }
            }],
        },
    ).render(request)
Пример #58
0
 def test_bad_logger(self):
     """A logger name containing whitespace is replaced with the default logger."""
     event_manager = EventManager(self.make_event(logger='foo bar'))
     normalized = event_manager.normalize()
     assert normalized['logger'] == DEFAULT_LOGGER_NAME
Пример #59
0
    def test_marks_as_unresolved_with_new_release(
            self, plugin_is_regression,
            mock_send_activity_notifications_delay):
        """A group resolved in release "a" stays resolved for events from "a",
        but regresses to UNRESOLVED when an event arrives from a newer
        release "b": the resolution row is removed, the resolution activity's
        version is updated, a SET_REGRESSION activity is created and a
        notification is scheduled."""
        plugin_is_regression.return_value = True

        old_release = Release.objects.create(
            version="a",
            organization_id=self.project.organization_id,
            date_added=timezone.now() - timedelta(minutes=30),
        )
        old_release.add_project(self.project)

        manager = EventManager(
            make_event(
                event_id="a" * 32,
                checksum="a" * 32,
                timestamp=time() - 50000,  # need to work around active_at
                release=old_release.version,
            ))
        event = manager.save(1)

        group = event.group

        group.update(status=GroupStatus.RESOLVED)

        resolution = GroupResolution.objects.create(release=old_release,
                                                    group=group)
        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.SET_RESOLVED_IN_RELEASE,
            ident=resolution.id,
            data={"version": ""},
        )

        # Event from the same (resolved-in) release: must NOT regress.
        manager = EventManager(
            make_event(event_id="b" * 32,
                       checksum="a" * 32,
                       timestamp=time(),
                       release=old_release.version))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.RESOLVED

        activity = Activity.objects.get(id=activity.id)
        assert activity.data["version"] == ""

        assert GroupResolution.objects.filter(group=group).exists()

        # Event from a newer release "b": must regress the group.
        manager = EventManager(
            make_event(event_id="c" * 32,
                       checksum="a" * 32,
                       timestamp=time(),
                       release="b"))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.UNRESOLVED

        activity = Activity.objects.get(id=activity.id)
        assert activity.data["version"] == "b"

        assert not GroupResolution.objects.filter(group=group).exists()

        activity = Activity.objects.get(group=group,
                                        type=Activity.SET_REGRESSION)

        mock_send_activity_notifications_delay.assert_called_once_with(
            activity.id)
Пример #60
0
 def test_explicit_version(self):
     """An explicitly supplied protocol version ('6') overrides the default."""
     manager = EventManager(self.make_event(), '6')
     data = manager.normalize()
     assert data['version'] == '6'