Example #1
0
    def test_saves_event_mapping_when_sampled(self, should_sample):
        """With sampling forced on, a brand-new event is fully stored
        (an Event row, no EventMapping), while a duplicate second event
        is sampled (an EventMapping row only, no Event row)."""
        should_sample.return_value = True
        event_id = 'a' * 32

        manager = EventManager(self.make_event(event_id=event_id))
        event = manager.save(1)

        # This is a brand new event, so it is actually saved.
        # In this case, we don't need an EventMapping, but we
        # do need the Event.
        assert not EventMapping.objects.filter(
            group_id=event.group_id,
            event_id=event_id,
        ).exists()

        assert Event.objects.filter(
            event_id=event_id,
        ).exists()

        event_id = 'b' * 32

        manager = EventManager(self.make_event(event_id=event_id))
        event = manager.save(1)

        # This second is a dupe, so should be sampled
        # For a sample, we want to store the EventMapping,
        # but don't need to store the Event
        assert EventMapping.objects.filter(
            group_id=event.group_id,
            event_id=event_id,
        ).exists()

        assert not Event.objects.filter(
            event_id=event_id,
        ).exists()
示例#2
0
    def test_updates_group(self):
        """A second event with the same checksum joins the existing group
        and updates its times_seen, last_seen, message, and metadata
        rather than creating a new group."""
        timestamp = time() - 300
        manager = EventManager(
            make_event(
                message='foo',
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=timestamp,
            )
        )
        manager.normalize()
        event = manager.save(1)

        # Same checksum, later timestamp: should update, not create.
        manager = EventManager(
            make_event(
                message='foo bar',
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=timestamp + 2.0,
            )
        )
        manager.normalize()

        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        assert group.times_seen == 2
        assert group.last_seen == event2.datetime
        assert group.message == event2.message
        assert group.data.get('type') == 'default'
        assert group.data.get('metadata') == {
            'title': 'foo bar',
        }
示例#3
0
    def test_unresolves_group_with_auto_resolve(self, mock_is_resolved):
        """When the auto-resolve check flips from False to True between two
        events of the same group, the group's active_at is moved up to the
        second event's time (compared at minute granularity)."""
        ts = time() - 100
        mock_is_resolved.return_value = False
        manager = EventManager(
            make_event(
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=ts,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Second event arrives after the group would have auto-resolved.
        mock_is_resolved.return_value = True
        manager = EventManager(
            make_event(
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=ts + 100,
            )
        )
        with self.tasks():
            event2 = manager.save(1)
        assert event.group_id == event2.group_id

        group = Group.objects.get(id=event.group.id)
        # Compare with seconds zeroed to tolerate sub-minute storage skew.
        assert group.active_at.replace(second=0) == event2.datetime.replace(second=0)
        assert group.active_at.replace(second=0) != event.datetime.replace(second=0)
示例#4
0
    def test_unresolves_group(self):
        """A resolved group is flipped back to unresolved when a later
        event with the same checksum arrives."""
        ts = time() - 300

        # N.B. EventManager won't unresolve the group unless the event2 has a
        # later timestamp than event1.
        manager = EventManager(
            make_event(
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=ts,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Manually mark the group resolved before the second event lands.
        group = Group.objects.get(id=event.group_id)
        group.status = GroupStatus.RESOLVED
        group.save()
        assert group.is_resolved()

        manager = EventManager(
            make_event(
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=ts + 50,
            )
        )
        event2 = manager.save(1)
        assert event.group_id == event2.group_id

        group = Group.objects.get(id=group.id)
        assert not group.is_resolved()
示例#5
0
    def test_updates_group_with_fingerprint(self):
        """Two events sharing a fingerprint land in one group; the group's
        counters and message are updated by the second event."""
        manager = EventManager(
            self.make_event(
                message='foo',
                event_id='a' * 32,
                fingerprint=['a' * 32],
            )
        )
        with self.tasks():
            event = manager.save(1)

        manager = EventManager(
            self.make_event(
                message='foo bar',
                event_id='b' * 32,
                fingerprint=['a' * 32],
            )
        )
        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        assert group.times_seen == 2
        # Compared against the first event's datetime — presumably both
        # events carry the same default timestamp here; microseconds are
        # stripped to tolerate storage precision loss.
        assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
        assert group.message == event2.message
示例#6
0
    def test_updates_group(self):
        """Checksum-matched events merge into one group; the second event
        updates times_seen, message, type, and metadata."""
        manager = EventManager(
            self.make_event(
                message='foo',
                event_id='a' * 32,
                checksum='a' * 32,
            )
        )
        event = manager.save(1)

        manager = EventManager(
            self.make_event(
                message='foo bar',
                event_id='b' * 32,
                checksum='a' * 32,
            )
        )
        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        assert group.times_seen == 2
        # Microseconds stripped to tolerate database precision loss;
        # both events presumably share the same default timestamp.
        assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
        assert group.message == event2.message
        assert group.data.get('type') == 'default'
        assert group.data.get('metadata') == {
            'title': 'foo bar',
        }
示例#7
0
    def test_event_user(self):
        """Saving an event with a user interface creates an EventUser and a
        'sentry:user' tag; a repeat save with the same user behaves
        identically (the mapping is idempotent, not an error)."""
        # The second pass exists to ensure the event user is mapped to
        # tags on repeated saves as well.
        for _ in range(2):
            manager = EventManager(self.make_event(**{
                'sentry.interfaces.User': {
                    'id': '1',
                }
            }))
            manager.normalize()
            event = manager.save(self.project.id)

            assert EventUser.objects.filter(
                project=self.project,
                ident='1',
            ).exists()
            assert 'sentry:user' in dict(event.tags)
示例#8
0
    def test_unresolves_group_with_auto_resolve(self, mock_is_resolved):
        """When the auto-resolve check flips to True before a later event
        of the same group arrives, active_at is reset to the newer
        event's datetime."""
        mock_is_resolved.return_value = False
        manager = EventManager(
            self.make_event(
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=1403007314,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Second event lands 100 seconds later, after auto-resolution.
        mock_is_resolved.return_value = True
        manager = EventManager(
            self.make_event(
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=1403007414,
            )
        )
        with self.tasks():
            event2 = manager.save(1)
        assert event.group_id == event2.group_id

        group = Group.objects.get(id=event.group.id)
        assert group.active_at == event2.datetime != event.datetime
示例#9
0
    def test_group_release_with_env(self):
        """One GroupRelease row is recorded per (release, group,
        environment) combination when events for the same release arrive
        with different environments."""
        cases = (
            ('prod', 'a' * 32),
            ('staging', 'b' * 32),
        )
        for env_name, evt_id in cases:
            manager = EventManager(self.make_event(
                release='1.0', environment=env_name,
                event_id=evt_id))
            event = manager.save(1)

            release = Release.objects.get(version='1.0', projects=event.project_id)

            assert GroupRelease.objects.filter(
                release_id=release.id,
                group_id=event.group_id,
                environment=env_name,
            ).exists()
示例#10
0
File: store.py  Project: rlugojr/sentry
def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.

    The payload is either passed directly via ``data`` or fetched from the
    default cache under ``cache_key``.  Whatever happens, the cache entry is
    cleared afterwards and, when ``start_time`` is provided, a
    time-to-process metric is emitted.
    """
    from sentry.event_manager import EventManager

    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    # The payload expired from the cache (or was never supplied): record
    # the failure and bail out.
    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project = data.pop('project')

    # Clear any raw event kept around for reprocessing of this event id.
    delete_raw_event(project, event_id)

    Raven.tags_context({
        'project': project,
    })

    try:
        manager = EventManager(data)
        manager.save(project)
    finally:
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            # NOTE(review): assumes 'platform' is present in the payload —
            # a missing key here would mask any error raised by save().
            metrics.timing('events.time-to-process', time() - start_time,
                           instance=data['platform'])
    def test_does_not_unresolve_group(self, plugin_is_regression):
        """When the regression plugin check says False, a later event must
        NOT flip a resolved group back to unresolved."""
        # N.B. EventManager won't unresolve the group unless the event2 has a
        # later timestamp than event1. MySQL doesn't support microseconds.
        plugin_is_regression.return_value = False

        manager = EventManager(self.make_event(
            event_id='a' * 32, checksum='a' * 32,
            timestamp=1403007314,
        ))
        with self.tasks():
            event = manager.save(1)

        # Resolve the group manually before the second event arrives.
        group = Group.objects.get(id=event.group_id)
        group.status = GroupStatus.RESOLVED
        group.save()
        assert group.is_resolved()

        manager = EventManager(self.make_event(
            event_id='b' * 32, checksum='a' * 32,
            timestamp=1403007315,
        ))
        event2 = manager.save(1)
        assert event.group_id == event2.group_id

        group = Group.objects.get(id=group.id)
        assert group.is_resolved()
示例#12
0
    def test_updates_group_with_fingerprint(self):
        """Two events with identical fingerprints and the same timestamp
        merge into one group; counters and message update, last_seen stays
        at the shared event time."""
        ts = time() - 200
        manager = EventManager(
            make_event(
                message='foo',
                event_id='a' * 32,
                fingerprint=['a' * 32],
                timestamp=ts,
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Same fingerprint and same timestamp — only the message differs.
        manager = EventManager(
            make_event(
                message='foo bar',
                event_id='b' * 32,
                fingerprint=['a' * 32],
                timestamp=ts,
            )
        )
        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        assert group.times_seen == 2
        assert group.last_seen == event.datetime
        assert group.message == event2.message
示例#13
0
    def test_user_report_gets_environment(self):
        """A pre-existing UserReport for an event id is backfilled with the
        event's environment once the event itself is saved."""
        project = self.create_project()
        environment = Environment.objects.create(
            project_id=project.id,
            organization_id=project.organization_id,
            name='production',
        )
        environment.add_project(project)
        event_id = 'a' * 32

        # The report exists before the event does — no environment yet.
        group = self.create_group(project=project)
        UserReport.objects.create(
            group=group,
            project=project,
            event_id=event_id,
            name='foo',
            email='*****@*****.**',
            comments='It Broke!!!',
        )
        manager = EventManager(
            self.make_event(
                environment=environment.name,
                event_id=event_id,
                group=group))
        manager.normalize()
        manager.save(project.id)
        assert UserReport.objects.get(event_id=event_id).environment == environment
示例#14
0
 def test_event_user_unicode_identifier(self):
     """A non-ASCII username round-trips intact into the EventUser row."""
     manager = EventManager(self.make_event(**{'sentry.interfaces.User': {'username': u'foô'}}))
     manager.normalize()
     with self.tasks():
         manager.save(self.project.id)
     euser = EventUser.objects.get(
         project_id=self.project.id,
     )
     assert euser.username == u'foô'
    def test_similar_message_prefix_doesnt_group(self):
        """Regression guard: the default hash was once built from
        'event.message' instead of '[event.message]', which hashed per
        letter and made messages sharing a prefix collapse together.
        Similar-but-different messages must land in separate groups."""
        first_manager = EventManager(self.make_event(message='foo bar'))
        first = first_manager.save(1)

        second_manager = EventManager(self.make_event(message='foo baz'))
        second = second_manager.save(1)

        assert first.group_id != second.group_id
示例#16
0
    def test_differentiates_with_fingerprint(self):
        """A fingerprint that extends '{{ default }}' must not share a
        group with an event carrying only the bare fingerprint value."""
        manager = EventManager(
            self.make_event(message="foo", event_id="a" * 32, fingerprint=["{{ default }}", "a" * 32])
        )
        with self.tasks():
            event = manager.save(1)

        manager = EventManager(self.make_event(message="foo bar", event_id="b" * 32, fingerprint=["a" * 32]))
        with self.tasks():
            event2 = manager.save(1)

        assert event.group_id != event2.group_id
示例#17
0
    def test_sample_feature_flag(self, should_sample):
        """With sampling forced on, the event is persisted (gets an id)
        when the 'projects:sample-events' feature is enabled, and is not
        persisted (no id) when the feature is disabled."""
        should_sample.return_value = True

        manager = EventManager(self.make_event())
        with self.feature('projects:sample-events'):
            event = manager.save(1)
        assert event.id

        manager = EventManager(self.make_event())
        with self.feature('projects:sample-events', False):
            event = manager.save(1)
        assert not event.id
    def test_first_release(self):
        """group.first_release is pinned to the first event's release and
        is not overwritten when a later event ships a newer release."""
        for version in ('1.0', '2.0'):
            manager = EventManager(self.make_event(release=version))
            event = manager.save(1)

            # Regardless of the release on the incoming event, the group
            # keeps the release it was first seen with.
            assert event.group.first_release.version == '1.0'
示例#19
0
    def test_dupe_message_id(self):
        """Saving two events with the same event_id is idempotent: the
        second save is a no-op instead of a database error, and exactly
        one Event row exists afterwards."""
        # Use the local instead of repeating the literal 'a' * 32 — the
        # original defined event_id but never used it.
        event_id = 'a' * 32

        manager = EventManager(self.make_event(event_id=event_id))
        manager.save(1)

        assert Event.objects.count() == 1

        # ensure that calling it again doesn't raise a db error
        manager = EventManager(self.make_event(event_id=event_id))
        manager.save(1)

        assert Event.objects.count() == 1
示例#20
0
    def test_updates_group(self):
        """Checksum-matched events merge into one group whose counters
        and message are updated by the second event."""
        manager = EventManager(self.make_event(message="foo", event_id="a" * 32, checksum="a" * 32))
        event = manager.save(1)

        manager = EventManager(self.make_event(message="foo bar", event_id="b" * 32, checksum="a" * 32))
        with self.tasks():
            event2 = manager.save(1)

        group = Group.objects.get(id=event.group_id)

        assert group.times_seen == 2
        # Microseconds stripped to tolerate database precision loss.
        assert group.last_seen.replace(microsecond=0) == event.datetime.replace(microsecond=0)
        assert group.message == event2.message
示例#21
0
 def make_event(self, **kwargs):
     """Build a minimal event payload, apply ``kwargs`` overrides, then
     normalize and persist it through EventManager.

     Returns the saved event so callers can inspect the result — the
     original discarded it, which made the helper's outcome unobservable.
     Existing callers that ignore the return value are unaffected.
     """
     result = {
         'event_id': 'a' * 32,
         'message': 'foo',
         'timestamp': 1403007314.570599,
         'level': logging.ERROR,
         'logger': 'default',
         'tags': [],
     }
     result.update(kwargs)
     manager = EventManager(result)
     manager.normalize()
     return manager.save(self.project.id)
示例#22
0
    def test_throws_when_matches_discarded_hash(self):
        """An incoming event whose hash points at a GroupTombstone raises
        HashDiscarded; the event_discarded signal fires exactly once and
        event_saved never fires."""
        manager = EventManager(
            self.make_event(
                message='foo',
                event_id='a' * 32,
                fingerprint=['a' * 32],
            )
        )
        with self.tasks():
            event = manager.save(1)

        # Tombstone the group and re-point its hashes at the tombstone so
        # the next matching event is discarded instead of grouped.
        group = Group.objects.get(id=event.group_id)
        tombstone = GroupTombstone.objects.create(
            project_id=group.project_id,
            level=group.level,
            message=group.message,
            culprit=group.culprit,
            data=group.data,
            previous_group_id=group.id,
        )
        GroupHash.objects.filter(
            group=group,
        ).update(
            group=None,
            group_tombstone_id=tombstone.id,
        )

        manager = EventManager(
            self.make_event(
                message='foo',
                event_id='b' * 32,
                fingerprint=['a' * 32],
            )
        )

        # Listen on both signals to verify which one actually fires.
        mock_event_discarded = mock.Mock()
        event_discarded.connect(mock_event_discarded)
        mock_event_saved = mock.Mock()
        event_saved.connect(mock_event_saved)

        with self.tasks():
            with self.assertRaises(HashDiscarded):
                event = manager.save(1)

        assert not mock_event_saved.called
        assert_mock_called_once_with_partial(
            mock_event_discarded,
            project=group.project,
            sender=EventManager,
            signal=event_discarded,
        )
示例#23
0
    def test_event_user(self):
        """Saving an event with a user interface records the user in tsdb
        distinct counts (per group and per project), creates an EventUser,
        and tags the event; a second save with extra user data updates the
        same EventUser in place."""
        manager = EventManager(self.make_event(**{
            'sentry.interfaces.User': {
                'id': '1',
            }
        }))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id,),
            event.datetime,
            event.datetime,
        ) == {
            event.group.id: 1,
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id,),
            event.datetime,
            event.datetime,
        ) == {
            event.project.id: 1,
        }

        euser = EventUser.objects.get(
            project=self.project,
            ident='1',
        )
        assert event.get_tag('sentry:user') == euser.tag_value

        # ensure event user is mapped to tags in second attempt
        manager = EventManager(self.make_event(**{
            'sentry.interfaces.User': {
                'id': '1',
                'name': 'jane',
            }
        }))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        # Same EventUser row (same id), now enriched with the name.
        euser = EventUser.objects.get(id=euser.id)
        assert event.get_tag('sentry:user') == euser.tag_value
        assert euser.name == 'jane'
        assert euser.ident == '1'
示例#24
0
    def test_group_release_no_env(self):
        """Without an environment, GroupRelease is stored with an empty
        environment string, and a second identical save must not error."""
        manager = EventManager(self.make_event(release='1.0'))
        saved = manager.save(1)

        matching_release = Release.objects.get(
            version='1.0', projects=saved.project_id,
        )
        group_release_exists = GroupRelease.objects.filter(
            release_id=matching_release.id,
            group_id=saved.group_id,
            environment='',
        ).exists()
        assert group_release_exists

        # ensure we're not erroring on second creation
        EventManager(self.make_event(release='1.0')).save(1)
示例#25
0
    def test_ephemral_interfaces_removed_on_save(self):
        """The platform survives saving on both the event and its group.

        NOTE(review): the name has a typo ('ephemral' → 'ephemeral') and
        the body only checks platform persistence — renaming is avoided
        here because it would change the test's identity.
        """
        manager = EventManager(self.make_event(platform='python'))
        event = manager.save(1)

        group = event.group
        assert group.platform == 'python'
        assert event.platform == 'python'
    def test_differentiates_with_fingerprint(self):
        """A fingerprint extending '{{ default }}' must not share a group
        with an event carrying only the bare fingerprint value."""
        manager = EventManager(self.make_event(
            message='foo', event_id='a' * 32,
            fingerprint=['{{ default }}', 'a' * 32],
        ))
        with self.tasks():
            event = manager.save(1)

        manager = EventManager(self.make_event(
            message='foo bar', event_id='b' * 32,
            fingerprint=['a' * 32],
        ))
        with self.tasks():
            event2 = manager.save(1)

        assert event.group_id != event2.group_id
示例#27
0
 def test_invalid_environment(self):
     """An environment name containing invalid characters ('/') is
     dropped during normalization, so no environment tag is set."""
     manager = EventManager(make_event(**{
         'environment': 'bad/name',
     }))
     manager.normalize()
     event = manager.save(self.project.id)
     assert dict(event.tags).get('environment') is None
示例#28
0
    def test_platform_is_saved(self):
        """The platform value is persisted on both the saved event and
        its group."""
        saved = EventManager(self.make_event(platform='python')).save(1)

        assert saved.group.platform == 'python'
        assert saved.platform == 'python'
    def test_event_user(self):
        """Saving an event with a user interface records the user in tsdb
        distinct counts (per group and per project), creates an EventUser,
        and tags the event; a second identical save behaves the same."""
        manager = EventManager(self.make_event(**{
            'sentry.interfaces.User': {
                'id': '1',
            }
        }))
        manager.normalize()
        event = manager.save(self.project.id)

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id,),
            event.datetime,
            event.datetime,
        ) == {
            event.group.id: 1,
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id,),
            event.datetime,
            event.datetime,
        ) == {
            event.project.id: 1,
        }

        assert EventUser.objects.filter(
            project=self.project,
            ident='1',
        ).exists()
        assert 'sentry:user' in dict(event.tags)

        # ensure event user is mapped to tags in second attempt
        manager = EventManager(self.make_event(**{
            'sentry.interfaces.User': {
                'id': '1',
            }
        }))
        manager.normalize()
        event = manager.save(self.project.id)

        assert EventUser.objects.filter(
            project=self.project,
            ident='1',
        ).exists()
        assert 'sentry:user' in dict(event.tags)
示例#30
0
    def test(self, mock_delay_index_event_tags, mock_eventstream_insert):
        """End-to-end eventstream check: saving an event produces the
        expected eventstream.insert() kwargs, those kwargs serialize to a
        version-2 Kafka 'insert' message, and feeding that payload into
        Snuba makes the event queryable."""
        now = datetime.utcnow()

        def _get_event_count():
            # Count events for this project in a two-day window around now.
            return snuba.query(
                start=now - timedelta(days=1),
                end=now + timedelta(days=1),
                groupby=['project_id'],
                filter_keys={'project_id': [self.project.id]},
            ).get(self.project.id, 0)

        assert _get_event_count() == 0

        raw_event = {
            'event_id': 'a' * 32,
            'message': 'foo',
            'timestamp': time.mktime(now.timetuple()),
            'level': logging.ERROR,
            'logger': 'default',
            'tags': [],
        }

        manager = EventManager(raw_event)
        manager.normalize()
        event = manager.save(self.project.id)

        # verify eventstream was called by EventManager
        insert_args, insert_kwargs = list(mock_eventstream_insert.call_args)
        assert not insert_args
        assert insert_kwargs == {
            'event': event,
            'group': event.group,
            'is_new_group_environment': True,
            'is_new': True,
            'is_regression': False,
            'is_sample': False,
            'primary_hash': 'acbd18db4cc2f85cedef654fccc4a4d8',
            'skip_consume': False
        }

        assert mock_delay_index_event_tags.call_count == 1

        # pass arguments on to Kafka EventManager
        self.kafka_eventstream.insert(*insert_args, **insert_kwargs)

        produce_args, produce_kwargs = list(self.kafka_eventstream.producer.produce.call_args)
        assert not produce_args
        assert produce_kwargs['topic'] == 'events'
        assert produce_kwargs['key'] == six.text_type(self.project.id)

        version, type_, payload1, payload2 = json.loads(produce_kwargs['value'])
        assert version == 2
        assert type_ == 'insert'

        # insert what would have been the Kafka payload directly
        # into Snuba, expect an HTTP 200 and for the event to now exist
        snuba_eventstream = SnubaEventStream()
        snuba_eventstream._send(self.project.id, 'insert', (payload1, payload2))
        assert _get_event_count() == 1
示例#31
0
def _do_save_event(
    cache_key=None, data=None, start_time=None, event_id=None, project_id=None, **kwargs
):
    """
    Saves an event to the database.

    The payload comes either from ``data`` directly or from the default
    cache under ``cache_key``.  ``project_id`` is only provided when the
    task is invoked from reprocessing; otherwise it is popped from the
    payload.  HashDiscarded (tombstoned event) is swallowed silently.  The
    cache entries (event and attachments) are cleaned up in all cases.
    """

    set_current_project(project_id)

    from sentry.event_manager import EventManager, HashDiscarded

    event_type = "none"

    if cache_key and data is None:
        with metrics.timer("tasks.store.do_save_event.get_cache") as metric_tags:
            data = default_cache.get(cache_key)
            if data is not None:
                metric_tags["event_type"] = event_type = data.get("type") or "none"

    with metrics.global_tags(event_type=event_type):
        if data is not None:
            data = CanonicalKeyDict(data)

        if event_id is None and data is not None:
            event_id = data["event_id"]

        # only when we come from reprocessing we get a project_id sent into
        # the task.
        if project_id is None:
            project_id = data.pop("project")
            set_current_project(project_id)

        # We only need to delete raw events for events that support
        # reprocessing.  If the data cannot be found we want to assume
        # that we need to delete the raw event.
        if not data or reprocessing.event_supports_reprocessing(data):
            with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
                delete_raw_event(project_id, event_id, allow_hint_clear=True)

        # This covers two cases: where data is None because we did not manage
        # to fetch it from the default cache or the empty dictionary was
        # stored in the default cache.  The former happens if the event
        # expired while being on the queue, the second happens on reprocessing
        # if the raw event was deleted concurrently while we held on to
        # it.  This causes the node store to delete the data and we end up
        # fetching an empty dict.  We could in theory not invoke `save_event`
        # in those cases but it's important that we always clean up the
        # reprocessing reports correctly or they will screw up the UI.  So
        # to future proof this correctly we just handle this case here.
        if not data:
            metrics.incr(
                "events.failed", tags={"reason": "cache", "stage": "post"}, skip_internal=False
            )
            return

        event = None
        try:
            with metrics.timer("tasks.store.do_save_event.event_manager.save"):
                manager = EventManager(data)
                # event.project.organization is populated after this statement.
                event = manager.save(
                    project_id, assume_normalized=True, start_time=start_time, cache_key=cache_key
                )

        except HashDiscarded:
            # The event matched a discarded (tombstoned) hash — drop it.
            pass

        finally:
            if cache_key:
                with metrics.timer("tasks.store.do_save_event.delete_cache"):
                    default_cache.delete(cache_key)

                with metrics.timer("tasks.store.do_save_event.delete_attachment_cache"):
                    # For the unlikely case that we did not manage to persist the
                    # event we also delete the key always.
                    if event is None or features.has(
                        "organizations:event-attachments", event.project.organization, actor=None
                    ):
                        attachment_cache.delete(cache_key)

            if start_time:
                # NOTE(review): assumes 'platform' is present in the payload.
                metrics.timing(
                    "events.time-to-process", time() - start_time, instance=data["platform"]
                )
示例#32
0
    def test_stringified_message(self):
        """A non-string message (an int here) is coerced to its string form
        in the normalized logentry interface."""
        manager = EventManager(make_event(message=1234))
        manager.normalize()
        saved = manager.save(self.project.id)

        expected_logentry = {"formatted": "1234", "message": None, "params": None}
        assert saved.data["logentry"] == expected_logentry
示例#33
0
def _do_save_event(cache_key=None, data=None, start_time=None, event_id=None,
                   project_id=None, **kwargs):
    """
    Saves an event to the database.

    The payload comes either from ``data`` directly or from the default
    cache under ``cache_key``.  ``project_id`` is only supplied when
    invoked from reprocessing; otherwise it is popped from the payload.
    A HashDiscarded event (tombstoned) is not saved; instead discard
    counters are bumped and the quota is refunded.  Cache entries are
    cleaned up in all cases.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas, tsdb
    from sentry.models import ProjectKey

    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if data is not None:
        data = CanonicalKeyDict(data)

    if event_id is None and data is not None:
        event_id = data['event_id']

    # only when we come from reprocessing we get a project_id sent into
    # the task.
    if project_id is None:
        project_id = data.pop('project')

    # Clear any raw event kept around for reprocessing of this event id.
    delete_raw_event(project_id, event_id, allow_hint_clear=True)

    # This covers two cases: where data is None because we did not manage
    # to fetch it from the default cache or the empty dictionary was
    # stored in the default cache.  The former happens if the event
    # expired while being on the queue, the second happens on reprocessing
    # if the raw event was deleted concurrently while we held on to
    # it.  This causes the node store to delete the data and we end up
    # fetching an empty dict.  We could in theory not invoke `save_event`
    # in those cases but it's important that we always clean up the
    # reprocessing reports correctly or they will screw up the UI.  So
    # to future proof this correctly we just handle this case here.
    if not data:
        metrics.incr(
            'events.failed',
            tags={
                'reason': 'cache',
                'stage': 'post'},
            skip_internal=False)
        return

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    event = None
    try:
        manager = EventManager(data)
        event = manager.save(project_id, assume_normalized=True)

        # Always load attachments from the cache so we can later prune them.
        # Only save them if the event-attachments feature is active, though.
        if features.has('organizations:event-attachments', event.project.organization, actor=None):
            # NOTE(review): attachment_cache.get is called even when
            # cache_key is None — presumably the cache tolerates that;
            # verify against the cache implementation.
            attachments = attachment_cache.get(cache_key) or []
            for attachment in attachments:
                save_attachment(event, attachment)

    except HashDiscarded:
        # The event matched a tombstoned hash: record the discard at the
        # project, organization, and (if resolvable) key level, and refund
        # the consumed quota.
        increment_list = [
            (tsdb.models.project_total_received_discarded, project_id),
        ]

        try:
            project = Project.objects.get_from_cache(id=project_id)
        except Project.DoesNotExist:
            pass
        else:
            increment_list.extend([
                (tsdb.models.project_total_blacklisted, project.id),
                (tsdb.models.organization_total_blacklisted, project.organization_id),
            ])

            project_key = None
            if data.get('key_id') is not None:
                try:
                    project_key = ProjectKey.objects.get_from_cache(id=data['key_id'])
                except ProjectKey.DoesNotExist:
                    pass
                else:
                    increment_list.append((tsdb.models.key_total_blacklisted, project_key.id))

            quotas.refund(
                project,
                key=project_key,
                timestamp=start_time,
            )

        tsdb.incr_multi(
            increment_list,
            timestamp=to_datetime(start_time) if start_time is not None else None,
        )

    finally:
        if cache_key:
            default_cache.delete(cache_key)

            # For the unlikely case that we did not manage to persist the
            # event we also delete the key always.
            if event is None or \
               features.has('organizations:event-attachments', event.project.organization, actor=None):
                attachment_cache.delete(cache_key)

        if start_time:
            # NOTE(review): assumes 'platform' is present in the payload.
            metrics.timing(
                'events.time-to-process',
                time() - start_time,
                instance=data['platform'])
示例#34
0
 def test_invalid_project(self):
     """Saving to a nonexistent project id raises Project.DoesNotExist."""
     manager = EventManager(self.make_event())
     with self.assertRaises(Project.DoesNotExist):
         event = manager.save(2)
示例#35
0
    def test_environment(self):
        """A valid environment value is surfaced as an 'environment' tag."""
        manager = EventManager(make_event(environment="beta"))
        manager.normalize()
        saved = manager.save(self.project.id)

        tags = dict(saved.tags)
        assert tags.get("environment") == "beta"
示例#36
0
def alert(request):
    """Render a debug preview of the error alert email.

    Builds an in-memory Organization/Project pair, synthesizes a sample
    event for the platform given in the ``platform`` query parameter
    (default ``python``), saves it, and renders the alert email templates
    through ``MailPreview``.
    """
    platform = request.GET.get('platform', 'python')
    # In-memory fixtures with hard-coded ids; this is a preview view.
    org = Organization(
        id=1,
        slug='example',
        name='Example',
    )
    project = Project(
        id=1,
        slug='example',
        name='Example',
        organization=org,
    )

    random = get_random(request)
    group = next(make_group_generator(random, project), )

    # Start from the platform's canned sample payload and pin the fields
    # that should look consistent in the rendered preview.
    data = dict(load_data(platform))
    data['message'] = group.message
    data['event_id'] = '44f1419e73884cd2b45c79918f4b6dc4'
    data.pop('logentry', None)
    data['environment'] = 'prod'
    data['tags'] = [('logger', 'javascript'), ('environment', 'prod'),
                    ('level', 'error'), ('device', 'Other')]

    event_manager = EventManager(data)
    event_manager.normalize()
    data = event_manager.get_data()
    event = event_manager.save(project.id)
    # Prevent Percy screenshot from constantly changing
    event.datetime = datetime(2017, 9, 6, 0, 0)
    event.save()
    event_type = event_manager.get_event_type()

    # Mirror the derived message/metadata onto the group so the templates
    # render titles that match the event.
    group.message = event_manager.get_search_message()
    group.data = {
        'type': event_type.key,
        'metadata': event_type.get_metadata(data),
    }

    rule = Rule(label="An example rule")

    # Collect the email-HTML rendering of every interface that yields one.
    interface_list = []
    for interface in six.itervalues(event.interfaces):
        body = interface.to_email_html(event)
        if not body:
            continue
        interface_list.append((interface.get_title(), mark_safe(body)))

    return MailPreview(
        html_template='sentry/emails/error.html',
        text_template='sentry/emails/error.txt',
        context={
            'rule':
            rule,
            'group':
            group,
            'event':
            event,
            'link':
            'http://example.com/link',
            'interfaces':
            interface_list,
            'tags':
            event.get_tags(),
            'project_label':
            project.slug,
            'commits': [{
                # TODO(dcramer): change to use serializer
                "repository": {
                    "status": "active",
                    "name": "Example Repo",
                    "url": "https://github.com/example/example",
                    "dateCreated": "2018-02-28T23:39:22.402Z",
                    "provider": {
                        "id": "github",
                        "name": "GitHub"
                    },
                    "id": "1"
                },
                "score": 2,
                "subject": "feat: Do something to raven/base.py",
                "message":
                "feat: Do something to raven/base.py\naptent vivamus vehicula tempus volutpat hac tortor",
                "id": "1b17483ffc4a10609e7921ee21a8567bfe0ed006",
                "shortId": "1b17483",
                "author": {
                    "username":
                    "******",
                    "isManaged":
                    False,
                    "lastActive":
                    "2018-03-01T18:25:28.149Z",
                    "id":
                    "1",
                    "isActive":
                    True,
                    "has2fa":
                    False,
                    "name":
                    "*****@*****.**",
                    "avatarUrl":
                    "https://secure.gravatar.com/avatar/51567a4f786cd8a2c41c513b592de9f9?s=32&d=mm",
                    "dateJoined":
                    "2018-02-27T22:04:32.847Z",
                    "emails": [{
                        "is_verified": False,
                        "id": "1",
                        "email": "*****@*****.**"
                    }],
                    "avatar": {
                        "avatarUuid": None,
                        "avatarType": "letter_avatar"
                    },
                    "lastLogin":
                    "******",
                    "email":
                    "*****@*****.**"
                }
            }],
        },
    ).render(request)
示例#37
0
def _do_save_event(cache_key=None,
                   data=None,
                   start_time=None,
                   event_id=None,
                   project_id=None,
                   **kwargs):
    """
    Saves an event to the database.

    The payload is taken from ``data`` or, when absent, fetched from the
    event processing store via ``cache_key``.  On success the (possibly
    mutated) payload is written back to the processing store so that
    post-processing sees the most recent data; on ``HashDiscarded`` the
    cached payload is deleted instead.
    """

    set_current_project(project_id)

    from sentry.event_manager import EventManager, HashDiscarded

    # Default used for metric tagging until the payload reveals the real type.
    event_type = "none"

    if cache_key and data is None:
        with metrics.timer(
                "tasks.store.do_save_event.get_cache") as metric_tags:
            data = event_processing_store.get(cache_key)
            if data is not None:
                metric_tags["event_type"] = event_type = data.get(
                    "type") or "none"

    with metrics.global_tags(event_type=event_type):
        if data is not None:
            data = CanonicalKeyDict(data)

        if event_id is None and data is not None:
            event_id = data["event_id"]

        # only when we come from reprocessing we get a project_id sent into
        # the task.
        if project_id is None:
            project_id = data.pop("project")
            set_current_project(project_id)

        # We only need to delete raw events for events that support
        # reprocessing.  If the data cannot be found we want to assume
        # that we need to delete the raw event.
        if not data or reprocessing.event_supports_reprocessing(data):
            with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
                delete_raw_event(project_id, event_id, allow_hint_clear=True)

        # This covers two cases: where data is None because we did not manage
        # to fetch it from the default cache or the empty dictionary was
        # stored in the default cache.  The former happens if the event
        # expired while being on the queue, the second happens on reprocessing
        # if the raw event was deleted concurrently while we held on to
        # it.  This causes the node store to delete the data and we end up
        # fetching an empty dict.  We could in theory not invoke `save_event`
        # in those cases but it's important that we always clean up the
        # reprocessing reports correctly or they will screw up the UI.  So
        # to future proof this correctly we just handle this case here.
        if not data:
            metrics.incr("events.failed",
                         tags={
                             "reason": "cache",
                             "stage": "post"
                         },
                         skip_internal=False)
            return

        try:
            with metrics.timer("tasks.store.do_save_event.event_manager.save"):
                manager = EventManager(data)
                # event.project.organization is populated after this statement.
                manager.save(project_id,
                             assume_normalized=True,
                             start_time=start_time,
                             cache_key=cache_key)
                # Put the updated event back into the cache so that post_process
                # has the most recent data.
                data = manager.get_data()
                if isinstance(data, CANONICAL_TYPES):
                    data = dict(data.items())
                with metrics.timer(
                        "tasks.store.do_save_event.write_processing_cache"):
                    event_processing_store.store(data)
        except HashDiscarded:
            # Delete the event payload from cache since it won't show up in post-processing.
            if cache_key:
                with metrics.timer("tasks.store.do_save_event.delete_cache"):
                    event_processing_store.delete_by_key(cache_key)
                    event_processing_store.delete_by_key(
                        _get_unprocessed_key(cache_key))

        finally:
            # Bookkeeping and cache cleanup run regardless of save outcome.
            reprocessing2.mark_event_reprocessed(data)
            if cache_key:
                with metrics.timer(
                        "tasks.store.do_save_event.delete_attachment_cache"):
                    attachment_cache.delete(cache_key)

            if start_time:
                metrics.timing("events.time-to-process",
                               time() - start_time,
                               instance=data["platform"])

            time_synthetic_monitoring_event(data, project_id, start_time)
示例#38
0
    def test_default_fingerprint(self):
        """Events without an explicit fingerprint get the default one."""
        event_manager = EventManager(self.make_event())
        event_manager.normalize()
        saved_event = event_manager.save(self.project.id)

        assert saved_event.data.get('fingerprint') == ['{{ default }}']
示例#39
0
    def test_event_user(self):
        """EventUser rows are created, counted in tsdb, and mapped to tags."""
        manager = EventManager(
            make_event(event_id='a',
                       environment='totally unique environment',
                       **{'user': {
                           'id': '1',
                       }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        environment_id = Environment.get_for_organization_id(
            event.project.organization_id,
            'totally unique environment',
        ).id

        def distinct_totals(model, key, **extra):
            # Query the single-instant window [event.datetime, event.datetime].
            return tsdb.get_distinct_counts_totals(
                model, (key, ), event.datetime, event.datetime, **extra)

        assert distinct_totals(
            tsdb.models.users_affected_by_group,
            event.group.id) == {event.group.id: 1}
        assert distinct_totals(
            tsdb.models.users_affected_by_project,
            event.project.id) == {event.project.id: 1}
        assert distinct_totals(
            tsdb.models.users_affected_by_group,
            event.group.id,
            environment_id=environment_id) == {event.group.id: 1}
        assert distinct_totals(
            tsdb.models.users_affected_by_project,
            event.project.id,
            environment_id=environment_id) == {event.project.id: 1}

        euser = EventUser.objects.get(project_id=self.project.id, ident='1')
        assert event.get_tag('sentry:user') == euser.tag_value

        # A second event for the same user ident updates the stored name
        # and still maps to the same EventUser tag.
        manager = EventManager(
            make_event(event_id='b', **{'user': {
                'id': '1',
                'name': 'jane',
            }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        euser = EventUser.objects.get(id=euser.id)
        assert event.get_tag('sentry:user') == euser.tag_value
        assert euser.name == 'jane'
        assert euser.ident == '1'
示例#40
0
    def test_marks_as_unresolved_with_new_release(
            self, plugin_is_regression,
            mock_send_activity_notifications_delay):
        """A regression event on a newer release reopens a resolved group.

        An event on the release the group was resolved in keeps it
        resolved; an event on a different release marks it unresolved,
        removes the GroupResolution, records a SET_REGRESSION activity,
        and triggers an activity notification.
        """
        plugin_is_regression.return_value = True

        old_release = Release.objects.create(
            version="a",
            organization_id=self.project.organization_id,
            date_added=timezone.now() - timedelta(minutes=30),
        )
        old_release.add_project(self.project)

        manager = EventManager(
            make_event(
                event_id="a" * 32,
                checksum="a" * 32,
                timestamp=time() - 50000,  # need to work around active_at
                release=old_release.version,
            ))
        event = manager.save(1)

        group = event.group

        group.update(status=GroupStatus.RESOLVED)

        resolution = GroupResolution.objects.create(release=old_release,
                                                    group=group)
        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.SET_RESOLVED_IN_RELEASE,
            ident=resolution.id,
            data={"version": ""},
        )

        # Event on the resolved release: the group must stay resolved.
        manager = EventManager(
            make_event(event_id="b" * 32,
                       checksum="a" * 32,
                       timestamp=time(),
                       release=old_release.version))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.RESOLVED

        activity = Activity.objects.get(id=activity.id)
        assert activity.data["version"] == ""

        assert GroupResolution.objects.filter(group=group).exists()

        # Event on a different release: treated as a regression.
        manager = EventManager(
            make_event(event_id="c" * 32,
                       checksum="a" * 32,
                       timestamp=time(),
                       release="b"))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.UNRESOLVED

        activity = Activity.objects.get(id=activity.id)
        assert activity.data["version"] == "b"

        assert not GroupResolution.objects.filter(group=group).exists()

        activity = Activity.objects.get(group=group,
                                        type=Activity.SET_REGRESSION)

        mock_send_activity_notifications_delay.assert_called_once_with(
            activity.id)
示例#41
0
    def test_marks_as_unresolved_with_new_release_with_integration(
        self,
        plugin_is_regression,
        mock_send_activity_notifications_delay,
        mock_sync_status_outbound,
    ):
        """Regression handling also syncs status to a linked integration.

        With issue-sync enabled, an event on the resolved release keeps the
        group resolved, while an event on a new release unresolves it and
        pushes the status change outbound to the linked external issue.
        """
        plugin_is_regression.return_value = True

        old_release = Release.objects.create(
            version="a",
            organization_id=self.project.organization_id,
            date_added=timezone.now() - timedelta(minutes=30),
        )
        old_release.add_project(self.project)

        manager = EventManager(
            make_event(
                event_id="a" * 32,
                checksum="a" * 32,
                timestamp=time() - 50000,  # need to work around active_at
                release=old_release.version,
            ))
        event = manager.save(1)

        group = event.group

        org = group.organization

        # Enable full two-way sync on the linked integration.
        integration = Integration.objects.create(provider="example",
                                                 name="Example")
        integration.add_organization(org, self.user)
        OrganizationIntegration.objects.filter(
            integration_id=integration.id,
            organization_id=group.organization.id).update(
                config={
                    "sync_comments": True,
                    "sync_status_outbound": True,
                    "sync_status_inbound": True,
                    "sync_assignee_outbound": True,
                    "sync_assignee_inbound": True,
                })

        external_issue = ExternalIssue.objects.get_or_create(
            organization_id=org.id,
            integration_id=integration.id,
            key="APP-%s" % group.id)[0]

        GroupLink.objects.get_or_create(
            group_id=group.id,
            project_id=group.project_id,
            linked_type=GroupLink.LinkedType.issue,
            linked_id=external_issue.id,
            relationship=GroupLink.Relationship.references,
        )[0]

        group.update(status=GroupStatus.RESOLVED)

        resolution = GroupResolution.objects.create(release=old_release,
                                                    group=group)
        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.SET_RESOLVED_IN_RELEASE,
            ident=resolution.id,
            data={"version": ""},
        )

        # Event on the resolved release: the group must stay resolved.
        manager = EventManager(
            make_event(event_id="b" * 32,
                       checksum="a" * 32,
                       timestamp=time(),
                       release=old_release.version))

        with self.tasks():
            with self.feature({"organizations:integrations-issue-sync": True}):
                event = manager.save(1)
                assert event.group_id == group.id

                group = Group.objects.get(id=group.id)
                assert group.status == GroupStatus.RESOLVED

                activity = Activity.objects.get(id=activity.id)
                assert activity.data["version"] == ""

                assert GroupResolution.objects.filter(group=group).exists()

                # Event on a new release: regression; the unresolved status
                # must be synced outbound to the external issue.
                manager = EventManager(
                    make_event(event_id="c" * 32,
                               checksum="a" * 32,
                               timestamp=time(),
                               release="b"))
                event = manager.save(1)
                mock_sync_status_outbound.assert_called_once_with(
                    external_issue, False, event.group.project_id)
                assert event.group_id == group.id

                group = Group.objects.get(id=group.id)
                assert group.status == GroupStatus.UNRESOLVED

                activity = Activity.objects.get(id=activity.id)
                assert activity.data["version"] == "b"

                assert not GroupResolution.objects.filter(group=group).exists()

                activity = Activity.objects.get(group=group,
                                                type=Activity.SET_REGRESSION)

                mock_send_activity_notifications_delay.assert_called_once_with(
                    activity.id)
示例#42
0
 def test_invalid_transaction(self):
     """An invalid (dict) transaction value yields a null event transaction."""
     bad_transaction = {"messages": "foo"}
     event_manager = EventManager(make_event(transaction=bad_transaction))
     event_manager.normalize()
     saved_event = event_manager.save(1)
     assert saved_event.transaction is None
示例#43
0
    def from_kwargs(self, project, **kwargs):
        """Build, normalize, and save an event from raw keyword data."""
        from sentry.event_manager import EventManager

        event_manager = EventManager(kwargs)
        event_manager.normalize()
        return event_manager.save(project)
示例#44
0
File: store.py  Project: guoyu07/sentry-2
def save_event(cache_key=None, data=None, start_time=None, event_id=None, **kwargs):
    """
    Saves an event to the database.

    The payload comes from ``data`` or is loaded from the default cache via
    ``cache_key``.  Discarded hashes are counted in tsdb and the consumed
    quota is refunded; the cache entry is always cleared afterwards.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas, tsdb
    from sentry.models import ProjectKey

    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    # Payload expired (or was never cached): record the failure and bail.
    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    project_id = data.pop('project')

    delete_raw_event(project_id, event_id, allow_hint_clear=True)

    Raven.tags_context({
        'project': project_id,
    })

    try:
        manager = EventManager(data)
        manager.save(project_id)
    except HashDiscarded:
        # The event was discarded: count the drop and refund the quota.
        tsdb.incr(
            tsdb.models.project_total_received_discarded,
            project_id,
            timestamp=to_datetime(start_time) if start_time is not None else None,
        )

        try:
            project = Project.objects.get_from_cache(id=project_id)
        except Project.DoesNotExist:
            pass
        else:
            # The refund proceeds even if the key lookup fails below
            # (project_key simply stays None).
            project_key = None
            if data.get('key_id') is not None:
                try:
                    project_key = ProjectKey.objects.get_from_cache(id=data['key_id'])
                except ProjectKey.DoesNotExist:
                    pass

            quotas.refund(
                project,
                key=project_key,
                timestamp=start_time,
            )

    finally:
        # Always clear the cache entry and record end-to-end timing.
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing(
                'events.time-to-process',
                time() - start_time,
                instance=data['platform'])
示例#45
0
def _do_save_event(cache_key=None,
                   data=None,
                   start_time=None,
                   event_id=None,
                   project_id=None,
                   **kwargs):
    """
    Saves an event to the database.

    Emits an ACCEPTED outcome once the event is persisted.  When the event
    manager raises ``HashDiscarded``, the consumed quota is refunded and a
    FILTERED outcome with the discarded-hash reason is emitted instead.
    The default-cache entry (and, when allowed, the attachment-cache
    entry) for ``cache_key`` is always cleaned up.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas
    from sentry.models import ProjectKey
    from sentry.utils.outcomes import Outcome, track_outcome
    from sentry.ingest.outcomes_consumer import mark_signal_sent

    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if data is not None:
        data = CanonicalKeyDict(data)

    if event_id is None and data is not None:
        event_id = data["event_id"]

    # only when we come from reprocessing we get a project_id sent into
    # the task.
    if project_id is None:
        project_id = data.pop("project")

    # Normalized values used for outcome tracking below.
    key_id = None if data is None else data.get("key_id")
    if key_id is not None:
        key_id = int(key_id)
    timestamp = to_datetime(start_time) if start_time is not None else None

    # We only need to delete raw events for events that support
    # reprocessing.  If the data cannot be found we want to assume
    # that we need to delete the raw event.
    if not data or reprocessing.event_supports_reprocessing(data):
        delete_raw_event(project_id, event_id, allow_hint_clear=True)

    # This covers two cases: where data is None because we did not manage
    # to fetch it from the default cache or the empty dictionary was
    # stored in the default cache.  The former happens if the event
    # expired while being on the queue, the second happens on reprocessing
    # if the raw event was deleted concurrently while we held on to
    # it.  This causes the node store to delete the data and we end up
    # fetching an empty dict.  We could in theory not invoke `save_event`
    # in those cases but it's important that we always clean up the
    # reprocessing reports correctly or they will screw up the UI.  So
    # to future proof this correctly we just handle this case here.
    if not data:
        metrics.incr("events.failed",
                     tags={
                         "reason": "cache",
                         "stage": "post"
                     },
                     skip_internal=False)
        return

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    # Set on success; checked in the ``finally`` for attachment cleanup.
    event = None
    try:
        manager = EventManager(data)
        # event.project.organization is populated after this statement.
        event = manager.save(project_id,
                             assume_normalized=True,
                             cache_key=cache_key)

        # This is where we can finally say that we have accepted the event.
        track_outcome(
            event.project.organization_id,
            event.project.id,
            key_id,
            Outcome.ACCEPTED,
            None,
            timestamp,
            event_id,
        )

    except HashDiscarded:
        project = Project.objects.get_from_cache(id=project_id)
        reason = FilterStatKeys.DISCARDED_HASH
        project_key = None
        try:
            if key_id is not None:
                project_key = ProjectKey.objects.get_from_cache(id=key_id)
        except ProjectKey.DoesNotExist:
            pass

        quotas.refund(project, key=project_key, timestamp=start_time)
        # There is no signal supposed to be sent for this particular
        # outcome-reason combination. Prevent the outcome consumer from
        # emitting it for now.
        #
        # XXX(markus): Revisit decision about signals once outcomes consumer is stable.
        mark_signal_sent(project_id, event_id)
        track_outcome(
            project.organization_id,
            project_id,
            key_id,
            Outcome.FILTERED,
            reason,
            timestamp,
            event_id,
        )

    finally:
        # Cache cleanup and timing run regardless of save outcome.
        if cache_key:
            default_cache.delete(cache_key)

            # For the unlikely case that we did not manage to persist the
            # event we also delete the key always.
            if event is None or features.has("organizations:event-attachments",
                                             event.project.organization,
                                             actor=None):
                attachment_cache.delete(cache_key)

        if start_time:
            metrics.timing("events.time-to-process",
                           time() - start_time,
                           instance=data["platform"])
示例#46
0
    def test_marks_as_unresolved_only_with_new_release(self,
                                                       plugin_is_regression):
        """Only an event on a new release unresolves a resolved group.

        An event on the release the group was resolved in leaves it
        resolved; an event on a different release flips it to unresolved,
        removes the GroupResolution, and records a SET_REGRESSION activity.
        """
        plugin_is_regression.return_value = True

        old_release = Release.objects.create(
            version='a',
            project=self.project,
            date_added=timezone.now() - timedelta(minutes=30),
        )

        manager = EventManager(
            self.make_event(
                event_id='a' * 32,
                checksum='a' * 32,
                timestamp=time() - 50000,  # need to work around active_at
                release=old_release.version,
            ))
        event = manager.save(1)

        group = event.group

        group.update(status=GroupStatus.RESOLVED)

        resolution = GroupResolution.objects.create(
            release=old_release,
            group=group,
        )
        activity = Activity.objects.create(
            group=group,
            project=group.project,
            type=Activity.SET_RESOLVED_IN_RELEASE,
            ident=resolution.id,
            data={'version': ''},
        )

        # Event on the resolved release: the group must stay resolved.
        manager = EventManager(
            self.make_event(
                event_id='b' * 32,
                checksum='a' * 32,
                timestamp=time(),
                release=old_release.version,
            ))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.RESOLVED

        activity = Activity.objects.get(id=activity.id)
        assert activity.data['version'] == ''

        assert GroupResolution.objects.filter(group=group).exists()

        # Event on a different release: treated as a regression.
        manager = EventManager(
            self.make_event(
                event_id='c' * 32,
                checksum='a' * 32,
                timestamp=time(),
                release='b',
            ))
        event = manager.save(1)
        assert event.group_id == group.id

        group = Group.objects.get(id=group.id)
        assert group.status == GroupStatus.UNRESOLVED

        activity = Activity.objects.get(id=activity.id)
        assert activity.data['version'] == 'b'

        assert not GroupResolution.objects.filter(group=group).exists()

        assert Activity.objects.filter(
            group=group,
            type=Activity.SET_REGRESSION,
        ).exists()
示例#47
0
 def make_release_event(self, release_name, project_id):
     """Create, normalize, and save an event tagged with ``release_name``."""
     event_manager = EventManager(make_event(release=release_name))
     event_manager.normalize()
     return event_manager.save(project_id)
示例#48
0
 def test_invalid_environment(self):
     """An environment name containing '/' must not end up as a tag."""
     event_manager = EventManager(make_event(**{"environment": "bad/name"}))
     event_manager.normalize()
     saved_event = event_manager.save(self.project.id)
     assert dict(saved_event.tags).get("environment") is None
示例#49
0
 def test_key_id_remains_in_data(self):
     """``key_id`` must survive both normalization and saving."""
     event_manager = EventManager(make_event(key_id=12345))
     event_manager.normalize()
     assert event_manager.get_data()['key_id'] == 12345
     saved_event = event_manager.save(1)
     assert saved_event.data['key_id'] == 12345
示例#50
0
def create_sample_event_basic(data, project_id, raw=True):
    """Normalize ``data`` and persist it as an event on ``project_id``.

    ``raw`` is forwarded unchanged to ``EventManager.save``.
    """
    event_manager = EventManager(data)
    event_manager.normalize()
    return event_manager.save(project_id, raw=raw)
示例#51
0
 def test_invalid_transaction(self):
     """An invalid (dict) transaction value yields a null event transaction."""
     bad_transaction = {'messages': 'foo'}
     event_manager = EventManager(self.make_event(transaction=bad_transaction))
     event_manager.normalize()
     saved_event = event_manager.save(1)
     assert saved_event.transaction is None
示例#52
0
    def test_event_user(self):
        """EventUser rows are created, counted in tsdb, and mapped to tags."""
        manager = EventManager(
            make_event(event_id="a",
                       environment="totally unique environment",
                       **{"user": {
                           "id": "1"
                       }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        environment_id = Environment.get_for_organization_id(
            event.project.organization_id, "totally unique environment").id

        def distinct_totals(model, key, **extra):
            # Query the single-instant window [event.datetime, event.datetime].
            return tsdb.get_distinct_counts_totals(
                model, (key, ), event.datetime, event.datetime, **extra)

        assert distinct_totals(
            tsdb.models.users_affected_by_group,
            event.group.id) == {event.group.id: 1}
        assert distinct_totals(
            tsdb.models.users_affected_by_project,
            event.project.id) == {event.project.id: 1}
        assert distinct_totals(
            tsdb.models.users_affected_by_group,
            event.group.id,
            environment_id=environment_id) == {event.group.id: 1}
        assert distinct_totals(
            tsdb.models.users_affected_by_project,
            event.project.id,
            environment_id=environment_id) == {event.project.id: 1}

        stored_user = EventUser.objects.get(project_id=self.project.id,
                                            ident="1")
        assert event.get_tag("sentry:user") == stored_user.tag_value

        # A second event for the same user ident updates the stored name
        # and still maps to the same EventUser tag.
        manager = EventManager(
            make_event(event_id="b", **{"user": {
                "id": "1",
                "name": "jane"
            }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        stored_user = EventUser.objects.get(id=stored_user.id)
        assert event.get_tag("sentry:user") == stored_user.tag_value
        assert stored_user.name == "jane"
        assert stored_user.ident == "1"
示例#53
0
 def __build_transaction_event(self):
     """Return a saved event built from the sample transaction payload."""
     event_manager = EventManager(load_data("transaction"))
     event_manager.normalize()
     return event_manager.save(self.project.id)
示例#54
0
def save_event(cache_key=None,
               data=None,
               start_time=None,
               event_id=None,
               project_id=None,
               **kwargs):
    """
    Saves an event to the database.

    Final stage of the event-processing pipeline: loads the payload (from
    ``cache_key`` when given), persists it via ``EventManager.save``, and on
    ``HashDiscarded`` records discard/blacklist counters and refunds quota
    instead of saving.

    :param cache_key: default-cache key holding the payload; when set, ``data``
        is (re)loaded from the cache and the key is deleted in ``finally``.
    :param data: the event payload dict (overwritten when ``cache_key`` is set).
    :param start_time: epoch seconds when processing started; used for the
        quota-refund timestamp and the time-to-process metric.
    :param event_id: the event's id; defaults to ``data['event_id']``.
    :param project_id: target project id; only supplied by reprocessing,
        otherwise popped from ``data``.
    """
    # Imported lazily to avoid import cycles at task-registration time.
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas, tsdb
    from sentry.models import ProjectKey

    if cache_key:
        data = default_cache.get(cache_key)

    if event_id is None and data is not None:
        event_id = data['event_id']

    # only when we come from reprocessing we get a project_id sent into
    # the task.
    # NOTE(review): if project_id is None and data is also None/empty, the
    # pop below raises before the empty-payload guard — verify callers
    # always send project_id on the reprocessing path.
    if project_id is None:
        project_id = data.pop('project')

    # Clear any "this event needs reprocessing" hint now that it is saved.
    delete_raw_event(project_id, event_id, allow_hint_clear=True)

    # This covers two cases: where data is None because we did not manage
    # to fetch it from the default cache or the empty dictionary was
    # stored in the default cache.  The former happens if the event
    # expired while being on the queue, the second happens on reprocessing
    # if the raw event was deleted concurrently while we held on to
    # it.  This causes the node store to delete the data and we end up
    # fetching an empty dict.  We could in theory not invoke `save_event`
    # in those cases but it's important that we always clean up the
    # reprocessing reports correctly or they will screw up the UI.  So
    # to future proof this correctly we just handle this case here.
    if not data:
        metrics.incr('events.failed',
                     tags={
                         'reason': 'cache',
                         'stage': 'post'
                     })
        return

    # Tag any errors raised below with the project for easier triage.
    Raven.tags_context({
        'project': project_id,
    })

    try:
        manager = EventManager(data)
        manager.save(project_id)
    except HashDiscarded:
        # The event's group hash was discarded: count it as received-but-
        # discarded, bump blacklist counters, and refund the consumed quota.
        increment_list = [
            (tsdb.models.project_total_received_discarded, project_id),
        ]

        try:
            project = Project.objects.get_from_cache(id=project_id)
        except Project.DoesNotExist:
            # Project vanished; only the project-scoped discard counter applies.
            pass
        else:
            increment_list.extend([
                (tsdb.models.project_total_blacklisted, project.id),
                (tsdb.models.organization_total_blacklisted,
                 project.organization_id),
            ])

            # Attribute the blacklist hit to the originating key, if known.
            project_key = None
            if data.get('key_id') is not None:
                try:
                    project_key = ProjectKey.objects.get_from_cache(
                        id=data['key_id'])
                except ProjectKey.DoesNotExist:
                    pass
                else:
                    increment_list.append(
                        (tsdb.models.key_total_blacklisted, project_key.id))

            quotas.refund(
                project,
                key=project_key,
                timestamp=start_time,
            )

        tsdb.incr_multi(
            increment_list,
            timestamp=to_datetime(start_time)
            if start_time is not None else None,
        )

    finally:
        # Always drop the cached payload and record end-to-end latency,
        # regardless of save success.
        if cache_key:
            default_cache.delete(cache_key)
        if start_time:
            metrics.timing('events.time-to-process',
                           time() - start_time,
                           instance=data['platform'])
示例#55
0
 def test_transaction_as_culprit(self):
     """With no explicit culprit, the transaction name doubles as culprit."""
     mgr = EventManager(make_event(transaction="foobar"))
     mgr.normalize()
     saved = mgr.save(1)
     assert (saved.transaction, saved.culprit) == ("foobar", "foobar")
示例#56
0
 def test_inferred_culprit_from_empty_stacktrace(self):
     """A stacktrace with no frames yields an empty culprit string."""
     mgr = EventManager(make_event(stacktrace={"frames": []}))
     mgr.normalize()
     saved = mgr.save(1)
     assert saved.culprit == ""
示例#57
0
 def test_transaction_and_culprit(self):
     """An explicit culprit is preserved alongside the transaction name."""
     mgr = EventManager(make_event(transaction="foobar", culprit="baz"))
     mgr.normalize()
     saved = mgr.save(1)
     assert (saved.transaction, saved.culprit) == ("foobar", "baz")
示例#58
0
def alert(request):
    """
    Debug view rendering a preview of the "error alert" notification email.

    Builds a synthetic org/project/group, loads platform-specific sample
    event data (``?platform=`` query param, default ``python``), saves it
    through ``EventManager``, and renders the error email templates with
    hard-coded rule/link/commit fixture data.
    """
    platform = request.GET.get("platform", "python")
    # Throwaway in-memory objects; nothing here is persisted for org/project.
    org = Organization(id=1, slug="example", name="Example")
    project = Project(id=1, slug="example", name="Example", organization=org)

    random = get_random(request)
    group = next(make_group_generator(random, project))

    data = dict(load_data(platform))
    data["message"] = group.message
    data["event_id"] = "44f1419e73884cd2b45c79918f4b6dc4"
    # Drop the canned log entry so group.message drives the rendered title.
    data.pop("logentry", None)
    data["environment"] = "prod"
    data["tags"] = [
        ("logger", "javascript"),
        ("environment", "prod"),
        ("level", "error"),
        ("device", "Other"),
    ]

    event_manager = EventManager(data)
    event_manager.normalize()
    data = event_manager.get_data()
    event = event_manager.save(project.id)
    # Prevent CI screenshot from constantly changing
    event.data["timestamp"] = 1504656000.0  # datetime(2017, 9, 6, 0, 0)
    event_type = get_event_type(event.data)

    # Sync the synthetic group's display fields with the saved event.
    group.message = event.search_message
    group.data = {
        "type": event_type.key,
        "metadata": event_type.get_metadata(data)
    }

    rule = Rule(label="An example rule")

    # XXX: this interface_list code needs to be the same as in
    #      src/sentry/mail/adapter.py
    interface_list = []
    for interface in six.itervalues(event.interfaces):
        body = interface.to_email_html(event)
        if not body:
            continue
        text_body = interface.to_string(event)
        interface_list.append(
            (interface.get_title(), mark_safe(body), text_body))

    # The commits below are static fixture data so the preview shows the
    # "suspect commits" section without needing a real repository.
    return MailPreview(
        html_template="sentry/emails/error.html",
        text_template="sentry/emails/error.txt",
        context={
            "rule":
            rule,
            "group":
            group,
            "event":
            event,
            "link":
            "http://example.com/link",
            "interfaces":
            interface_list,
            "tags":
            event.tags,
            "project_label":
            project.slug,
            "commits": [{
                # TODO(dcramer): change to use serializer
                "repository": {
                    "status": "active",
                    "name": "Example Repo",
                    "url": "https://github.com/example/example",
                    "dateCreated": "2018-02-28T23:39:22.402Z",
                    "provider": {
                        "id": "github",
                        "name": "GitHub"
                    },
                    "id": "1",
                },
                "score": 2,
                "subject": "feat: Do something to raven/base.py",
                "message":
                "feat: Do something to raven/base.py\naptent vivamus vehicula tempus volutpat hac tortor",
                "id": "1b17483ffc4a10609e7921ee21a8567bfe0ed006",
                "shortId": "1b17483",
                "author": {
                    "username":
                    "******",
                    "isManaged":
                    False,
                    "lastActive":
                    "2018-03-01T18:25:28.149Z",
                    "id":
                    "1",
                    "isActive":
                    True,
                    "has2fa":
                    False,
                    "name":
                    "*****@*****.**",
                    "avatarUrl":
                    "https://secure.gravatar.com/avatar/51567a4f786cd8a2c41c513b592de9f9?s=32&d=mm",
                    "dateJoined":
                    "2018-02-27T22:04:32.847Z",
                    "emails": [{
                        "is_verified": False,
                        "id": "1",
                        "email": "*****@*****.**"
                    }],
                    "avatar": {
                        "avatarUuid": None,
                        "avatarType": "letter_avatar"
                    },
                    "lastLogin":
                    "******",
                    "email":
                    "*****@*****.**",
                },
            }],
        },
    ).render(request)
示例#59
0
 def test_culprit_is_not_transaction(self):
     """Supplying only a culprit must leave the transaction unset."""
     mgr = EventManager(make_event(culprit="foobar"))
     mgr.normalize()
     saved = mgr.save(1)
     assert saved.transaction is None
     assert saved.culprit == "foobar"