Example #1
    def test_transfer_to_team(self):
        from_org = self.create_organization()
        from_team = self.create_team(organization=from_org)
        to_org = self.create_organization()
        to_team = self.create_team(organization=to_org)

        project = self.create_project(teams=[from_team])

        rule = Rule.objects.create(
            project=project,
            environment_id=Environment.get_or_create(project, 'production').id,
            label='Golden Rule',
            data={},
        )

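        # moving to a team in another organization should re-point the rule
        # at a 'production' environment owned by the destination org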
        project.transfer_to(team=to_team)

        project = Project.objects.get(id=project.id)

        assert project.teams.count() == 1
        assert project.teams.first() == to_team
        assert project.organization_id == to_org.id

        updated_rule = project.rule_set.get(label='Golden Rule')
        assert updated_rule.id == rule.id
        assert updated_rule.environment_id != rule.environment_id
        assert updated_rule.environment_id == Environment.get_or_create(project, 'production').id
Example #2
    def test_multiple_environments(self):
        group = self.create_group()
        self.login_as(user=self.user)

        environment = Environment.get_or_create(group.project, 'production')
        environment2 = Environment.get_or_create(group.project, 'staging')

        url = u'/api/0/issues/{}/?enable_snuba=1'.format(group.id)

        from sentry.api.endpoints.group_details import tsdb

        with mock.patch(
                'sentry.api.endpoints.group_details.tsdb.get_range',
                side_effect=tsdb.get_range) as get_range:
            response = self.client.get(
                '%s&environment=production&environment=staging' % (url,),
                format='json'
            )
            assert response.status_code == 200
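            # presumably one get_range call per stats window served by the
            # endpoint (e.g. 24h and 30d), hence the two calls asserted below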
            assert get_range.call_count == 2
            for args, kwargs in get_range.call_args_list:
                assert kwargs['environment_ids'] == [environment.id, environment2.id]

        response = self.client.get('%s&environment=invalid' % (url,), format='json')
        assert response.status_code == 404
Example #3
    def test_simple(self):
        project = self.create_project()

        with pytest.raises(Environment.DoesNotExist):
            Environment.get_for_organization_id(
                project.organization_id,
                'prod',
            )

        env = Environment.get_or_create(
            project=project,
            name='prod',
        )

        assert env.name == 'prod'
        assert env.projects.first().id == project.id

        env2 = Environment.get_or_create(
            project=project,
            name='prod',
        )

        assert env2.id == env.id

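        # the get_or_create calls above warmed the cache, so this lookup
        # should complete without touching the database (zero queries)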
        with self.assertNumQueries(0):
            assert Environment.get_for_organization_id(
                project.organization_id,
                'prod',
            ).id == env.id
Example #4
    def test_with_environment(self):
        self.login_as(user=self.user)

        project = self.create_project()

        Environment.get_or_create(
            project,
            'production',
        )

        conditions = [
            {
                'id': 'sentry.rules.conditions.first_seen_event.FirstSeenEventCondition',
                'key': 'foo',
                'match': 'eq',
                'value': 'bar',
            }
        ]

        actions = [{'id': 'sentry.rules.actions.notify_event.NotifyEventAction'}]

        url = reverse(
            'sentry-api-0-project-rules',
            kwargs={
                'organization_slug': project.organization.slug,
                'project_slug': project.slug,
            }
        )
        response = self.client.post(
            url,
            data={
                'name': 'hello world',
                'environment': 'production',
                'conditions': conditions,
                'actions': actions,
                'actionMatch': 'any',
                'frequency': 30,
            },
            format='json'
        )

        assert response.status_code == 200, response.content
        assert response.data['id']
        assert response.data['environment'] == 'production'

        rule = Rule.objects.get(id=response.data['id'])
        assert rule.label == 'hello world'
        assert rule.environment_id == Environment.get_or_create(
            rule.project,
            'production',
        ).id
Example #5
    def test_environment(self):
        group = self.create_group()
        self.login_as(user=self.user)

        environment = Environment.get_or_create(group.project, 'production')

        url = u'/api/0/issues/{}/'.format(group.id)

        from sentry.api.endpoints.group_details import tsdb

        with mock.patch(
                'sentry.api.endpoints.group_details.tsdb.get_range',
                side_effect=tsdb.get_range) as get_range:
            response = self.client.get(url, {'environment': 'production'}, format='json')
            assert response.status_code == 200
            assert get_range.call_count == 2
            for args, kwargs in get_range.call_args_list:
                assert kwargs['environment_ids'] == [environment.id]

        with mock.patch(
                'sentry.api.endpoints.group_details.tsdb.make_series',
                side_effect=tsdb.make_series) as make_series:
            response = self.client.get(url, {'environment': 'invalid'}, format='json')
            assert response.status_code == 200
            assert make_series.call_count == 2
Example #6
    def test_environment(self):
        group = self.group

        environment = Environment.get_or_create(group.project, 'production')

        with mock.patch(
                'sentry.api.serializers.models.group.snuba_tsdb.get_range',
                side_effect=snuba_tsdb.get_range) as get_range:
            serialize(
                [group],
                serializer=StreamGroupSerializerSnuba(
                    environment_ids=[environment.id],
                    stats_period='14d',
                ),
            )
            assert get_range.call_count == 1
            for args, kwargs in get_range.call_args_list:
                assert kwargs['environment_ids'] == [environment.id]

        with mock.patch(
                'sentry.api.serializers.models.group.snuba_tsdb.get_range',
                side_effect=snuba_tsdb.get_range) as get_range:
            serialize(
                [group],
                serializer=StreamGroupSerializerSnuba(
                    environment_ids=None,
                    stats_period='14d',
                )
            )
            assert get_range.call_count == 1
            for args, kwargs in get_range.call_args_list:
                assert kwargs['environment_ids'] is None
Example #7
    def test_with_environment(self):
        self.login_as(user=self.user)

        team = self.create_team()
        project1 = self.create_project(teams=[team], name='foo')
        self.create_project(teams=[team], name='bar')

        rule = project1.rule_set.all()[0]
        rule.update(
            environment_id=Environment.get_or_create(
                rule.project,
                'production',
            ).id,
        )

        url = reverse(
            'sentry-api-0-project-rule-details',
            kwargs={
                'organization_slug': project1.organization.slug,
                'project_slug': project1.slug,
                'rule_id': rule.id,
            }
        )
        response = self.client.get(url, format='json')

        assert response.status_code == 200, response.content
        assert response.data['id'] == six.text_type(rule.id)
        assert response.data['environment'] == 'production'
Example #8
    def test_environment(self):
        group = self.group

        environment = Environment.get_or_create(group.project, 'production')

        from sentry.api.serializers.models.group import tsdb

        with mock.patch(
                'sentry.api.serializers.models.group.tsdb.get_range',
                side_effect=tsdb.get_range) as get_range:
            serialize(
                [group],
                serializer=StreamGroupSerializer(
                    environment_func=lambda: environment,
                    stats_period='14d',
                ),
            )
            assert get_range.call_count == 1
            for args, kwargs in get_range.call_args_list:
                assert kwargs['environment_id'] == environment.id

        def get_invalid_environment():
            raise Environment.DoesNotExist()

        with mock.patch(
                'sentry.api.serializers.models.group.tsdb.make_series',
                side_effect=tsdb.make_series) as make_series:
            serialize(
                [group],
                serializer=StreamGroupSerializer(
                    environment_func=get_invalid_environment,
                    stats_period='14d',
                )
            )
            assert make_series.call_count == 1
Example #9
    def create_environment(self, **kwargs):
        project = kwargs.get('project', self.project)
        name = kwargs.get('name', petname.Generate(1, ' ', letters=10))
        return Environment.get_or_create(
            project=project,
            name=name,
        )
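
A brief usage sketch for the factory above (assumes the surrounding test case provides the usual `self.project` fixture; the test name is hypothetical):

    def test_create_environment_fixture(self):
        # explicit name
        env = self.create_environment(name='staging')
        assert env.name == 'staging'

        # omitting the name falls back to a random petname
        env2 = self.create_environment()
        assert env2.projects.filter(id=self.project.id).exists()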
Example #10
    def test_simple(self):
        project = self.create_project()

        env = Environment.get_or_create(
            project=project,
            name='prod',
        )

        assert env.name == 'prod'
        assert env.projects.first().id == project.id

        env2 = Environment.get_or_create(
            project=project,
            name='prod',
        )

        assert env2.id == env.id
Example #11
    def test_environment(self):
        self.login_as(user=self.user)

        group = self.create_group()
        events = {}

        for name in ['production', 'development']:
            environment = Environment.get_or_create(group.project, name)

            tagstore.get_or_create_tag_key(
                project_id=group.project_id,
                environment_id=environment.id,
                key='environment',
            )

            tagstore.create_tag_value(
                project_id=group.project_id,
                environment_id=environment.id,
                key='environment',
                value=name,
            )

            events[name] = event = self.create_event(
                group=group,
                tags={'environment': name},
            )

            tagstore.create_event_tags(
                project_id=group.project_id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=[
                    ('environment', name),
                ],
            )

        url = u'/api/0/issues/{}/events/'.format(group.id)
        response = self.client.get(url + '?environment=production', format='json')

        assert response.status_code == 200, response.content
        assert set(map(lambda x: x['id'], response.data)) == set([
            six.text_type(events['production'].id),
        ])

        url = u'/api/0/issues/{}/events/'.format(group.id)
        response = self.client.get(url + '?environment=invalid', format='json')

        assert response.status_code == 200, response.content
        assert response.data == []

        url = u'/api/0/issues/{}/events/'.format(group.id)
        response = self.client.get(
            url + '?environment=production&query=environment:development',
            format='json')

        assert response.status_code == 200, response.content
        assert response.data == []
Example #12
    def get_environment(self):
        from sentry.models import Environment
        if not hasattr(self, '_environment_cache'):
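            # memoize on the instance so repeated lookups skip the query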
            self._environment_cache = Environment.objects.get(
                organization_id=self.project.organization_id,
                name=Environment.get_name_or_default(self.get_tag('environment')),
            )

        return self._environment_cache
Example #13
    def get(self, request, project, environment):
        try:
            instance = EnvironmentProject.objects.select_related('environment').get(
                project=project,
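                # map the URL path segment back to the stored environment name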
                environment__name=Environment.get_name_from_path_segment(environment),
            )
        except EnvironmentProject.DoesNotExist:
            raise ResourceDoesNotExist

        return Response(serialize(instance, request.user))
Example #14
    def test_with_environment(self):
        self.login_as(user=self.user)

        project = self.create_project()

        Environment.get_or_create(
            project,
            'production',
        )

        rule = Rule.objects.create(project=project, label='foo')

        url = reverse(
            'sentry-api-0-project-rule-details',
            kwargs={
                'organization_slug': project.organization.slug,
                'project_slug': project.slug,
                'rule_id': rule.id,
            }
        )
        response = self.client.put(
            url,
            data={
                'name': 'hello world',
                'environment': 'production',
                'actionMatch': 'any',
                'actions': [],
                'conditions': []
            },
            format='json'
        )

        assert response.status_code == 200, response.content
        assert response.data['id'] == six.text_type(rule.id)
        assert response.data['environment'] == 'production'

        rule = Rule.objects.get(id=rule.id)
        assert rule.label == 'hello world'
        assert rule.environment_id == Environment.get_or_create(
            rule.project,
            'production',
        ).id
Example #15
    def _setup_tags_for_event(self, event):
        tags = dict(event.data['tags'])

        try:
            environment = self.environments[tags['environment']]
        except KeyError:
            environment = self.environments[tags['environment']] = Environment.get_or_create(
                event.project,
                tags['environment'],
            )

        GroupEnvironment.objects.get_or_create(
            environment_id=environment.id,
            group_id=event.group_id,
        )

        for key, value in tags.items():
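            # write each tag twice: once into the aggregate (all-environments)
            # bucket and once for the event's own environment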
            for environment_id in [AGGREGATE_ENVIRONMENT_ID, environment.id]:
                tag_value, created = tagstore.get_or_create_group_tag_value(
                    project_id=event.project_id,
                    group_id=event.group_id,
                    environment_id=environment_id,
                    key=key,
                    value=value,
                )

                if created:  # XXX: Hack for tagstore compat
                    tag_value.update(
                        times_seen=1,
                        first_seen=event.datetime,
                        last_seen=event.datetime,
                    )
                else:
                    updates = {
                        'times_seen': tag_value.times_seen + 1,
                    }

                    if event.datetime < tag_value.first_seen:
                        updates['first_seen'] = event.datetime

                    if event.datetime > tag_value.last_seen:
                        updates['last_seen'] = event.datetime

                    if updates:
                        tag_value.update(**updates)

                tagstore.create_event_tags(
                    project_id=event.project_id,
                    group_id=event.group_id,
                    environment_id=environment_id,
                    event_id=event.id,
                    tags=tags.items(),
                    date_added=event.datetime,
                )
Example #16
    def test_get_environment(self):
        environment = Environment.get_or_create(self.project, 'production')
        event = self.create_event(
            data={'tags': [
                ('environment', 'production'),
            ]}
        )

        assert event.get_environment() == environment

        with self.assertNumQueries(0):
            assert event.get_environment() == environment
Example #17
    def test_get_environment(self):
        environment = Environment.get_or_create(self.project, 'production')
        event = self.store_event(
            data={
                'environment': 'production'
            },
            project_id=self.project.id
        )

        assert event.get_environment() == environment

        with self.assertNumQueries(0):
            assert event.get_environment() == environment
Example #18
    def create_event(self, *args, **kwargs):
        event = super(SnubaSearchTest, self).create_event(*args, **kwargs)

        data = event.data.data
        tags = dict(data.get('tags', []))

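        # note: this assumes every test event carries an 'environment' tag;
        # a missing tag would raise KeyError below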
        if tags['environment'] not in self.environments:
            self.environments[tags['environment']] = Environment.get_or_create(
                event.project,
                tags['environment'],
            )

        return event
Example #19
def _get_event_environment(event, project, cache):
    from sentry.models import Environment

    environment_name = event.get_tag('environment')

    if environment_name not in cache:
        try:
            environment = Environment.get_for_organization_id(
                project.organization_id, environment_name)
        except Environment.DoesNotExist:
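            # unknown environment: log it, then create it on demand so the
            # event is not dropped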
            logger.warning(
                'event.environment.does_not_exist',
                extra={
                    'project_id': project.id,
                    'environment_name': environment_name,
                }
            )
            environment = Environment.get_or_create(project, environment_name)

        cache[environment_name] = environment

    return cache[environment_name]
Example #20
    def _get_environment_from_request(self, request, organization_id):
        if not hasattr(request, '_cached_environment'):
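            # resolve the environment at most once per request; later calls
            # reuse the cached value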
            environment_param = request.GET.get('environment')
            if environment_param is None:
                environment = None
            else:
                environment = Environment.get_for_organization_id(
                    name=environment_param,
                    organization_id=organization_id,
                )

            request._cached_environment = environment

        return request._cached_environment
Example #21
    def validate_environment(self, attrs, source):
        name = attrs.get(source)
        if name is None:
            return attrs

        try:
            attrs['environment'] = Environment.get_for_organization_id(
                self.context['project'].organization_id,
                name,
            ).id
        except Environment.DoesNotExist:
            raise serializers.ValidationError(u'This environment has not been created.')

        return attrs
Example #22
    def create_event(self, *args, **kwargs):
        """\
        Takes the results from the existing `create_event` method and
        inserts them into the local test Snuba cluster so that tests can be
        run against the same event data.

        Note that we create a GroupHash as necessary because `create_event`
        doesn't run events through the 'real' event pipeline. In a perfect
        world all test events would go through the full regular pipeline.
        """

        from sentry.event_manager import get_hashes_from_fingerprint, md5_from_hash

        event = super(SnubaTestCase, self).create_event(*args, **kwargs)

        data = event.data.data
        tags = dict(data.get('tags', []))

        if not data.get('received'):
            data['received'] = calendar.timegm(event.datetime.timetuple())

        if 'environment' in tags:
            environment = Environment.get_or_create(
                event.project,
                tags['environment'],
            )

            GroupEnvironment.objects.get_or_create(
                environment_id=environment.id,
                group_id=event.group_id,
            )

        hashes = get_hashes_from_fingerprint(
            event,
            data.get('fingerprint', ['{{ default }}']),
        )
        primary_hash = md5_from_hash(hashes[0])

        grouphash, _ = GroupHash.objects.get_or_create(
            project=event.project,
            group=event.group,
            hash=primary_hash,
        )

        self.snuba_insert(self.__wrap_event(event, data, grouphash.hash))

        return event
Example #23
    def test_transfer_to_team_releases(self):
        from_org = self.create_organization()
        from_team = self.create_team(organization=from_org)
        to_org = self.create_organization()
        to_team = self.create_team(organization=to_org)

        project = self.create_project(teams=[from_team])

        environment = Environment.get_or_create(project, 'production')
        release = Release.get_or_create(project=project, version='1.0')

        ReleaseProjectEnvironment.objects.create(
            project=project,
            release=release,
            environment=environment,
        )

        assert ReleaseProjectEnvironment.objects.filter(
            project=project,
            release=release,
            environment=environment,
        ).exists()
        assert ReleaseProject.objects.filter(
            project=project,
            release=release,
        ).exists()

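        # a cross-organization transfer should sever the release links, since
        # releases belong to the source organization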
        project.transfer_to(team=to_team)

        project = Project.objects.get(id=project.id)

        assert project.teams.count() == 1
        assert project.teams.first() == to_team
        assert project.organization_id == to_org.id

        assert not ReleaseProjectEnvironment.objects.filter(
            project=project,
            release=release,
            environment=environment,
        ).exists()
        assert not ReleaseProject.objects.filter(
            project=project,
            release=release,
        ).exists()
Example #24
    def test_tsdb(self):
        project = self.project
        manager = EventManager(self.make_event(
            fingerprint=['totally unique super duper fingerprint'],
            environment='totally unique super duper environment',
        ))
        event = manager.save(project)

        def query(model, key, **kwargs):
            return tsdb.get_sums(model, [key], event.datetime, event.datetime, **kwargs)[key]

        assert query(tsdb.models.project, project.id) == 1
        assert query(tsdb.models.group, event.group.id) == 1

        environment_id = Environment.get_for_organization_id(
            event.project.organization_id,
            'totally unique super duper environment',
        ).id
        assert query(tsdb.models.project, project.id, environment_id=environment_id) == 1
        assert query(tsdb.models.group, event.group.id, environment_id=environment_id) == 1
Example #25
    def create_event(self, *args, **kwargs):
        """\
        Takes the results from the existing `create_event` method and
        inserts them into the local test Snuba cluster so that tests can be
        run against the same event data.

        Note that we create a GroupHash as necessary because `create_event`
        doesn't run events through the 'real' event pipeline. In a perfect
        world all test events would go through the full regular pipeline.
        """
        # XXX: Use `store_event` instead of this!
        event = Factories.create_event(*args, **kwargs)

        data = event.data.data
        tags = dict(data.get('tags', []))

        if not data.get('received'):
            data['received'] = calendar.timegm(event.datetime.timetuple())

        if 'environment' in tags:
            environment = Environment.get_or_create(
                event.project,
                tags['environment'],
            )

            GroupEnvironment.objects.get_or_create(
                environment_id=environment.id,
                group_id=event.group_id,
            )

        primary_hash = event.get_primary_hash()

        grouphash, _ = GroupHash.objects.get_or_create(
            project=event.project,
            group=event.group,
            hash=primary_hash,
        )

        self.snuba_insert(self.__wrap_event(event, data, grouphash.hash))

        return event
Example #26
    def put(self, request, project, environment):
        try:
            instance = EnvironmentProject.objects.select_related('environment').get(
                project=project,
                environment__name=Environment.get_name_from_path_segment(environment),
            )
        except EnvironmentProject.DoesNotExist:
            raise ResourceDoesNotExist

        serializer = ProjectEnvironmentSerializer(data=request.DATA, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)

        data = serializer.object
        fields = {}

        if 'isHidden' in data:
            fields['is_hidden'] = data['isHidden']

        if fields:
            instance.update(**fields)

        return Response(serialize(instance, request.user))
Example #27
    def test_simple(self):
        org = self.create_organization()
        commit = Commit.objects.create(organization_id=org.id, repository_id=5)
        commit2 = Commit.objects.create(organization_id=org.id, repository_id=6)

        # merge to
        project = self.create_project(organization=org, name='foo')
        environment = Environment.get_or_create(project=project, name='env1')
        release = Release.objects.create(version='abcdabc', organization=org)
        release.add_project(project)
        release_commit = ReleaseCommit.objects.create(
            organization_id=org.id, release=release, commit=commit, order=1
        )
        release_environment = ReleaseEnvironment.objects.create(
            organization_id=org.id, project_id=project.id, release_id=release.id, environment_id=environment.id
        )
        release_project_environment = ReleaseProjectEnvironment.objects.create(
            release_id=release.id, project_id=project.id, environment_id=environment.id
        )
        group_release = GroupRelease.objects.create(
            project_id=project.id, release_id=release.id, group_id=1
        )
        group = self.create_group(project=project, first_release=release)
        group_resolution = GroupResolution.objects.create(group=group, release=release)

        # merge from #1
        project2 = self.create_project(organization=org, name='bar')
        environment2 = Environment.get_or_create(project=project2, name='env2')
        release2 = Release.objects.create(version='bbbbbbb', organization=org)
        release2.add_project(project2)
        release_commit2 = ReleaseCommit.objects.create(
            organization_id=org.id, release=release2, commit=commit, order=2
        )
        release_environment2 = ReleaseEnvironment.objects.create(
            organization_id=org.id,
            project_id=project2.id,
            release_id=release2.id,
            environment_id=environment2.id,
        )
        release_project_environment2 = ReleaseProjectEnvironment.objects.create(
            release_id=release2.id, project_id=project2.id, environment_id=environment2.id
        )
        group_release2 = GroupRelease.objects.create(
            project_id=project2.id, release_id=release2.id, group_id=2
        )
        group2 = self.create_group(project=project2, first_release=release2)
        group_resolution2 = GroupResolution.objects.create(group=group2, release=release2)

        # merge from #2
        project3 = self.create_project(organization=org, name='baz')
        environment3 = Environment.get_or_create(project=project3, name='env3')
        release3 = Release.objects.create(version='cccccc', organization=org)
        release3.add_project(project3)
        release_commit3 = ReleaseCommit.objects.create(
            organization_id=org.id, release=release2, commit=commit2, order=3
        )
        release_environment3 = ReleaseEnvironment.objects.create(
            organization_id=org.id,
            project_id=project3.id,
            release_id=release3.id,
            environment_id=environment3.id,
        )
        release_project_environment3 = ReleaseProjectEnvironment.objects.create(
            release_id=release3.id, project_id=project3.id, environment_id=environment3.id
        )
        group_release3 = GroupRelease.objects.create(
            project_id=project3.id, release_id=release3.id, group_id=3
        )
        group3 = self.create_group(project=project3, first_release=release3)
        group_resolution3 = GroupResolution.objects.create(group=group3, release=release3)

        Release.merge(release, [release2, release3])

        # ReleaseCommit.release
        assert ReleaseCommit.objects.get(id=release_commit.id).release == release
        # should not exist because they referenced the same commit
        assert not ReleaseCommit.objects.filter(id=release_commit2.id).exists()
        assert ReleaseCommit.objects.get(id=release_commit3.id).release == release

        # ReleaseEnvironment.release_id
        assert ReleaseEnvironment.objects.get(id=release_environment.id).release_id == release.id
        assert ReleaseEnvironment.objects.get(id=release_environment2.id).release_id == release.id
        assert ReleaseEnvironment.objects.get(id=release_environment3.id).release_id == release.id

        # ReleaseProject.release
        assert release.projects.count() == 3
        assert ReleaseProject.objects.filter(release=release, project=project).exists()
        assert ReleaseProject.objects.filter(release=release, project=project2).exists()
        assert ReleaseProject.objects.filter(release=release, project=project3).exists()

        # ReleaseProjectEnvironment.release
        assert ReleaseProjectEnvironment.objects.get(
            id=release_project_environment.id).release_id == release.id
        assert ReleaseProjectEnvironment.objects.get(
            id=release_project_environment2.id).release_id == release.id
        assert ReleaseProjectEnvironment.objects.get(
            id=release_project_environment3.id).release_id == release.id

        # GroupRelease.release_id
        assert GroupRelease.objects.get(id=group_release.id).release_id == release.id
        assert GroupRelease.objects.get(id=group_release2.id).release_id == release.id
        assert GroupRelease.objects.get(id=group_release3.id).release_id == release.id

        # GroupResolution.release
        assert GroupResolution.objects.get(id=group_resolution.id).release == release
        assert GroupResolution.objects.get(id=group_resolution2.id).release == release
        assert GroupResolution.objects.get(id=group_resolution3.id).release == release

        # Group.first_release
        assert Group.objects.get(id=group.id).first_release == release
        assert Group.objects.get(id=group2.id).first_release == release
        assert Group.objects.get(id=group3.id).first_release == release

        # Releases are gone
        assert Release.objects.filter(id=release.id).exists()
        assert not Release.objects.filter(id=release2.id).exists()
        assert not Release.objects.filter(id=release3.id).exists()
Example #28
def _get_or_create_environment_many(jobs, projects):
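    # `jobs` is a list of per-event dicts carrying at least 'project_id' and
    # 'environment'; `projects` maps each project id to its Project instance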
    for job in jobs:
        job["environment"] = Environment.get_or_create(
            project=projects[job["project_id"]], name=job["environment"]
        )
Example #29
    def test_simple(self):
        org = self.create_organization()
        commit = Commit.objects.create(organization_id=org.id, repository_id=5)
        commit2 = Commit.objects.create(organization_id=org.id,
                                        repository_id=6)

        # merge to
        project = self.create_project(organization=org, name="foo")
        environment = Environment.get_or_create(project=project, name="env1")
        release = Release.objects.create(version="abcdabc", organization=org)
        release.add_project(project)
        release_commit = ReleaseCommit.objects.create(organization_id=org.id,
                                                      release=release,
                                                      commit=commit,
                                                      order=1)
        release_environment = ReleaseEnvironment.objects.create(
            organization_id=org.id,
            project_id=project.id,
            release_id=release.id,
            environment_id=environment.id,
        )
        release_project_environment = ReleaseProjectEnvironment.objects.create(
            release_id=release.id,
            project_id=project.id,
            environment_id=environment.id)
        group_release = GroupRelease.objects.create(project_id=project.id,
                                                    release_id=release.id,
                                                    group_id=1)
        group = self.create_group(project=project, first_release=release)
        group_resolution = GroupResolution.objects.create(group=group,
                                                          release=release)

        # merge from #1
        project2 = self.create_project(organization=org, name="bar")
        environment2 = Environment.get_or_create(project=project2, name="env2")
        release2 = Release.objects.create(version="bbbbbbb", organization=org)
        release2.add_project(project2)
        release_commit2 = ReleaseCommit.objects.create(organization_id=org.id,
                                                       release=release2,
                                                       commit=commit,
                                                       order=2)
        release_environment2 = ReleaseEnvironment.objects.create(
            organization_id=org.id,
            project_id=project2.id,
            release_id=release2.id,
            environment_id=environment2.id,
        )
        release_project_environment2 = ReleaseProjectEnvironment.objects.create(
            release_id=release2.id,
            project_id=project2.id,
            environment_id=environment2.id)
        group_release2 = GroupRelease.objects.create(project_id=project2.id,
                                                     release_id=release2.id,
                                                     group_id=2)
        group2 = self.create_group(project=project2, first_release=release2)
        group_resolution2 = GroupResolution.objects.create(group=group2,
                                                           release=release2)

        # merge from #2
        project3 = self.create_project(organization=org, name="baz")
        environment3 = Environment.get_or_create(project=project3, name="env3")
        release3 = Release.objects.create(version="cccccc", organization=org)
        release3.add_project(project3)
        release_commit3 = ReleaseCommit.objects.create(organization_id=org.id,
                                                       release=release2,
                                                       commit=commit2,
                                                       order=3)
        release_environment3 = ReleaseEnvironment.objects.create(
            organization_id=org.id,
            project_id=project3.id,
            release_id=release3.id,
            environment_id=environment3.id,
        )
        release_project_environment3 = ReleaseProjectEnvironment.objects.create(
            release_id=release3.id,
            project_id=project3.id,
            environment_id=environment3.id)
        group_release3 = GroupRelease.objects.create(project_id=project3.id,
                                                     release_id=release3.id,
                                                     group_id=3)
        group3 = self.create_group(project=project3, first_release=release3)
        group_resolution3 = GroupResolution.objects.create(group=group3,
                                                           release=release3)

        Release.merge(release, [release2, release3])

        # ReleaseCommit.release
        assert ReleaseCommit.objects.get(
            id=release_commit.id).release == release
        # should not exist because they referenced the same commit
        assert not ReleaseCommit.objects.filter(id=release_commit2.id).exists()
        assert ReleaseCommit.objects.get(
            id=release_commit3.id).release == release

        # ReleaseEnvironment.release_id
        assert ReleaseEnvironment.objects.get(
            id=release_environment.id).release_id == release.id
        assert ReleaseEnvironment.objects.get(
            id=release_environment2.id).release_id == release.id
        assert ReleaseEnvironment.objects.get(
            id=release_environment3.id).release_id == release.id

        # ReleaseProject.release
        assert release.projects.count() == 3
        assert ReleaseProject.objects.filter(release=release,
                                             project=project).exists()
        assert ReleaseProject.objects.filter(release=release,
                                             project=project2).exists()
        assert ReleaseProject.objects.filter(release=release,
                                             project=project3).exists()

        # ReleaseProjectEnvironment.release
        assert (ReleaseProjectEnvironment.objects.get(
            id=release_project_environment.id).release_id == release.id)
        assert (ReleaseProjectEnvironment.objects.get(
            id=release_project_environment2.id).release_id == release.id)
        assert (ReleaseProjectEnvironment.objects.get(
            id=release_project_environment3.id).release_id == release.id)

        # GroupRelease.release_id
        assert GroupRelease.objects.get(
            id=group_release.id).release_id == release.id
        assert GroupRelease.objects.get(
            id=group_release2.id).release_id == release.id
        assert GroupRelease.objects.get(
            id=group_release3.id).release_id == release.id

        # GroupResolution.release
        assert GroupResolution.objects.get(
            id=group_resolution.id).release == release
        assert GroupResolution.objects.get(
            id=group_resolution2.id).release == release
        assert GroupResolution.objects.get(
            id=group_resolution3.id).release == release

        # Group.first_release
        assert Group.objects.get(id=group.id).first_release == release
        assert Group.objects.get(id=group2.id).first_release == release
        assert Group.objects.get(id=group3.id).first_release == release

        # Releases are gone
        assert Release.objects.filter(id=release.id).exists()
        assert not Release.objects.filter(id=release2.id).exists()
        assert not Release.objects.filter(id=release3.id).exists()
Example #30
    def save(self, project, raw=False):
        from sentry.tasks.post_process import index_event_tags

        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        level = data.pop('level')

        culprit = data.pop('culprit', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        environment = data.pop('environment', None)

        # unused
        time_spent = data.pop('time_spent', None)
        message = data.pop('message', '')

        if not culprit:
            culprit = generate_culprit(data, platform=platform)

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'platform': platform,
        }

        event = Event(project_id=project.id,
                      event_id=event_id,
                      data=data,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        tags = data.get('tags') or []
        tags.append(('level', LOG_LEVELS[level]))
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))
        if release:
            # TODO(dcramer): we should ensure we create Release objects
            tags.append(('sentry:release', release))
        if environment:
            tags.append(('environment', environment))

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                tags.extend(added_tags)

        event_user = self._get_event_user(project, data)
        if event_user:
            tags.append(('sentry:user', event_user.tag_value))

        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        data['fingerprint'] = fingerprint or ['{{ default }}']

        for path, iface in event.interfaces.iteritems():
            data['tags'].extend(iface.iter_tags())
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.get_path(), None)

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = map(md5_from_hash,
                         get_hashes_from_fingerprint(event, fingerprint))
        elif checksum:
            hashes = [checksum]
        else:
            hashes = map(md5_from_hash, get_hashes_for_event(event))

        # TODO(dcramer): temp workaround for complexity
        data['message'] = message
        event_type = eventtypes.get(data.get('type', 'default'))(data)
        event_metadata = event_type.get_metadata()
        # TODO(dcramer): temp workaround for complexity
        del data['message']

        data['type'] = event_type.key
        data['metadata'] = event_metadata

        # index components into ``Event.message``
        # See GH-3248
        if event_type.key != 'default':
            if 'sentry.interfaces.Message' in data and \
                    data['sentry.interfaces.Message']['message'] != message:
                message = u'{} {}'.format(
                    message,
                    data['sentry.interfaces.Message']['message'],
                )

        if not message:
            message = ''
        elif not isinstance(message, basestring):
            message = force_text(message)

        for value in event_metadata.itervalues():
            value_u = force_text(value, errors='replace')
            if value_u not in message:
                message = u'{} {}'.format(message, value_u)

        message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

        event.message = message
        kwargs['message'] = message

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'data': {
                'last_received': event.data.get('received')
                or float(event.datetime.strftime('%s')),
                'type': event_type.key,
                # we cache the event's metadata on the group to ensure it's
                # accessible in the stream
                'metadata': event_metadata,
            },
        })

        if release:
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            group_kwargs['first_release'] = release

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs)

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project,
                                            group=group,
                                            event_id=event_id)
        except IntegrityError:
            self.logger.info('Duplicate EventMapping found for event_id=%s',
                             event_id,
                             exc_info=True)
            return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        tsdb.incr_multi(
            [
                (tsdb.models.group, group.id),
                (tsdb.models.project, project.id),
            ],
            timestamp=event.datetime,
        )

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]
        if release:
            frequencies.append((tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            }))

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(group=group)

        # save the event unless its been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info('Duplicate Event found for event_id=%s',
                                 event_id,
                                 exc_info=True)
                return event

            index_event_tags.delay(
                project_id=project.id,
                group_id=group.id,
                event_id=event.id,
                tags=tags,
            )

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id,
                     (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id,
                     (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
            )

        if is_new and release:
            buffer.incr(Release, {'new_groups': 1}, {
                'id': release.id,
            })

        safe_execute(Group.objects.add_tags,
                     group,
                     tags,
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project,
                                          group=group,
                                          sender=Project)

            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info(
                'Raw event passed; skipping post process for event_id=%s',
                event_id)

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Example #31
    def save(self, project, raw=False):
        from sentry.tasks.post_process import index_event_tags
        data = self.data

        project = Project.objects.get_from_cache(id=project)

        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(
                project_id=project.id,
                event_id=data['event_id'],
            )
        except Event.DoesNotExist:
            pass
        else:
            self.logger.info(
                'duplicate.found',
                exc_info=True,
                extra={
                    'event_uuid': data['event_id'],
                    'project_id': project.id,
                    'model': Event.__name__,
                }
            )
            return event

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        level = data.pop('level')
        transaction_name = data.pop('transaction', None)
        culprit = data.pop('culprit', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        dist = data.pop('dist', None)
        environment = data.pop('environment', None)

        # unused
        time_spent = data.pop('time_spent', None)
        message = data.pop('message', '')

        if not culprit:
            if transaction_name:
                culprit = transaction_name
            else:
                culprit = generate_culprit(data, platform=platform)

        culprit = force_text(culprit)
        if transaction_name:
            transaction_name = force_text(transaction_name)

        recorded_timestamp = data.pop('timestamp')
        date = datetime.fromtimestamp(recorded_timestamp)
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'platform': platform,
        }

        event = Event(
            project_id=project.id,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )
        event._project_cache = project
        data = event.data.data

        # convert this to a dict to ensure we're only storing one value per key
        # as most parts of Sentry don't currently play well with multiple values
        tags = dict(data.get('tags') or [])
        tags['level'] = LOG_LEVELS[level]
        if logger_name:
            tags['logger'] = logger_name
        if server_name:
            tags['server_name'] = server_name
        if site:
            tags['site'] = site
        if environment:
            tags['environment'] = environment
        if transaction_name:
            tags['transaction'] = transaction_name

        if release:
            # don't allow a conflicting 'release' tag
            if 'release' in tags:
                del tags['release']
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            tags['sentry:release'] = release.version

        if dist and release:
            dist = release.add_dist(dist, date)
            tags['sentry:dist'] = dist.name
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            if 'user' in tags:
                del tags['user']
            tags['sentry:user'] = event_user.tag_value

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        normalize_in_app(data)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    tags.setdefault(key, value)

        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                tags[k] = v
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.get_path(), None)

        # tags are stored as a tuple
        tags = tags.items()

        data['tags'] = tags
        data['fingerprint'] = fingerprint or ['{{ default }}']

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = [md5_from_hash(h) for h in get_hashes_from_fingerprint(event, fingerprint)]
        elif checksum:
            if HASH_RE.match(checksum):
                hashes = [checksum]
            else:
                hashes = [md5_from_hash([checksum]), checksum]
            data['checksum'] = checksum
        else:
            hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

        # TODO(dcramer): temp workaround for complexity
        data['message'] = message
        event_type = eventtypes.get(data.get('type', 'default'))(data)
        event_metadata = event_type.get_metadata()
        # TODO(dcramer): temp workaround for complexity
        del data['message']

        data['type'] = event_type.key
        data['metadata'] = event_metadata

        # index components into ``Event.message``
        # See GH-3248
        if event_type.key != 'default':
            if 'sentry.interfaces.Message' in data and \
                    data['sentry.interfaces.Message']['message'] != message:
                message = u'{} {}'.format(
                    message,
                    data['sentry.interfaces.Message']['message'],
                )

        if not message:
            message = ''
        elif not isinstance(message, six.string_types):
            message = force_text(message)

        for value in six.itervalues(event_metadata):
            value_u = force_text(value, errors='replace')
            if value_u not in message:
                message = u'{} {}'.format(message, value_u)

        if culprit and culprit not in message:
            culprit_u = force_text(culprit, errors='replace')
            message = u'{} {}'.format(message, culprit_u)

        message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

        event.message = message
        kwargs['message'] = message

        received_timestamp = event.data.get('received') or float(event.datetime.strftime('%s'))
        group_kwargs = kwargs.copy()
        group_kwargs.update(
            {
                'culprit': culprit,
                'logger': logger_name,
                'level': level,
                'last_seen': date,
                'first_seen': date,
                'active_at': date,
                'data': {
                    'last_received': received_timestamp,
                    'type': event_type.key,
                    # we cache the event's metadata on the group to ensure it's
                    # accessible in the stream
                    'metadata': event_metadata,
                },
            }
        )

        if release:
            group_kwargs['first_release'] = release

        try:
            group, is_new, is_regression, is_sample = self._save_aggregate(
                event=event, hashes=hashes, release=release, **group_kwargs
            )
        except HashDiscarded:
            event_discarded.send_robust(
                project=project,
                sender=EventManager,
            )

            metrics.incr(
                'events.discarded',
                skip_internal=True,
                tags={
                    'organization_id': project.organization_id,
                    'platform': platform,
                },
            )
            raise
        else:
            event_saved.send_robust(
                project=project,
                event_size=event.size,
                sender=EventManager,
            )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        # When an event is sampled, the canonical source of truth
        # is the EventMapping table since we aren't going to be writing out an actual
        # Event row. Otherwise, if the Event isn't being sampled, we can safely
        # rely on the Event table itself as the source of truth and ignore
        # EventMapping since it's redundant information.
        if is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(EventMapping)):
                    EventMapping.objects.create(project=project, group=group, event_id=event_id)
            except IntegrityError:
                self.logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': EventMapping.__name__,
                    }
                )
                return event
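
        # Illustration of the two lookup paths implied above: a sampled event
        # leaves only its EventMapping row (the Event row below is never
        # written), while an unsampled event gets an Event row and
        # EventMapping is skipped as redundant.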

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
            group_id=group.id,
            environment_id=environment.id,
            defaults={
                'first_release_id': release.id if release else None,
            },
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            ReleaseProjectEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1,
                    },
                })
            )

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(
            group=group,
            environment=environment,
        )

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': Event.__name__,
                    }
                )
                return event

            index_event_tags.delay(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=tags,
                date_added=event.datetime,
            )

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
                environment_id=environment.id,
            )
        if release:
            if is_new:
                buffer.incr(
                    ReleaseProject, {'new_groups': 1}, {
                        'release_id': release.id,
                        'project_id': project.id,
                    }
                )
            if is_new_group_environment:
                buffer.incr(
                    ReleaseProjectEnvironment, {'new_issues_count': 1}, {
                        'project_id': project.id,
                        'release_id': release.id,
                        'environment_id': environment.id,
                    }
                )

        safe_execute(Group.objects.add_tags, group, environment, tags, _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project, group=group, sender=Project)

        eventstream.insert(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )

        metrics.timing(
            'events.latency',
            received_timestamp - recorded_timestamp,
            tags={
                'project_id': project.id,
            },
        )

        return event
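
A rough in-memory sketch of what the tsdb.incr_multi call in the example above
amounts to: each (model, key) pair is bumped for the event's time bucket. The
hourly rollup and the extra all-environments series are assumptions here; the
real Redis/Snuba backends are considerably more involved.

    import calendar
    from collections import Counter
    from datetime import datetime

    class InMemoryTSDB(object):
        ROLLUP = 3600  # assumed: counters are bucketed by hour

        def __init__(self):
            self.counts = Counter()

        def incr_multi(self, items, timestamp, environment_id=None, count=1):
            bucket = calendar.timegm(timestamp.utctimetuple())
            bucket -= bucket % self.ROLLUP
            for model, key in items:
                # assumed: write the per-environment series plus the
                # all-environments (None) rollup
                for env_id in {environment_id, None}:
                    self.counts[(model, key, env_id, bucket)] += count

    tsdb = InMemoryTSDB()
    tsdb.incr_multi(
        [('group', 42), ('project', 7), ('release', 3)],
        timestamp=datetime(2018, 6, 1, 12, 30),
        environment_id=11,
    )
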
Example No. 32
    def post(self, request, organization, version):
        """
        Create a Deploy
        ```````````````

        Create a deploy for a given release.

        :pparam string organization_slug: the organization short name
        :pparam string version: the version identifier of the release.
        :param string environment: the environment you're deploying to
        :param string name: the optional name of the deploy
        :param url url: the optional url that points to the deploy
        :param datetime dateStarted: an optional date that indicates when
                                     the deploy started
        :param datetime dateFinished: an optional date that indicates when
                                      the deploy ended. If not provided, the
                                      current time is used.
        """
        try:
            release = Release.objects.get(
                version=version,
                organization=organization,
            )
        except Release.DoesNotExist:
            raise ResourceDoesNotExist

        if not self.has_release_permission(request, organization, release):
            raise PermissionDenied

        serializer = DeploySerializer(data=request.DATA)

        if serializer.is_valid():
            result = serializer.object
            try:
                env = Environment.objects.get(
                    organization_id=organization.id,
                    name=result['environment'],
                )
            except Environment.DoesNotExist:
                # TODO(jess): clean up when changing unique constraint
                lock_key = Environment.get_lock_key(organization.id, result['environment'])
                lock = locks.get(lock_key, duration=5)
                with TimedRetryPolicy(10)(lock.acquire):
                    try:
                        env = Environment.objects.get(
                            organization_id=organization.id,
                            name=result['environment'],
                        )
                    except Environment.DoesNotExist:
                        env = Environment.objects.create(
                            organization_id=organization.id,
                            name=result['environment'],
                        )

            try:
                with transaction.atomic():
                    deploy, created = Deploy.objects.create(
                        organization_id=organization.id,
                        release=release,
                        environment_id=env.id,
                        date_finished=result.get('dateFinished', timezone.now()),
                        date_started=result.get('dateStarted'),
                        name=result.get('name'),
                        url=result.get('url'),
                    ), True
            except IntegrityError:
                deploy, created = Deploy.objects.get(
                    organization_id=organization.id,
                    release=release,
                    environment_id=env.id,
                ), False
                deploy.update(
                    date_finished=result.get('dateFinished', timezone.now()),
                    date_started=result.get('dateStarted'),
                )
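
            # Illustration of the upsert above on a double-submit
            # (hypothetical timeline): request A's create() succeeds, so
            # created=True and it returns 201 below; request B then trips
            # the unique constraint, fetches A's row instead, refreshes the
            # dates, and returns 208.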

            activity = None
            for project in release.projects.all():
                activity = Activity.objects.create(
                    type=Activity.DEPLOY,
                    project=project,
                    ident=release.version,
                    data={
                        'version': release.version,
                        'deploy_id': deploy.id,
                        'environment': env.name
                    },
                    datetime=deploy.date_finished,
                )
            # Somewhat hacky: only send a notification for one Deploy
            # Activity record, since it covers all projects
            if activity is not None:
                activity.send_notification()

            # This is the closest status code that makes sense, and we want
            # a unique 2xx response code so people can understand when
            # behavior differs.
            #   208 Already Reported (WebDAV; RFC 5842)
            status = 201 if created else 208

            return Response(serialize(deploy, request.user), status=status)

        return Response(serializer.errors, status=400)
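
The environment lookup above is a double-checked get-or-create: read first,
and only on a miss take a lock and re-read before inserting, so two racing
deploys cannot both create the same (organization, name) row. A minimal
self-contained sketch of the same pattern, with a process-local lock and a
dict standing in for the Environment table:

    import threading

    _table = {}               # stands in for the Environment table
    _lock = threading.Lock()  # stands in for locks.get() + TimedRetryPolicy

    def get_or_create_environment(organization_id, name):
        key = (organization_id, name)
        try:
            return _table[key]      # fast path: no lock taken
        except KeyError:
            pass
        with _lock:
            try:
                return _table[key]  # re-check: another thread may have won
            except KeyError:
                _table[key] = {'organization_id': organization_id, 'name': name}
                return _table[key]

    env = get_or_create_environment(1, 'production')
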
Example No. 33
    def validate_environment(self, attrs, source):
        value = attrs[source]
        if not Environment.is_valid_name(value):
            raise serializers.ValidationError('Invalid value for environment')
        return attrs
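
This is the older Django REST Framework 2.x-style field hook
(validate_<field>(self, attrs, source)): the serializer passes in the incoming
attrs dict plus the field name and expects the attrs back. A standalone sketch
of the same check; the stand-in is_valid_name rule below is an assumption,
since the real Environment.is_valid_name isn't shown here:

    class ValidationError(Exception):
        pass

    def is_valid_name(value):
        # assumed rule: non-empty and free of newlines
        return bool(value) and '\n' not in value

    def validate_environment(attrs, source):
        value = attrs[source]
        if not is_valid_name(value):
            raise ValidationError('Invalid value for environment')
        return attrs

    validate_environment({'environment': 'production'}, 'environment')  # passes
    # validate_environment({'environment': ''}, 'environment')  # would raise
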
Example No. 34
    def save(self,
             project_id,
             raw=False,
             assume_normalized=False,
             cache_key=None):
        """
        We re-insert events with duplicate IDs into Snuba, which is responsible
        for deduplicating events. Since deduplication in Snuba is on the primary
        key (based on event ID, project ID and day), events with same IDs are only
        deduplicated if their timestamps fall on the same day. The latest event
        always wins and overwrites the value of events received earlier in that day.

        Since we increment counters and frequencies here before events get inserted
        to eventstream these numbers may be larger than the total number of
        events if we receive duplicate event IDs that fall on the same day
        (that do not hit cache first).
        """

        # Normalize if needed
        if not self._normalized:
            if not assume_normalized:
                self.normalize()
            self._normalized = True

        data = self._data

        with metrics.timer("event_manager.save.project.get_from_cache"):
            project = Project.objects.get_from_cache(id=project_id)

        with metrics.timer("event_manager.save.organization.get_from_cache"):
            project._organization_cache = Organization.objects.get_from_cache(
                id=project.organization_id)

        # Pull out the culprit
        culprit = self.get_culprit()

        # Pull the toplevel data we're interested in
        level = data.get("level")

        # TODO(mitsuhiko): this code path should be gone by July 2018.
        # This is going to be fine because no code actually still depends
        # on integers here.  When we need an integer it will be converted
        # into one later.  Old workers used to send integers here.
        if level is not None and isinstance(level, six.integer_types):
            level = LOG_LEVELS[level]

        transaction_name = data.get("transaction")
        logger_name = data.get("logger")
        release = data.get("release")
        dist = data.get("dist")
        environment = data.get("environment")
        recorded_timestamp = data.get("timestamp")

        # We need to swap out the data with the one internal to the newly
        # created event object
        event = self._get_event_instance(project_id=project_id)
        self._data = data = event.data.data

        event._project_cache = project

        date = event.datetime
        platform = event.platform
        event_id = event.event_id

        if transaction_name:
            transaction_name = force_text(transaction_name)

        # Right now the event type is the signal to skip the group. This
        # is going to change a lot.
        if event.get_event_type() == "transaction":
            issueless_event = True
        else:
            issueless_event = False

        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction).  These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        setdefault_path(data, "tags", value=[])
        set_tag(data, "level", level)
        if logger_name:
            set_tag(data, "logger", logger_name)
        if environment:
            set_tag(data, "environment", environment)
        if transaction_name:
            set_tag(data, "transaction", transaction_name)

        if release:
            # don't allow a conflicting 'release' tag
            pop_tag(data, "release")
            release = Release.get_or_create(project=project,
                                            version=release,
                                            date_added=date)
            set_tag(data, "sentry:release", release.version)

        if dist and release:
            dist = release.add_dist(dist, date)
            # don't allow a conflicting 'dist' tag
            pop_tag(data, "dist")
            set_tag(data, "sentry:dist", dist.name)
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            pop_tag(data, "user")
            set_tag(data, "sentry:user", event_user.tag_value)

        with metrics.timer("event_manager.load_grouping_config"):
            # At this point we want to normalize the in_app values in case the
            # clients did not set this appropriately so far.
            grouping_config = load_grouping_config(
                get_grouping_config_dict_for_event_data(data, project))

        with metrics.timer("event_manager.normalize_stacktraces_for_grouping"):
            normalize_stacktraces_for_grouping(data, grouping_config)

        with metrics.timer("event_manager.plugins"):
            for plugin in plugins.for_project(project, version=None):
                added_tags = safe_execute(plugin.get_tags,
                                          event,
                                          _with_transaction=False)
                if added_tags:
                    # plugins should not override user provided tags
                    for key, value in added_tags:
                        if get_tag(data, key) is None:
                            set_tag(data, key, value)

        with metrics.timer("event_manager.set_tags"):
            for path, iface in six.iteritems(event.interfaces):
                for k, v in iface.iter_tags():
                    set_tag(data, k, v)
                # Get rid of ephemeral interface data
                if iface.ephemeral:
                    data.pop(iface.path, None)

        with metrics.timer("event_manager.apply_server_fingerprinting"):
            # The active grouping config was put into the event in the
            # normalize step before.  We now also make sure that the
            # fingerprint was set to '{{ default }}' just in case someone
            # removed it from the payload.  The call to get_hashes will then
            # look at `grouping_config` to pick the right parameters.
            data["fingerprint"] = data.get("fingerprint") or ["{{ default }}"]
            apply_server_fingerprinting(
                data, get_fingerprinting_config_for_project(project))

        with metrics.timer("event_manager.event.get_hashes"):
            # Here we try to use the grouping config that was requested in the
            # event.  If that config has since been deleted (because it was an
            # experimental grouping config) we fall back to the default.
            try:
                hashes = event.get_hashes()
            except GroupingConfigNotFound:
                data["grouping_config"] = get_grouping_config_dict_for_project(
                    project)
                hashes = event.get_hashes()

        data["hashes"] = hashes

        with metrics.timer("event_manager.materialize_metadata"):
            # we want to freeze not just the metadata and type but also the
            # derived attributes.  The reason for this is that we push this
            # data into kafka for snuba processing and our postprocessing
            # picks up the data right from the snuba topic.  For most usage
            # however the data is dynamically overridden by Event.title and
            # Event.location (See Event.as_dict)
            materialized_metadata = self.materialize_metadata()
            data.update(materialized_metadata)
            data["culprit"] = culprit

        received_timestamp = event.data.get("received") or float(
            event.datetime.strftime("%s"))

        if not issueless_event:
            # The group gets the same metadata as the event when it's flushed but
            # additionally the `last_received` key is set.  This key is used by
            # _save_aggregate.
            group_metadata = dict(materialized_metadata)
            group_metadata["last_received"] = received_timestamp
            kwargs = {
                "platform": platform,
                "message": event.search_message,
                "culprit": culprit,
                "logger": logger_name,
                "level": LOG_LEVELS_MAP.get(level),
                "last_seen": date,
                "first_seen": date,
                "active_at": date,
                "data": group_metadata,
            }

            if release:
                kwargs["first_release"] = release

            try:
                group, is_new, is_regression = self._save_aggregate(
                    event=event, hashes=hashes, release=release, **kwargs)
            except HashDiscarded:
                event_discarded.send_robust(project=project,
                                            sender=EventManager)

                metrics.incr(
                    "events.discarded",
                    skip_internal=True,
                    tags={
                        "organization_id": project.organization_id,
                        "platform": platform
                    },
                )
                raise
            event.group = group
        else:
            group = None
            is_new = False
            is_regression = False

        with metrics.timer("event_manager.event_saved_signal"):
            event_saved.send_robust(project=project,
                                    event_size=event.size,
                                    sender=EventManager)

        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        environment = Environment.get_or_create(project=project,
                                                name=environment)

        if group:
            group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
                group_id=group.id,
                environment_id=environment.id,
                defaults={"first_release": release if release else None},
            )
        else:
            is_new_group_environment = False

        if release:
            ReleaseEnvironment.get_or_create(project=project,
                                             release=release,
                                             environment=environment,
                                             datetime=date)

            ReleaseProjectEnvironment.get_or_create(project=project,
                                                    release=release,
                                                    environment=environment,
                                                    datetime=date)

            if group:
                grouprelease = GroupRelease.get_or_create(
                    group=group,
                    release=release,
                    environment=environment,
                    datetime=date)

        counters = [(tsdb.models.project, project.id)]

        if group:
            counters.append((tsdb.models.group, group.id))

        if release:
            counters.append((tsdb.models.release, release.id))

        with metrics.timer("event_manager.tsdb_incr_group_and_release_counters"
                           ) as metrics_tags:
            metrics_tags["has_group"] = "true" if group else "false"
            tsdb.incr_multi(counters,
                            timestamp=event.datetime,
                            environment_id=environment.id)

        frequencies = []

        if group:
            frequencies.append((tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1
                }
            }))

            if release:
                frequencies.append((tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1
                    }
                }))
        if frequencies:
            tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        if group:
            UserReport.objects.filter(project=project,
                                      event_id=event_id).update(
                                          group=group, environment=environment)

        # Ensure the _metrics key exists. This is usually created during
        # ingestion and prefilled with ingestion sizes.
        event_metrics = event.data.get("_metrics") or {}
        event.data["_metrics"] = event_metrics

        # Capture the actual size that goes into node store.
        event_metrics["bytes.stored.event"] = len(
            json.dumps(dict(event.data.items())))

        if not issueless_event:
            # Load attachments first, but persist them at the very last after
            # posting to eventstream to make sure all counters and eventstream are
            # incremented for sure.
            attachments = self.get_attachments(cache_key, event)
            for attachment in attachments:
                key = "bytes.stored.%s" % (attachment.type, )
                event_metrics[key] = (event_metrics.get(key) or 0) + len(
                    attachment.data)

        # Write the event to Nodestore
        event.data.save()

        if event_user:
            counters = [(tsdb.models.users_affected_by_project, project.id,
                         (event_user.tag_value, ))]

            if group:
                counters.append((tsdb.models.users_affected_by_group, group.id,
                                 (event_user.tag_value, )))

            with metrics.timer("event_manager.tsdb_record_users_affected"
                               ) as metrics_tags:
                metrics_tags["has_group"] = "true" if group else "false"
                tsdb.record_multi(counters,
                                  timestamp=event.datetime,
                                  environment_id=environment.id)

        if release:
            if is_new:
                buffer.incr(
                    ReleaseProject,
                    {"new_groups": 1},
                    {
                        "release_id": release.id,
                        "project_id": project.id
                    },
                )
            if is_new_group_environment:
                buffer.incr(
                    ReleaseProjectEnvironment,
                    {"new_issues_count": 1},
                    {
                        "project_id": project.id,
                        "release_id": release.id,
                        "environment_id": environment.id,
                    },
                )

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project,
                                                 event=event,
                                                 sender=Project)

        with metrics.timer("event_manager.eventstream.insert"):
            eventstream.insert(
                group=group,
                event=event,
                is_new=is_new,
                is_regression=is_regression,
                is_new_group_environment=is_new_group_environment,
                primary_hash=hashes[0],
                # We are choosing to skip consuming the event back
                # in the eventstream if it's flagged as raw.
                # This means that we want to publish the event
                # through the event stream, but we don't care
                # about post processing and handling the commit.
                skip_consume=raw,
            )

        if not issueless_event:
            # Do this last to ensure signals get emitted even if connection to the
            # file store breaks temporarily.
            self.save_attachments(attachments, event)

        metric_tags = {"from_relay": "_relay_processed" in self._data}

        metrics.timing("events.latency",
                       received_timestamp - recorded_timestamp,
                       tags=metric_tags)
        metrics.timing("events.size.data.post_save",
                       event.size,
                       tags=metric_tags)
        metrics.incr(
            "events.post_save.normalize.errors",
            amount=len(self._data.get("errors") or ()),
            tags=metric_tags,
        )

        return event
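
The metrics.timer(...) as metrics_tags blocks in this example lean on a timer
context manager that yields a mutable tag dict, so the caller can attach tags
such as has_group before the timing is emitted on exit. One plausible shape
for such a helper, sketched with time.time and a print standing in for the
real metrics backend:

    import time
    from contextlib import contextmanager

    @contextmanager
    def timer(key, tags=None):
        current_tags = dict(tags or {})
        start = time.time()
        try:
            yield current_tags    # the caller may add tags inside the block
        finally:
            elapsed_ms = (time.time() - start) * 1000
            # stand-in for timing(key, elapsed, tags=current_tags)
            print('%s %.2fms %r' % (key, elapsed_ms, current_tags))

    with timer('event_manager.save') as metrics_tags:
        metrics_tags['has_group'] = 'true'
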
Example No. 35
    def save(self, project_id, raw=False, assume_normalized=False):
        # Normalize if needed
        if not self._normalized:
            if not assume_normalized:
                self.normalize()
            self._normalized = True

        data = self._data

        project = Project.objects.get_from_cache(id=project_id)
        project._organization_cache = Organization.objects.get_from_cache(
            id=project.organization_id)

        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(
                project_id=project.id,
                event_id=data['event_id'],
            )
        except Event.DoesNotExist:
            pass
        else:
            # Make sure we cache on the project before returning
            event._project_cache = project
            logger.info('duplicate.found',
                        exc_info=True,
                        extra={
                            'event_uuid': data['event_id'],
                            'project_id': project.id,
                            'model': Event.__name__,
                        })
            return event

        # Pull out the culprit
        culprit = self.get_culprit()

        # Pull the toplevel data we're interested in
        level = data.get('level')

        # TODO(mitsuhiko): this code path should be gone by July 2018.
        # This is going to be fine because no code actually still depends
        # on integers here.  When we need an integer it will be converted
        # into one later.  Old workers used to send integers here.
        if level is not None and isinstance(level, six.integer_types):
            level = LOG_LEVELS[level]

        transaction_name = data.get('transaction')
        logger_name = data.get('logger')
        release = data.get('release')
        dist = data.get('dist')
        environment = data.get('environment')
        recorded_timestamp = data.get('timestamp')

        # We need to swap out the data with the one internal to the newly
        # created event object
        event = self._get_event_instance(project_id=project_id)
        self._data = data = event.data.data

        event._project_cache = project

        date = event.datetime
        platform = event.platform
        event_id = event.event_id

        if transaction_name:
            transaction_name = force_text(transaction_name)

        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction).  These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        setdefault_path(data, 'tags', value=[])
        set_tag(data, 'level', level)
        if logger_name:
            set_tag(data, 'logger', logger_name)
        if environment:
            set_tag(data, 'environment', environment)
        if transaction_name:
            set_tag(data, 'transaction', transaction_name)

        if release:
            # don't allow a conflicting 'release' tag
            pop_tag(data, 'release')
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )
            set_tag(data, 'sentry:release', release.version)

        if dist and release:
            dist = release.add_dist(dist, date)
            # don't allow a conflicting 'dist' tag
            pop_tag(data, 'dist')
            set_tag(data, 'sentry:dist', dist.name)
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            pop_tag(data, 'user')
            set_tag(data, 'sentry:user', event_user.tag_value)

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        grouping_config = load_grouping_config(
            get_grouping_config_dict_for_event_data(data, project))
        normalize_stacktraces_for_grouping(data, grouping_config)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    if get_tag(data, key) is None:
                        set_tag(data, key, value)

        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                set_tag(data, k, v)
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.path, None)

        # The active grouping config was put into the event in the
        # normalize step before.  We now also make sure that the
        # fingerprint was set to '{{ default }}' just in case someone
        # removed it from the payload.  The call to get_hashes will then
        # look at `grouping_config` to pick the right parameters.
        data['fingerprint'] = data.get('fingerprint') or ['{{ default }}']
        apply_server_fingerprinting(
            data, get_fingerprinting_config_for_project(project))
        hashes = event.get_hashes()
        data['hashes'] = hashes

        # we want to freeze not just the metadata and type but also the
        # derived attributes.  The reason for this is that we push this
        # data into kafka for snuba processing and our postprocessing
        # picks up the data right from the snuba topic.  For most usage
        # however the data is dynamically overridden by Event.title and
        # Event.location (See Event.as_dict)
        materialized_metadata = self.materialize_metadata()
        event_metadata = materialized_metadata['metadata']
        data.update(materialized_metadata)
        data['culprit'] = culprit

        # index components into ``Event.message``
        # See GH-3248
        event.message = self.get_search_message(event_metadata, culprit)
        received_timestamp = event.data.get('received') or float(
            event.datetime.strftime('%s'))

        # The group gets the same metadata as the event when it's flushed but
        # additionally the `last_received` key is set.  This key is used by
        # _save_aggregate.
        group_metadata = dict(materialized_metadata)
        group_metadata['last_received'] = received_timestamp
        kwargs = {
            'platform': platform,
            'message': event.message,
            'culprit': culprit,
            'logger': logger_name,
            'level': LOG_LEVELS_MAP.get(level),
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': group_metadata,
        }

        if release:
            kwargs['first_release'] = release

        try:
            group, is_new, is_regression, is_sample = self._save_aggregate(
                event=event, hashes=hashes, release=release, **kwargs)
        except HashDiscarded:
            event_discarded.send_robust(
                project=project,
                sender=EventManager,
            )

            metrics.incr(
                'events.discarded',
                skip_internal=True,
                tags={
                    'organization_id': project.organization_id,
                    'platform': platform,
                },
            )
            raise
        else:
            event_saved.send_robust(
                project=project,
                event_size=event.size,
                sender=EventManager,
            )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        # When an event is sampled, the canonical source of truth
        # is the EventMapping table since we aren't going to be writing out an actual
        # Event row. Otherwise, if the Event isn't being sampled, we can safely
        # rely on the Event table itself as the source of truth and ignore
        # EventMapping since it's redundant information.
        if is_sample:
            try:
                with transaction.atomic(
                        using=router.db_for_write(EventMapping)):
                    EventMapping.objects.create(project=project,
                                                group=group,
                                                event_id=event_id)
            except IntegrityError:
                logger.info('duplicate.found',
                            exc_info=True,
                            extra={
                                'event_uuid': event_id,
                                'project_id': project.id,
                                'group_id': group.id,
                                'model': EventMapping.__name__,
                            })
                return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
            group_id=group.id,
            environment_id=environment.id,
            defaults={
                'first_release': release if release else None,
            },
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            ReleaseProjectEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters,
                        timestamp=event.datetime,
                        environment_id=environment.id)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append((tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            }))

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(
            group=group,
            environment=environment,
        )

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                logger.info('duplicate.found',
                            exc_info=True,
                            extra={
                                'event_uuid': event_id,
                                'project_id': project.id,
                                'group_id': group.id,
                                'model': Event.__name__,
                            })
                return event

            tagstore.delay_index_event_tags(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=event.tags,
                date_added=event.datetime,
            )

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id,
                     (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id,
                     (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
                environment_id=environment.id,
            )
        if release:
            if is_new:
                buffer.incr(ReleaseProject, {'new_groups': 1}, {
                    'release_id': release.id,
                    'project_id': project.id,
                })
            if is_new_group_environment:
                buffer.incr(ReleaseProjectEnvironment, {'new_issues_count': 1},
                            {
                                'project_id': project.id,
                                'release_id': release.id,
                                'environment_id': environment.id,
                            })

        safe_execute(Group.objects.add_tags,
                     group,
                     environment,
                     event.get_tags(),
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project,
                                                 group=group,
                                                 sender=Project)

        eventstream.insert(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )

        metrics.timing(
            'events.latency',
            received_timestamp - recorded_timestamp,
            tags={
                'project_id': project.id,
            },
        )

        metrics.timing('events.size.data.post_save',
                       event.size,
                       tags={'project_id': project.id})

        return event
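
The events.latency timing at the end measures ingest delay: the difference
between when the server received the event (the 'received' key, falling back
to the event's own datetime; note that strftime('%s') is a platform-specific
extension) and the 'timestamp' the client recorded. A worked example with
made-up values:

    recorded_timestamp = 1546300800.0   # client-side event timestamp
    received_timestamp = 1546300807.5   # server receive time

    latency = received_timestamp - recorded_timestamp
    assert latency == 7.5               # seconds between recording and receipt
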
Example No. 36
    def transfer_to(self, team=None, organization=None):
        # NOTE: this will only work properly if the new team is in a different
        # org than the existing one, which is currently the only use case in
        # production
        # TODO(jess): refactor this to make it an org transfer only
        from sentry.models import (
            Environment,
            EnvironmentProject,
            ProjectTeam,
            ReleaseProject,
            ReleaseProjectEnvironment,
            Rule,
        )

        if organization is None:
            organization = team.organization

        old_org_id = self.organization_id
        org_changed = old_org_id != organization.id

        self.organization = organization

        try:
            with transaction.atomic():
                self.update(organization=organization)
        except IntegrityError:
            slugify_instance(self,
                             self.name,
                             organization=organization,
                             max_length=50)
            self.update(slug=self.slug, organization=organization)

        # Both environments and releases are bound at an organization level.
        # Due to this, when you transfer a project into another org, we have to
        # handle this behavior somehow. We really only have two options here:
        # * Copy over all releases/environments into the new org and handle de-duping
        # * Delete the bindings and let them reform with new data.
        # We're generally choosing to just delete the bindings since new data
        # flowing in will recreate links correctly. The tradeoff is that
        # historical data is lost, but this is a compromise we're willing to
        # take and a side effect of allowing this feature. There are exceptions
        # to this however, such as rules, which should maintain their
        # configuration when moved across organizations.
        if org_changed:
            for model in ReleaseProject, ReleaseProjectEnvironment, EnvironmentProject:
                model.objects.filter(project_id=self.id).delete()
            # this is getting really gross, but make sure there aren't lingering associations
            # with old orgs or teams
            ProjectTeam.objects.filter(
                project=self, team__organization_id=old_org_id).delete()

        rules_by_environment_id = defaultdict(set)
        for rule_id, environment_id in Rule.objects.filter(
                project_id=self.id, environment_id__isnull=False).values_list(
                    "id", "environment_id"):
            rules_by_environment_id[environment_id].add(rule_id)

        environment_names = dict(
            Environment.objects.filter(
                id__in=rules_by_environment_id).values_list("id", "name"))

        for environment_id, rule_ids in rules_by_environment_id.items():
            Rule.objects.filter(id__in=rule_ids).update(
                environment_id=Environment.get_or_create(
                    self, environment_names[environment_id]).id)

        # ensure this actually exists in case the from-team was null
        if team is not None:
            self.add_team(team)
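
Rules are the one binding transfer_to() deliberately preserves across
organizations: each rule pointing at an environment in the old org is
re-pointed at the environment of the same name in the new org, creating it if
needed. A self-contained sketch of that remap, with plain dicts standing in
for the ORM:

    from collections import defaultdict

    # (rule_id, environment_id) pairs as pulled from the old org's Rule table
    rules = [(10, 1), (11, 1), (12, 2)]
    old_environment_names = {1: 'production', 2: 'staging'}

    new_org_envs = {}

    def get_or_create_environment(name):
        # stand-in for Environment.get_or_create on the new organization
        return new_org_envs.setdefault(name, len(new_org_envs) + 100)

    rules_by_environment_id = defaultdict(set)
    for rule_id, environment_id in rules:
        rules_by_environment_id[environment_id].add(rule_id)

    new_environment_ids = {}
    for environment_id, rule_ids in rules_by_environment_id.items():
        new_id = get_or_create_environment(old_environment_names[environment_id])
        for rule_id in rule_ids:
            new_environment_ids[rule_id] = new_id

    # rules 10 and 11 now share the new 'production' id; rule 12 gets 'staging'
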
Example No. 37
    def save(self, project, raw=False):
        from sentry.tasks.post_process import index_event_tags
        data = self.data

        project = Project.objects.get_from_cache(id=project)

        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(
                project_id=project.id,
                event_id=data['event_id'],
            )
        except Event.DoesNotExist:
            pass
        else:
            self.logger.info('duplicate.found',
                             exc_info=True,
                             extra={
                                 'event_uuid': data['event_id'],
                                 'project_id': project.id,
                                 'model': Event.__name__,
                             })
            return event

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        level = data.pop('level')
        culprit = data.pop('transaction', None)
        if not culprit:
            culprit = data.pop('culprit', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        dist = data.pop('dist', None)
        environment = data.pop('environment', None)

        # unused
        time_spent = data.pop('time_spent', None)
        message = data.pop('message', '')

        if not culprit:
            # if we generate an implicit culprit, let's not call it a
            # transaction
            transaction_name = None
            culprit = generate_culprit(data, platform=platform)
        else:
            transaction_name = culprit

        culprit = force_text(culprit)

        recorded_timestamp = data.pop('timestamp')
        date = datetime.fromtimestamp(recorded_timestamp)
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'platform': platform,
        }

        event = Event(project_id=project.id,
                      event_id=event_id,
                      data=data,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)
        event._project_cache = project
        data = event.data.data

        # convert this to a dict to ensure we're only storing one value per key
        # as most parts of Sentry don't currently play well with multiple values
        tags = dict(data.get('tags') or [])
        tags['level'] = LOG_LEVELS[level]
        if logger_name:
            tags['logger'] = logger_name
        if server_name:
            tags['server_name'] = server_name
        if site:
            tags['site'] = site
        if environment:
            tags['environment'] = environment
        if transaction_name:
            tags['transaction'] = transaction_name

        if release:
            # don't allow a conflicting 'release' tag
            if 'release' in tags:
                del tags['release']
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            tags['sentry:release'] = release.version

        if dist and release:
            dist = release.add_dist(dist, date)
            tags['sentry:dist'] = dist.name
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            if 'user' in tags:
                del tags['user']
            tags['sentry:user'] = event_user.tag_value

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        normalize_in_app(data)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    tags.setdefault(key, value)

        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                tags[k] = v
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.get_path(), None)

        # tags are stored as a tuple
        tags = tags.items()

        data['tags'] = tags
        data['fingerprint'] = fingerprint or ['{{ default }}']

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = [
                md5_from_hash(h)
                for h in get_hashes_from_fingerprint(event, fingerprint)
            ]
        elif checksum:
            if HASH_RE.match(checksum):
                hashes = [checksum]
            else:
                hashes = [md5_from_hash([checksum]), checksum]
            data['checksum'] = checksum
        else:
            hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

        # TODO(dcramer): temp workaround for complexity
        data['message'] = message
        event_type = eventtypes.get(data.get('type', 'default'))(data)
        event_metadata = event_type.get_metadata()
        # TODO(dcramer): temp workaround for complexity
        del data['message']

        data['type'] = event_type.key
        data['metadata'] = event_metadata

        # index components into ``Event.message``
        # See GH-3248
        if event_type.key != 'default':
            if 'sentry.interfaces.Message' in data and \
                    data['sentry.interfaces.Message']['message'] != message:
                message = u'{} {}'.format(
                    message,
                    data['sentry.interfaces.Message']['message'],
                )

        if not message:
            message = ''
        elif not isinstance(message, six.string_types):
            message = force_text(message)

        for value in six.itervalues(event_metadata):
            value_u = force_text(value, errors='replace')
            if value_u not in message:
                message = u'{} {}'.format(message, value_u)

        if culprit and culprit not in message:
            culprit_u = force_text(culprit, errors='replace')
            message = u'{} {}'.format(message, culprit_u)

        message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

        event.message = message
        kwargs['message'] = message

        received_timestamp = event.data.get('received') or float(
            event.datetime.strftime('%s'))
        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': {
                'last_received': received_timestamp,
                'type': event_type.key,
                # we cache the event's metadata on the group to ensure it's
                # accessible in the stream
                'metadata': event_metadata,
            },
        })

        if release:
            group_kwargs['first_release'] = release

        try:
            group, is_new, is_regression, is_sample = self._save_aggregate(
                event=event, hashes=hashes, release=release, **group_kwargs)
        except HashDiscarded:
            event_discarded.send_robust(
                project=project,
                sender=EventManager,
            )

            metrics.incr(
                'events.discarded',
                skip_internal=True,
                tags={
                    'organization_id': project.organization_id,
                    'platform': platform,
                },
            )
            raise
        else:
            event_saved.send_robust(
                project=project,
                event_size=event.size,
                sender=EventManager,
            )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        # When an event is sampled, the canonical source of truth
        # is the EventMapping table since we aren't going to be writing out an actual
        # Event row. Otherwise, if the Event isn't being sampled, we can safely
        # rely on the Event table itself as the source of truth and ignore
        # EventMapping since it's redundant information.
        if is_sample:
            try:
                with transaction.atomic(
                        using=router.db_for_write(EventMapping)):
                    EventMapping.objects.create(project=project,
                                                group=group,
                                                event_id=event_id)
            except IntegrityError:
                self.logger.info('duplicate.found',
                                 exc_info=True,
                                 extra={
                                     'event_uuid': event_id,
                                     'project_id': project.id,
                                     'group_id': group.id,
                                     'model': EventMapping.__name__,
                                 })
                return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
            group_id=group.id,
            environment_id=environment.id,
            defaults={
                'first_release_id': release.id if release else None,
            },
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            ReleaseProjectEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

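        # Bump time-series counters for the group and project (plus the
        # release, when the event has one attached).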
        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters,
                        timestamp=event.datetime,
                        environment_id=environment.id)

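        # Record relative frequencies (e.g. which environments a group is
        # most often seen in) for use in aggregate breakdowns.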
        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append((tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            }))

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

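        # Re-associate any user feedback filed against this event id with the
        # group and environment resolved above.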
        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(
            group=group,
            environment=environment,
        )

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info('duplicate.found',
                                 exc_info=True,
                                 extra={
                                     'event_uuid': event_id,
                                     'project_id': project.id,
                                     'group_id': group.id,
                                     'model': Event.__name__,
                                 })
                return event

            index_event_tags.delay(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                environment_id=environment.id,
                event_id=event.id,
                tags=tags,
                date_added=event.datetime,
            )

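        # Count the user toward distinct "users affected" totals at both the
        # group and project level.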
        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id,
                     (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id,
                     (event_user.tag_value, )),
                ),
                timestamp=event.datetime,
                environment_id=environment.id,
            )
        if release:
            if is_new:
                buffer.incr(ReleaseProject, {'new_groups': 1}, {
                    'release_id': release.id,
                    'project_id': project.id,
                })
            if is_new_group_environment:
                buffer.incr(ReleaseProjectEnvironment, {'new_issues_count': 1},
                            {
                                'project_id': project.id,
                                'release_id': release.id,
                                'environment_id': environment.id,
                            })

        safe_execute(Group.objects.add_tags,
                     group,
                     environment,
                     tags,
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project,
                                          group=group,
                                          sender=Project)

        eventstream.publish(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )

        metrics.timing(
            'events.latency',
            received_timestamp - recorded_timestamp,
            tags={
                'project_id': project.id,
            },
        )

        return event
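
The environment wiring above amounts to a handful of idempotent get_or_create calls. A minimal standalone sketch of just that slice (assumes project, group, release, and date objects are already in hand, and that the models behave exactly as in the snippet above):

    environment = Environment.get_or_create(project=project, name='production')

    # Tie the group to the environment, recording the first release seen there.
    group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
        group_id=group.id,
        environment_id=environment.id,
        defaults={'first_release_id': release.id if release else None},
    )

    if release:
        # Releases are additionally tracked per environment and per
        # (project, environment) pair.
        ReleaseEnvironment.get_or_create(
            project=project, release=release, environment=environment, datetime=date)
        ReleaseProjectEnvironment.get_or_create(
            project=project, release=release, environment=environment, datetime=date)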
Ejemplo n.º 38
0
    def setUp(self):
        self.project = self.create_project()
        self.release = Release.get_or_create(self.project, '1.0')
        self.environment1 = Environment.get_or_create(self.project, 'prod')
        self.environment2 = Environment.get_or_create(self.project, 'staging')
        self.timestamp = float(int(time() - 300))
Ejemplo n.º 39
0
    def test_event_user(self):
        manager = EventManager(
            make_event(event_id='a',
                       environment='totally unique environment',
                       **{'user': {
                           'id': '1',
                       }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        environment_id = Environment.get_for_organization_id(
            event.project.organization_id,
            'totally unique environment',
        ).id

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id, ),
            event.datetime,
            event.datetime,
        ) == {
            event.group.id: 1,
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id, ),
            event.datetime,
            event.datetime,
        ) == {
            event.project.id: 1,
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id, ),
            event.datetime,
            event.datetime,
            environment_id=environment_id,
        ) == {
            event.group.id: 1,
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id, ),
            event.datetime,
            event.datetime,
            environment_id=environment_id,
        ) == {
            event.project.id: 1,
        }

        euser = EventUser.objects.get(
            project_id=self.project.id,
            ident='1',
        )
        assert event.get_tag('sentry:user') == euser.tag_value

        # ensure event user is mapped to tags in second attempt
        manager = EventManager(
            make_event(event_id='b', **{'user': {
                'id': '1',
                'name': 'jane',
            }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        euser = EventUser.objects.get(id=euser.id)
        assert event.get_tag('sentry:user') == euser.tag_value
        assert euser.name == 'jane'
        assert euser.ident == '1'
Ejemplo n.º 40
0
def get_environment_name(event):
    return Environment.get_name_or_default(event.get_tag("environment"))
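
A hedged usage sketch: when the event carries an environment tag, the tag value comes back; otherwise get_name_or_default is assumed to substitute Sentry's default environment name (the exact default depends on the Sentry version):

    # tagged_event and untagged_event are hypothetical events for illustration.
    name = get_environment_name(tagged_event)    # e.g. 'staging' if tagged so
    name = get_environment_name(untagged_event)  # falls back to the default name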
Ejemplo n.º 41
0
    def save(self, project, raw=False):
        from sentry.tasks.post_process import index_event_tags

        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        level = data.pop('level')

        culprit = data.pop('culprit', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        dist = data.pop('dist', None)
        environment = data.pop('environment', None)

        # unused
        time_spent = data.pop('time_spent', None)
        message = data.pop('message', '')

        if not culprit:
            # if we generate an implicit culprit, let's not call it a
            # transaction
            transaction_name = None
            culprit = generate_culprit(data, platform=platform)
        else:
            transaction_name = culprit

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'platform': platform,
        }

        event = Event(project_id=project.id,
                      event_id=event_id,
                      data=data,
                      time_spent=time_spent,
                      datetime=date,
                      **kwargs)

        # convert this to a dict to ensure we're only storing one value per key
        # as most parts of Sentry don't currently play well with multiple values
        tags = dict(data.get('tags') or [])
        tags['level'] = LOG_LEVELS[level]
        if logger_name:
            tags['logger'] = logger_name
        if server_name:
            tags['server_name'] = server_name
        if site:
            tags['site'] = site
        if environment:
            tags['environment'] = environment
        if transaction_name:
            tags['transaction'] = transaction_name

        if release:
            # don't allow a conflicting 'release' tag
            if 'release' in tags:
                del tags['release']
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            tags['sentry:release'] = release.version

        if dist and release:
            dist = release.add_dist(dist, date)
            tags['sentry:dist'] = dist.name
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            if 'user' in tags:
                del tags['user']
            tags['sentry:user'] = event_user.tag_value

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        normalize_in_app(data)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags,
                                      event,
                                      _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    tags.setdefault(key, value)

        # tags are stored as a tuple
        tags = tags.items()

        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        data['fingerprint'] = fingerprint or ['{{ default }}']

        for path, iface in six.iteritems(event.interfaces):
            data['tags'].extend(iface.iter_tags())
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.get_path(), None)

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = [
                md5_from_hash(h)
                for h in get_hashes_from_fingerprint(event, fingerprint)
            ]
        elif checksum:
            hashes = [checksum]
            data['checksum'] = checksum
        else:
            hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

        # TODO(dcramer): temp workaround for complexity
        data['message'] = message
        event_type = eventtypes.get(data.get('type', 'default'))(data)
        event_metadata = event_type.get_metadata()
        # TODO(dcramer): temp workaround for complexity
        del data['message']

        data['type'] = event_type.key
        data['metadata'] = event_metadata

        # index components into ``Event.message``
        # See GH-3248
        if event_type.key != 'default':
            if 'sentry.interfaces.Message' in data and \
                    data['sentry.interfaces.Message']['message'] != message:
                message = u'{} {}'.format(
                    message,
                    data['sentry.interfaces.Message']['message'],
                )

        if not message:
            message = ''
        elif not isinstance(message, six.string_types):
            message = force_text(message)

        for value in six.itervalues(event_metadata):
            value_u = force_text(value, errors='replace')
            if value_u not in message:
                message = u'{} {}'.format(message, value_u)

        if culprit and culprit not in message:
            culprit_u = force_text(culprit, errors='replace')
            message = u'{} {}'.format(message, culprit_u)

        message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

        event.message = message
        kwargs['message'] = message

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': {
                'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
                'type': event_type.key,
                # we cache the event's metadata on the group to ensure it's
                # accessible in the stream
                'metadata': event_metadata,
            },
        })

        if release:
            group_kwargs['first_release'] = release

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs)

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project,
                                            group=group,
                                            event_id=event_id)
        except IntegrityError:
            self.logger.info('duplicate.found',
                             exc_info=True,
                             extra={
                                 'event_uuid': event_id,
                                 'project_id': project.id,
                                 'group_id': group.id,
                                 'model': EventMapping.__name__,
                             })
            return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append((tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            }))

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(group=group)

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info('duplicate.found',
                                 exc_info=True,
                                 extra={
                                     'event_uuid': event_id,
                                     'project_id': project.id,
                                     'group_id': group.id,
                                     'model': Event.__name__,
                                 })
                return event

            index_event_tags.delay(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                event_id=event.id,
                tags=tags,
            )

        if event_user:
            tsdb.record_multi((
                (tsdb.models.users_affected_by_group, group.id,
                 (event_user.tag_value, )),
                (tsdb.models.users_affected_by_project, project.id,
                 (event_user.tag_value, )),
            ),
                              timestamp=event.datetime)

        if is_new and release:
            buffer.incr(ReleaseProject, {'new_groups': 1}, {
                'release_id': release.id,
                'project_id': project.id,
            })

        safe_execute(Group.objects.add_tags,
                     group,
                     tags,
                     _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project,
                                          group=group,
                                          sender=Project)

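            # Hand non-raw events off to asynchronous post-processing.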
            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('post_process.skip.raw_event',
                             extra={'event_id': event.id})

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Ejemplo n.º 42
0
    def test_slack_channel_id_saved(self):
        self.login_as(user=self.user)

        project = self.create_project()

        rule = Rule.objects.create(
            project=project,
            environment_id=Environment.get_or_create(project, "production").id,
            label="foo",
        )
        integration = Integration.objects.create(
            provider="slack",
            name="Awesome Team",
            external_id="TXXXXXXX1",
            metadata={
                "access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
            },
        )
        integration.add_organization(project.organization, self.user)

        url = reverse(
            "sentry-api-0-project-rule-details",
            kwargs={
                "organization_slug": project.organization.slug,
                "project_slug": project.slug,
                "rule_id": rule.id,
            },
        )
        responses.add(
            method=responses.GET,
            url="https://slack.com/api/conversations.info",
            status=200,
            content_type="application/json",
            body=json.dumps({
                "ok": "true",
                "channel": {
                    "name": "team-team-team",
                    "id": "CSVK0921"
                }
            }),
        )
        response = self.client.put(
            url,
            data={
                "name": "hello world",
                "environment": None,
                "actionMatch": "any",
                "actions": [{
                    "id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
                    "name": "Send a notification to the funinthesun Slack workspace to #team-team-team and show tags [] in notification",
                    "workspace": integration.id,
                    "channel": "#team-team-team",
                    "input_channel_id": "CSVK0921",
                }],
                "conditions": [{
                    "id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"
                }],
            },
            format="json",
        )

        assert response.status_code == 200, response.content
        assert response.data["id"] == str(rule.id)
        assert response.data["actions"][0]["channel_id"] == "CSVK0921"
Ejemplo n.º 43
0
    def validate_environment(self, value):
        if not Environment.is_valid_name(value):
            raise serializers.ValidationError("Invalid value for environment")
        return value
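
A hedged sketch of where such a field validator typically lives: a DRF serializer calls validate_<field> hooks automatically during is_valid(). The serializer below is hypothetical, not the one this method was lifted from:

    from rest_framework import serializers

    class RuleSerializer(serializers.Serializer):  # hypothetical serializer
        environment = serializers.CharField(required=False)

        def validate_environment(self, value):
            # Delegate name validation to the Environment model itself.
            if not Environment.is_valid_name(value):
                raise serializers.ValidationError("Invalid value for environment")
            return value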
Ejemplo n.º 44
0
    def save(self, project_id, raw=False, assume_normalized=False):
        # Normalize if needed
        if not self._normalized:
            if not assume_normalized:
                self.normalize()
            self._normalized = True

        data = self._data

        project = Project.objects.get_from_cache(id=project_id)
        project._organization_cache = Organization.objects.get_from_cache(
            id=project.organization_id
        )

        # Check to make sure we're not about to do a bunch of work that's
        # already been done if we've processed an event with this ID. (This
        # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
        # there's a race condition between here and when the event is actually
        # saved, but it's an improvement. See GH-7677.)
        try:
            event = Event.objects.get(project_id=project.id, event_id=data["event_id"])
        except Event.DoesNotExist:
            pass
        else:
            # Make sure we cache on the project before returning
            event._project_cache = project
            logger.info(
                "duplicate.found",
                exc_info=True,
                extra={
                    "event_uuid": data["event_id"],
                    "project_id": project.id,
                    "model": Event.__name__,
                },
            )
            return event

        # Pull out the culprit
        culprit = self.get_culprit()

        # Pull the toplevel data we're interested in
        level = data.get("level")

        # TODO(mitsuhiko): this code path should be gone by July 2018.
        # This is going to be fine because no code actually still depends
        # on integers here.  When we need an integer it will be converted
        # into one later.  Old workers used to send integers here.
        if level is not None and isinstance(level, six.integer_types):
            level = LOG_LEVELS[level]

        transaction_name = data.get("transaction")
        logger_name = data.get("logger")
        release = data.get("release")
        dist = data.get("dist")
        environment = data.get("environment")
        recorded_timestamp = data.get("timestamp")

        # We need to swap out the data with the one internal to the newly
        # created event object
        event = self._get_event_instance(project_id=project_id)
        self._data = data = event.data.data

        event._project_cache = project

        date = event.datetime
        platform = event.platform
        event_id = event.event_id

        if transaction_name:
            transaction_name = force_text(transaction_name)

        # Right now the event type is the signal to skip the group. This
        # is going to change a lot.
        if event.get_event_type() == "transaction":
            issueless_event = True
        else:
            issueless_event = False

        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction).  These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        setdefault_path(data, "tags", value=[])
        set_tag(data, "level", level)
        if logger_name:
            set_tag(data, "logger", logger_name)
        if environment:
            set_tag(data, "environment", environment)
        if transaction_name:
            set_tag(data, "transaction", transaction_name)

        if release:
            # don't allow a conflicting 'release' tag
            pop_tag(data, "release")
            release = Release.get_or_create(project=project, version=release, date_added=date)
            set_tag(data, "sentry:release", release.version)

        if dist and release:
            dist = release.add_dist(dist, date)
            # don't allow a conflicting 'dist' tag
            pop_tag(data, "dist")
            set_tag(data, "sentry:dist", dist.name)
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            pop_tag(data, "user")
            set_tag(data, "sentry:user", event_user.tag_value)

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        grouping_config = load_grouping_config(
            get_grouping_config_dict_for_event_data(data, project)
        )
        normalize_stacktraces_for_grouping(data, grouping_config)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    if get_tag(data, key) is None:
                        set_tag(data, key, value)

        for path, iface in six.iteritems(event.interfaces):
            for k, v in iface.iter_tags():
                set_tag(data, k, v)
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.path, None)

        # The active grouping config was put into the event in the
        # normalize step before.  We now also make sure that the
        # fingerprint was set to '{{ default }}' just in case someone
        # removed it from the payload.  The call to get_hashes will then
        # look at `grouping_config` to pick the right parameters.
        data["fingerprint"] = data.get("fingerprint") or ["{{ default }}"]
        apply_server_fingerprinting(data, get_fingerprinting_config_for_project(project))

        # Here we try to use the grouping config that was requested in the
        # event.  If that config has since been deleted (because it was an
        # experimental grouping config) we fall back to the default.
        try:
            hashes = event.get_hashes()
        except GroupingConfigNotFound:
            data["grouping_config"] = get_grouping_config_dict_for_project(project)
            hashes = event.get_hashes()

        data["hashes"] = hashes

        # we want to freeze not just the metadata and type but also the
        # derived attributes.  The reason for this is that we push this
        # data into kafka for snuba processing and our postprocessing
        # picks up the data right from the snuba topic.  For most usage
        # however the data is dynamically overridden by Event.title and
        # Event.location (see Event.as_dict)
        materialized_metadata = self.materialize_metadata()
        event_metadata = materialized_metadata["metadata"]
        data.update(materialized_metadata)
        data["culprit"] = culprit

        # index components into ``Event.message``
        # See GH-3248
        event.message = self.get_search_message(event_metadata, culprit)
        received_timestamp = event.data.get("received") or float(event.datetime.strftime("%s"))

        if not issueless_event:
            # The group gets the same metadata as the event when it's flushed but
            # additionally the `last_received` key is set.  This key is used by
            # _save_aggregate.
            group_metadata = dict(materialized_metadata)
            group_metadata["last_received"] = received_timestamp
            kwargs = {
                "platform": platform,
                "message": event.message,
                "culprit": culprit,
                "logger": logger_name,
                "level": LOG_LEVELS_MAP.get(level),
                "last_seen": date,
                "first_seen": date,
                "active_at": date,
                "data": group_metadata,
            }

            if release:
                kwargs["first_release"] = release

            try:
                group, is_new, is_regression = self._save_aggregate(
                    event=event, hashes=hashes, release=release, **kwargs
                )
            except HashDiscarded:
                event_discarded.send_robust(project=project, sender=EventManager)

                metrics.incr(
                    "events.discarded",
                    skip_internal=True,
                    tags={"organization_id": project.organization_id, "platform": platform},
                )
                raise
            else:
                event_saved.send_robust(project=project, event_size=event.size, sender=EventManager)
            event.group = group
        else:
            group = None
            is_new = False
            is_regression = False
            event_saved.send_robust(project=project, event_size=event.size, sender=EventManager)

        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        environment = Environment.get_or_create(project=project, name=environment)

        if group:
            group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
                group_id=group.id,
                environment_id=environment.id,
                defaults={"first_release": release if release else None},
            )
        else:
            is_new_group_environment = False

        if release:
            ReleaseEnvironment.get_or_create(
                project=project, release=release, environment=environment, datetime=date
            )

            ReleaseProjectEnvironment.get_or_create(
                project=project, release=release, environment=environment, datetime=date
            )

            if group:
                grouprelease = GroupRelease.get_or_create(
                    group=group, release=release, environment=environment, datetime=date
                )

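        # The project counter is always bumped; group and release counters
        # only when those objects exist for this event.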
        counters = [(tsdb.models.project, project.id)]

        if group:
            counters.append((tsdb.models.group, group.id))

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
        ]

        if group:
            frequencies.append(
                (tsdb.models.frequent_environments_by_group, {group.id: {environment.id: 1}})
            )

            if release:
                frequencies.append(
                    (tsdb.models.frequent_releases_by_group, {group.id: {grouprelease.id: 1}})
                )
        if frequencies:
            tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        if group:
            UserReport.objects.filter(project=project, event_id=event_id).update(
                group=group, environment=environment
            )

        # save the event
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            logger.info(
                "duplicate.found",
                exc_info=True,
                extra={
                    "event_uuid": event_id,
                    "project_id": project.id,
                    "group_id": group.id if group else None,
                    "model": Event.__name__,
                },
            )
            return event

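        # Queue tag indexing so tag search data is built off the hot path.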
        tagstore.delay_index_event_tags(
            organization_id=project.organization_id,
            project_id=project.id,
            group_id=group.id if group else None,
            environment_id=environment.id,
            event_id=event.id,
            tags=event.tags,
            date_added=event.datetime,
        )

        if event_user:
            counters = [
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,))
            ]

            if group:
                counters.append(
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,))
                )

            tsdb.record_multi(counters, timestamp=event.datetime, environment_id=environment.id)

        if release:
            if is_new:
                buffer.incr(
                    ReleaseProject,
                    {"new_groups": 1},
                    {"release_id": release.id, "project_id": project.id},
                )
            if is_new_group_environment:
                buffer.incr(
                    ReleaseProjectEnvironment,
                    {"new_issues_count": 1},
                    {
                        "project_id": project.id,
                        "release_id": release.id,
                        "environment_id": environment.id,
                    },
                )

        if group:
            safe_execute(
                Group.objects.add_tags,
                group,
                environment,
                event.get_tags(),
                _with_transaction=False,
            )

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send_robust(project=project, event=event, sender=Project)

        eventstream.insert(
            group=group,
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=raw,
        )

        metrics.timing("events.latency", received_timestamp - recorded_timestamp)

        metrics.timing("events.size.data.post_save", event.size)

        return event
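
The closing latency metric is simply the gap between the SDK-stamped 'timestamp' and the server-side 'received' time, both unix floats. A toy illustration with hypothetical values:

    recorded_timestamp = 1546300800.0  # client-side 'timestamp' field
    received_timestamp = 1546300802.5  # server-side 'received' field
    metrics.timing("events.latency", received_timestamp - recorded_timestamp)  # 2.5s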
Ejemplo n.º 45
0
    def test_event_user(self):
        manager = EventManager(
            make_event(event_id="a",
                       environment="totally unique environment",
                       **{"user": {
                           "id": "1"
                       }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        environment_id = Environment.get_for_organization_id(
            event.project.organization_id, "totally unique environment").id

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id, ),
            event.datetime,
            event.datetime,
        ) == {
            event.group.id: 1
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id, ),
            event.datetime,
            event.datetime,
        ) == {
            event.project.id: 1
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group,
            (event.group.id, ),
            event.datetime,
            event.datetime,
            environment_id=environment_id,
        ) == {
            event.group.id: 1
        }

        assert tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_project,
            (event.project.id, ),
            event.datetime,
            event.datetime,
            environment_id=environment_id,
        ) == {
            event.project.id: 1
        }

        euser = EventUser.objects.get(project_id=self.project.id, ident="1")
        assert event.get_tag("sentry:user") == euser.tag_value

        # ensure event user is mapped to tags in second attempt
        manager = EventManager(
            make_event(event_id="b", **{"user": {
                "id": "1",
                "name": "jane"
            }}))
        manager.normalize()
        with self.tasks():
            event = manager.save(self.project.id)

        euser = EventUser.objects.get(id=euser.id)
        assert event.get_tag("sentry:user") == euser.tag_value
        assert euser.name == "jane"
        assert euser.ident == "1"
Ejemplo n.º 46
0
    def get_invalid_environment():
        raise Environment.DoesNotExist()
Ejemplo n.º 47
0
def test_valid_name(val, expected):
    assert Environment.is_valid_name(val) == expected
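
The parametrize decorator that feeds this test is not shown; a plausible reconstruction (the value/expectation pairs are illustrative assumptions, not the project's actual cases):

    import pytest

    @pytest.mark.parametrize('val, expected', [
        ('production', True),
        ('staging', True),
        ('', False),           # assumed invalid: empty name
        ('bad\nname', False),  # assumed invalid: control character
    ])
    def test_valid_name(val, expected):
        assert Environment.is_valid_name(val) == expected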
Ejemplo n.º 48
0
    def save(self, project, raw=False):
        from sentry.tasks.post_process import index_event_tags

        project = Project.objects.get_from_cache(id=project)

        data = self.data.copy()

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        level = data.pop('level')

        culprit = data.pop('transaction', None)
        if not culprit:
            culprit = data.pop('culprit', None)
        logger_name = data.pop('logger', None)
        server_name = data.pop('server_name', None)
        site = data.pop('site', None)
        checksum = data.pop('checksum', None)
        fingerprint = data.pop('fingerprint', None)
        platform = data.pop('platform', None)
        release = data.pop('release', None)
        dist = data.pop('dist', None)
        environment = data.pop('environment', None)

        # unused
        time_spent = data.pop('time_spent', None)
        message = data.pop('message', '')

        if not culprit:
            # if we generate an implicit culprit, let's not call it a
            # transaction
            transaction_name = None
            culprit = generate_culprit(data, platform=platform)
        else:
            transaction_name = culprit

        date = datetime.fromtimestamp(data.pop('timestamp'))
        date = date.replace(tzinfo=timezone.utc)

        kwargs = {
            'platform': platform,
        }

        event = Event(
            project_id=project.id,
            event_id=event_id,
            data=data,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )
        event._project_cache = project

        # convert this to a dict to ensure we're only storing one value per key
        # as most parts of Sentry don't currently play well with multiple values
        tags = dict(data.get('tags') or [])
        tags['level'] = LOG_LEVELS[level]
        if logger_name:
            tags['logger'] = logger_name
        if server_name:
            tags['server_name'] = server_name
        if site:
            tags['site'] = site
        if environment:
            tags['environment'] = environment
        if transaction_name:
            tags['transaction'] = transaction_name

        if release:
            # don't allow a conflicting 'release' tag
            if 'release' in tags:
                del tags['release']
            release = Release.get_or_create(
                project=project,
                version=release,
                date_added=date,
            )

            tags['sentry:release'] = release.version

        if dist and release:
            dist = release.add_dist(dist, date)
            tags['sentry:dist'] = dist.name
        else:
            dist = None

        event_user = self._get_event_user(project, data)
        if event_user:
            # don't allow a conflicting 'user' tag
            if 'user' in tags:
                del tags['user']
            tags['sentry:user'] = event_user.tag_value

        # At this point we want to normalize the in_app values in case the
        # clients did not set this appropriately so far.
        normalize_in_app(data)

        for plugin in plugins.for_project(project, version=None):
            added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
            if added_tags:
                # plugins should not override user provided tags
                for key, value in added_tags:
                    tags.setdefault(key, value)

        # tags are stored as a tuple
        tags = tags.items()

        # XXX(dcramer): we're relying on mutation of the data object to ensure
        # this propagates into Event
        data['tags'] = tags

        data['fingerprint'] = fingerprint or ['{{ default }}']

        for path, iface in six.iteritems(event.interfaces):
            data['tags'].extend(iface.iter_tags())
            # Get rid of ephemeral interface data
            if iface.ephemeral:
                data.pop(iface.get_path(), None)

        # prioritize fingerprint over checksum as it's likely the client defaulted
        # a checksum whereas the fingerprint was explicit
        if fingerprint:
            hashes = [md5_from_hash(h) for h in get_hashes_from_fingerprint(event, fingerprint)]
        elif checksum:
            hashes = [checksum]
            data['checksum'] = checksum
        else:
            hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

        # TODO(dcramer): temp workaround for complexity
        data['message'] = message
        event_type = eventtypes.get(data.get('type', 'default'))(data)
        event_metadata = event_type.get_metadata()
        # TODO(dcramer): temp workaround for complexity
        del data['message']

        data['type'] = event_type.key
        data['metadata'] = event_metadata

        # index components into ``Event.message``
        # See GH-3248
        if event_type.key != 'default':
            if 'sentry.interfaces.Message' in data and \
                    data['sentry.interfaces.Message']['message'] != message:
                message = u'{} {}'.format(
                    message,
                    data['sentry.interfaces.Message']['message'],
                )

        if not message:
            message = ''
        elif not isinstance(message, six.string_types):
            message = force_text(message)

        for value in six.itervalues(event_metadata):
            value_u = force_text(value, errors='replace')
            if value_u not in message:
                message = u'{} {}'.format(message, value_u)

        if culprit and culprit not in message:
            culprit_u = force_text(culprit, errors='replace')
            message = u'{} {}'.format(message, culprit_u)

        message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

        event.message = message
        kwargs['message'] = message

        group_kwargs = kwargs.copy()
        group_kwargs.update(
            {
                'culprit': culprit,
                'logger': logger_name,
                'level': level,
                'last_seen': date,
                'first_seen': date,
                'active_at': date,
                'data': {
                    'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
                    'type': event_type.key,
                    # we cache the event's metadata on the group to ensure it's
                    # accessible in the stream
                    'metadata': event_metadata,
                },
            }
        )

        if release:
            group_kwargs['first_release'] = release

        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs
        )

        event.group = group
        # store a reference to the group id to guarantee validation of isolation
        event.data.bind_ref(event)

        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info(
                'duplicate.found',
                exc_info=True,
                extra={
                    'event_uuid': event_id,
                    'project_id': project.id,
                    'group_id': group.id,
                    'model': EventMapping.__name__,
                }
            )
            return event

        environment = Environment.get_or_create(
            project=project,
            name=environment,
        )

        if release:
            ReleaseEnvironment.get_or_create(
                project=project,
                release=release,
                environment=environment,
                datetime=date,
            )

            grouprelease = GroupRelease.get_or_create(
                group=group,
                release=release,
                environment=environment,
                datetime=date,
            )

        counters = [
            (tsdb.models.group, group.id),
            (tsdb.models.project, project.id),
        ]

        if release:
            counters.append((tsdb.models.release, release.id))

        tsdb.incr_multi(counters, timestamp=event.datetime)

        frequencies = [
            # (tsdb.models.frequent_projects_by_organization, {
            #     project.organization_id: {
            #         project.id: 1,
            #     },
            # }),
            # (tsdb.models.frequent_issues_by_project, {
            #     project.id: {
            #         group.id: 1,
            #     },
            # })
            (tsdb.models.frequent_environments_by_group, {
                group.id: {
                    environment.id: 1,
                },
            })
        ]

        if release:
            frequencies.append(
                (tsdb.models.frequent_releases_by_group, {
                    group.id: {
                        grouprelease.id: 1,
                    },
                })
            )

        tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

        UserReport.objects.filter(
            project=project,
            event_id=event_id,
        ).update(group=group)

        # save the event unless it's been sampled
        if not is_sample:
            try:
                with transaction.atomic(using=router.db_for_write(Event)):
                    event.save()
            except IntegrityError:
                self.logger.info(
                    'duplicate.found',
                    exc_info=True,
                    extra={
                        'event_uuid': event_id,
                        'project_id': project.id,
                        'group_id': group.id,
                        'model': Event.__name__,
                    }
                )
                return event

            index_event_tags.delay(
                organization_id=project.organization_id,
                project_id=project.id,
                group_id=group.id,
                event_id=event.id,
                tags=tags,
            )

        if event_user:
            tsdb.record_multi(
                (
                    (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                    (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
                ),
                timestamp=event.datetime
            )

        if is_new and release:
            buffer.incr(
                ReleaseProject, {'new_groups': 1}, {
                    'release_id': release.id,
                    'project_id': project.id,
                }
            )

        safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

        if not raw:
            if not project.first_event:
                project.update(first_event=date)
                first_event_received.send(project=project, group=group, sender=Project)

            post_process_group.delay(
                group=group,
                event=event,
                is_new=is_new,
                is_sample=is_sample,
                is_regression=is_regression,
            )
        else:
            self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

        # TODO: move this to the queue
        if is_regression and not raw:
            regression_signal.send_robust(sender=Group, instance=group)

        return event
Ejemplo n.º 49
0
    def create_environment(self, **kwargs):
        project = kwargs.get('project', self.project)
        name = kwargs.get('name', petname.Generate(1, ' ', letters=10))
        return Environment.get_or_create(project=project, name=name)