def test_event_user(self):
    """Saving an event carrying a user interface should feed the
    affected-user TSDB series (with and without an environment filter),
    create an ``EventUser`` row, and tag the event with it; a second
    event for the same ident must reuse and refresh that row."""
    manager = EventManager(
        make_event(
            event_id="a",
            environment="totally unique environment",
            **{"user": {"id": "1"}}
        )
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    environment_id = Environment.get_for_organization_id(
        event.project.organization_id, "totally unique environment"
    ).id

    # Exactly one distinct user should be recorded for both the group and
    # the project, whether or not the environment filter is applied.
    for model, key in (
        (tsdb.models.users_affected_by_group, event.group.id),
        (tsdb.models.users_affected_by_project, event.project.id),
    ):
        assert tsdb.get_distinct_counts_totals(
            model, (key,), event.datetime, event.datetime
        ) == {key: 1}
        assert tsdb.get_distinct_counts_totals(
            model,
            (key,),
            event.datetime,
            event.datetime,
            environment_id=environment_id,
        ) == {key: 1}

    saved_user = EventUser.objects.get(project_id=self.project.id, ident="1")
    assert event.get_tag("sentry:user") == saved_user.tag_value

    # ensure event user is mapped to tags in second attempt
    manager = EventManager(
        make_event(event_id="b", **{"user": {"id": "1", "name": "jane"}})
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    saved_user = EventUser.objects.get(id=saved_user.id)
    assert event.get_tag("sentry:user") == saved_user.tag_value
    assert saved_user.name == "jane"
    assert saved_user.ident == "1"
def test_event_user(self):
    """Saving an event with a user interface records one distinct user in
    tsdb for both the group and the project, creates an ``EventUser``, and
    tags the event; a second event reuses the same ``EventUser`` row."""
    manager = EventManager(self.make_event(**{
        'sentry.interfaces.User': {
            'id': '1',
        }
    }))
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    group_id = event.group.id
    project_id = event.project.id

    # One distinct user for the group...
    group_totals = tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        (group_id,),
        event.datetime,
        event.datetime,
    )
    assert group_totals == {group_id: 1}

    # ...and one for the project.
    project_totals = tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_project,
        (project_id,),
        event.datetime,
        event.datetime,
    )
    assert project_totals == {project_id: 1}

    saved_user = EventUser.objects.get(
        project=self.project,
        ident='1',
    )
    assert event.get_tag('sentry:user') == saved_user.tag_value

    # ensure event user is mapped to tags in second attempt
    manager = EventManager(self.make_event(**{
        'sentry.interfaces.User': {
            'id': '1',
            'name': 'jane',
        }
    }))
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    saved_user = EventUser.objects.get(id=saved_user.id)
    assert event.get_tag('sentry:user') == saved_user.tag_value
    assert saved_user.name == 'jane'
    assert saved_user.ident == '1'
def test_event_user(self):
    """Saving an event with a user interface should count a single
    distinct user per group and per project, persist an ``EventUser``,
    and tag the event with its value across repeated submissions."""
    manager = EventManager(
        self.make_event(**{'sentry.interfaces.User': {'id': '1'}})
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    # Both affected-user series should show exactly one user.
    for model, key in (
        (tsdb.models.users_affected_by_group, event.group.id),
        (tsdb.models.users_affected_by_project, event.project.id),
    ):
        totals = tsdb.get_distinct_counts_totals(
            model, (key,), event.datetime, event.datetime
        )
        assert totals == {key: 1}

    euser = EventUser.objects.get(project=self.project, ident='1')
    assert event.get_tag('sentry:user') == euser.tag_value

    # ensure event user is mapped to tags in second attempt
    manager = EventManager(
        self.make_event(
            **{'sentry.interfaces.User': {'id': '1', 'name': 'jane'}}
        )
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    euser = EventUser.objects.get(id=euser.id)
    assert event.get_tag('sentry:user') == euser.tag_value
    assert euser.name == 'jane'
    assert euser.ident == '1'
def prepare_project_issue_list(interval, project):
    """Collect the issues for *project* active during *interval*.

    Gathers the ids of new issues (first seen inside the window) plus
    regressed issues (ever-resolved groups with a regression/unresolve
    activity inside the window), then pulls daily event and distinct-user
    totals for them from tsdb.

    Returns a tuple of ``(total_issue_count, trimmed_issue_list)`` where
    the trimmed list contains ``(issue_id, (event_count, user_count))``
    pairs as produced by ``trim_issue_list``.
    """
    start, stop = interval
    queryset = project.group_set.exclude(status=GroupStatus.MUTED)

    issue_ids = set()

    # Fetch all new issues.
    issue_ids.update(
        queryset.filter(
            first_seen__gte=start,
            first_seen__lt=stop,
        ).values_list('id', flat=True)
    )

    # Fetch all regressions. This is a little weird, since there's no way to
    # tell *when* a group regressed using the Group model. Instead, we query
    # all groups that have been seen in the last week and have ever regressed
    # and query the Activity model to find out if they regressed within the
    # past week. (In theory, the activity table *could* be used to answer this
    # query without the subselect, but there's no suitable indexes to make its
    # performance predictable.)
    issue_ids.update(
        Activity.objects.filter(
            group__in=queryset.filter(
                last_seen__gte=start,
                last_seen__lt=stop,
                resolved_at__isnull=False,  # signals this has *ever* been resolved
            ),
            type__in=(
                Activity.SET_REGRESSION,
                Activity.SET_UNRESOLVED,
            ),
            datetime__gte=start,
            datetime__lt=stop,
        ).distinct().values_list('group_id', flat=True)
    )

    rollup = 60 * 60 * 24  # one-day buckets

    events = tsdb.get_sums(
        tsdb.models.group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    users = tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    return (
        len(issue_ids),
        # Use ``issue_id`` rather than ``id`` to avoid shadowing the builtin.
        trim_issue_list(
            [(issue_id, (events[issue_id], users[issue_id])) for issue_id in issue_ids]
        ),
    )
def fetch_state(project, records):
    """Bulk-load the groups, rules, and tsdb counts referenced by *records*."""
    # This reads a little strange, but remember that records are returned in
    # reverse chronological order, and we query the database in chronological
    # order.
    # NOTE: This doesn't account for any issues that are filtered out later.
    start = records[-1].datetime
    end = records[0].datetime

    groups = Group.objects.in_bulk(
        record.value.event.group_id for record in records
    )
    rule_ids = itertools.chain.from_iterable(
        record.value.rules for record in records
    )

    return {
        "project": project,
        "groups": groups,
        "rules": Rule.objects.in_bulk(rule_ids),
        "event_counts": tsdb.get_sums(
            tsdb.models.group, groups.keys(), start, end
        ),
        "user_counts": tsdb.get_distinct_counts_totals(
            tsdb.models.users_affected_by_group, groups.keys(), start, end
        ),
    }
def test_event_user(self):
    """Saving an event with a user interface should record one distinct
    user in tsdb, create an ``EventUser``, and set the ``sentry:user``
    tag — on the first submission and again on a repeat submission."""
    manager = EventManager(self.make_event(**{
        'sentry.interfaces.User': {
            'id': '1',
        }
    }))
    manager.normalize()
    event = manager.save(self.project.id)

    # The group- and project-level affected-user series each count one user.
    for model, key in (
        (tsdb.models.users_affected_by_group, event.group.id),
        (tsdb.models.users_affected_by_project, event.project.id),
    ):
        assert tsdb.get_distinct_counts_totals(
            model, (key,), event.datetime, event.datetime
        ) == {key: 1}

    assert EventUser.objects.filter(
        project=self.project,
        ident='1',
    ).exists()
    assert 'sentry:user' in dict(event.tags)

    # ensure event user is mapped to tags in second attempt
    manager = EventManager(self.make_event(**{
        'sentry.interfaces.User': {
            'id': '1',
        }
    }))
    manager.normalize()
    event = manager.save(self.project.id)

    assert EventUser.objects.filter(
        project=self.project,
        ident='1',
    ).exists()
    assert 'sentry:user' in dict(event.tags)
def prepare_project_issue_list(interval, project):
    """Build the reporting issue list for *project* over *interval*.

    Collects ids of issues first seen in the window and of ever-resolved
    issues that regressed (or were unresolved) in the window, fetches their
    daily event sums and distinct-user totals from tsdb, and returns
    ``(total_issue_count, trimmed_issue_list)`` where the trimmed list holds
    ``(issue_id, (event_count, user_count))`` pairs via ``trim_issue_list``.
    """
    start, stop = interval
    queryset = project.group_set.exclude(status=GroupStatus.MUTED)

    issue_ids = set()

    # Fetch all new issues.
    issue_ids.update(
        queryset.filter(
            first_seen__gte=start,
            first_seen__lt=stop,
        ).values_list('id', flat=True)
    )

    # Fetch all regressions. This is a little weird, since there's no way to
    # tell *when* a group regressed using the Group model. Instead, we query
    # all groups that have been seen in the last week and have ever regressed
    # and query the Activity model to find out if they regressed within the
    # past week. (In theory, the activity table *could* be used to answer this
    # query without the subselect, but there's no suitable indexes to make its
    # performance predictable.)
    issue_ids.update(
        Activity.objects.filter(
            group__in=queryset.filter(
                last_seen__gte=start,
                last_seen__lt=stop,
                resolved_at__isnull=False,  # signals this has *ever* been resolved
            ),
            type__in=(
                Activity.SET_REGRESSION,
                Activity.SET_UNRESOLVED,
            ),
            datetime__gte=start,
            datetime__lt=stop,
        ).distinct().values_list('group_id', flat=True)
    )

    rollup = 60 * 60 * 24  # daily rollup buckets

    events = tsdb.get_sums(
        tsdb.models.group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    users = tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        issue_ids,
        start,
        stop,
        rollup=rollup,
    )

    # ``issue_id`` rather than ``id`` so the builtin isn't shadowed.
    pairs = [(issue_id, (events[issue_id], users[issue_id])) for issue_id in issue_ids]
    return (len(issue_ids), trim_issue_list(pairs))
def fetch_state(project, records):
    """Return the shared lookup state (groups, rules, tsdb counts) needed
    to render *records* for *project*."""
    # This reads a little strange, but remember that records are returned in
    # reverse chronological order, and we query the database in chronological
    # order.
    # NOTE: This doesn't account for any issues that are filtered out later.
    start, end = records[-1].datetime, records[0].datetime

    groups = Group.objects.in_bulk(
        record.value.event.group_id for record in records
    )
    rules = Rule.objects.in_bulk(
        itertools.chain.from_iterable(record.value.rules for record in records)
    )

    event_counts = tsdb.get_sums(tsdb.models.group, groups.keys(), start, end)
    user_counts = tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group, groups.keys(), start, end
    )

    return {
        'project': project,
        'groups': groups,
        'rules': rules,
        'event_counts': event_counts,
        'user_counts': user_counts,
    }
def test_event_user(self):
    """A saved event with a user should count one distinct user per group
    and per project (both globally and per environment), create an
    ``EventUser``, and keep reusing that row on later events."""
    manager = EventManager(
        make_event(
            event_id='a',
            environment='totally unique environment',
            **{'user': {'id': '1'}}
        )
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    environment_id = Environment.get_for_organization_id(
        event.project.organization_id,
        'totally unique environment',
    ).id

    group_id = event.group.id
    project_id = event.project.id

    # Unfiltered totals: one user affected the group and the project.
    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        (group_id,),
        event.datetime,
        event.datetime,
    ) == {group_id: 1}
    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_project,
        (project_id,),
        event.datetime,
        event.datetime,
    ) == {project_id: 1}

    # Environment-scoped totals must match as well.
    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        (group_id,),
        event.datetime,
        event.datetime,
        environment_id=environment_id,
    ) == {group_id: 1}
    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_project,
        (project_id,),
        event.datetime,
        event.datetime,
        environment_id=environment_id,
    ) == {project_id: 1}

    euser = EventUser.objects.get(project_id=self.project.id, ident='1')
    assert event.get_tag('sentry:user') == euser.tag_value

    # ensure event user is mapped to tags in second attempt
    manager = EventManager(
        make_event(event_id='b', **{'user': {'id': '1', 'name': 'jane'}})
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    euser = EventUser.objects.get(id=euser.id)
    assert event.get_tag('sentry:user') == euser.tag_value
    assert euser.name == 'jane'
    assert euser.ident == '1'
def test_event_user(self):
    """Saving an event with user data should register exactly one distinct
    user in the group/project tsdb series — with and without an environment
    filter — create an ``EventUser``, and update it on a second event."""
    manager = EventManager(
        make_event(
            event_id='a',
            environment='totally unique environment',
            **{'user': {'id': '1'}}
        )
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    environment_id = Environment.get_for_organization_id(
        event.project.organization_id,
        'totally unique environment',
    ).id

    def totals(model, key, **extra):
        # Helper: distinct-user totals for a single key over the event's
        # own timestamp window.
        return tsdb.get_distinct_counts_totals(
            model, (key,), event.datetime, event.datetime, **extra
        )

    assert totals(
        tsdb.models.users_affected_by_group, event.group.id
    ) == {event.group.id: 1}
    assert totals(
        tsdb.models.users_affected_by_project, event.project.id
    ) == {event.project.id: 1}
    assert totals(
        tsdb.models.users_affected_by_group,
        event.group.id,
        environment_id=environment_id,
    ) == {event.group.id: 1}
    assert totals(
        tsdb.models.users_affected_by_project,
        event.project.id,
        environment_id=environment_id,
    ) == {event.project.id: 1}

    euser = EventUser.objects.get(project_id=self.project.id, ident='1')
    assert event.get_tag('sentry:user') == euser.tag_value

    # ensure event user is mapped to tags in second attempt
    manager = EventManager(
        make_event(event_id='b', **{'user': {'id': '1', 'name': 'jane'}})
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    euser = EventUser.objects.get(id=euser.id)
    assert event.get_tag('sentry:user') == euser.tag_value
    assert euser.name == 'jane'
    assert euser.ident == '1'