def test_multiline_error(self, _send_mail):
    group = Group(
        id=2,
        first_seen=timezone.now(),
        last_seen=timezone.now(),
        project=self.project,
    )
    event = Event(
        group=group,
        message='hello world\nfoo bar',
        logger='root',
        project=self.project,
        datetime=group.last_seen,
    )

    with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
        self.plugin.notify_users(group, event)

    _send_mail.assert_called_once()
    args, kwargs = _send_mail.call_args
    assert kwargs.get('subject') == u"[{0}] ERROR: hello world".format(self.project.name)
def test_notify_users_renders_interfaces_with_utf8(self, _send_mail):
    group = Group(
        id=2,
        first_seen=timezone.now(),
        last_seen=timezone.now(),
        project=self.project,
    )

    stacktrace = Mock(spec=Stacktrace)
    stacktrace.to_email_html.return_value = u'רונית מגן'
    stacktrace.get_title.return_value = 'Stacktrace'

    event = Event()
    event.group = group
    event.project = self.project
    event.message = 'hello world'
    event.interfaces = {'sentry.interfaces.Stacktrace': stacktrace}

    with self.settings(SENTRY_URL_PREFIX='http://example.com'):
        self.plugin.notify_users(group, event)

    stacktrace.get_title.assert_called_once_with()
    stacktrace.to_email_html.assert_called_once_with(event)
def test_default_value(self):
    event = Event(
        data={
            'sentry.interfaces.Stacktrace': {
                'frames': [{
                    'lineno': 1,
                    'filename': 'foo.py',
                }, {
                    'lineno': 1,
                    'filename': 'foo.py',
                    'in_app': True,
                }],
            },
            'sentry.interfaces.Http': {
                'url': 'http://example.com'
            },
        },
        platform='python',
        message='Foo bar',
    )
    fp_checksums = get_hashes_from_fingerprint(event, ["{{default}}"])
    def_checksums = get_hashes_for_event(event)
    assert def_checksums == fp_checksums
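# Illustrative sketch only (not part of the original suite): the same helpers can
# show that adding a literal value to the fingerprint changes the grouping hashes.
# The fingerprint value "my-route" is an arbitrary example, not a Sentry constant.
def test_custom_fingerprint_changes_hash_sketch(self):
    event = Event(
        data={
            'sentry.interfaces.Http': {
                'url': 'http://example.com'
            },
        },
        platform='python',
        message='Foo bar',
    )
    default_checksums = get_hashes_from_fingerprint(event, ["{{default}}"])
    custom_checksums = get_hashes_from_fingerprint(event, ["{{default}}", "my-route"])
    # mixing the default fingerprint with a literal should yield different hashes
    assert custom_checksums != default_checksums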
def test_multiline_error(self, _send_mail):
    project = Project(id=1, name='Project Name')

    group = Group()
    group.first_seen = datetime.datetime.now()
    group.last_seen = group.first_seen
    group.project = project
    group.id = 2

    event = Event()
    event.group = group
    event.message = 'hello world\nfoo bar'
    event.logger = 'root'
    event.project = project
    event.date = group.last_seen

    with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
        p = MailProcessor(send_to=['*****@*****.**'])
        p.notify_users(group, event)

    _send_mail.assert_called_once()
    args, kwargs = _send_mail.call_args
    self.assertEquals(kwargs.get('subject'), u"[Project Name] ERROR: hello world")
def test_multiline_error(self, _send_mail):
    event_manager = EventManager({
        'message': 'hello world\nfoo bar',
        'level': 'error',
    })
    event_manager.normalize()
    event_data = event_manager.get_data()
    event_type = event_manager.get_event_type()

    group = Group(
        id=2,
        first_seen=timezone.now(),
        last_seen=timezone.now(),
        project=self.project,
        message=event_manager.get_search_message(),
        logger='root',
        short_id=2,
        data={
            'type': event_type.key,
            'metadata': event_type.get_metadata(),
        },
    )
    event = Event(
        group=group,
        message=group.message,
        project=self.project,
        datetime=group.last_seen,
        data=event_data,
    )
    notification = Notification(event=event)

    with self.options({'system.url-prefix': 'http://example.com'}):
        self.plugin.notify(notification)

    assert _send_mail.call_count == 1
    args, kwargs = _send_mail.call_args
    assert kwargs.get('subject') == u'BAR-2 - hello world'
def test_notify_users_does_email(self, _send_mail):
    event_manager = EventManager({"message": "hello world", "level": "error"})
    event_manager.normalize()
    event_data = event_manager.get_data()
    event_type = event_manager.get_event_type()
    event_data["type"] = event_type.key
    event_data["metadata"] = event_type.get_metadata(event_data)

    group = Group(
        id=2,
        first_seen=timezone.now(),
        last_seen=timezone.now(),
        project=self.project,
        message=event_manager.get_search_message(),
        logger="root",
        short_id=2,
        data={"type": event_type.key, "metadata": event_type.get_metadata(event_data)},
    )
    event = Event(
        group=group,
        message=group.message,
        project=self.project,
        datetime=group.last_seen,
        data=event_data,
    )
    notification = Notification(event=event)

    with self.options({"system.url-prefix": "http://example.com"}):
        self.plugin.notify(notification)

    assert _send_mail.call_count == 1
    args, kwargs = _send_mail.call_args
    self.assertEquals(kwargs.get("project"), self.project)
    self.assertEquals(kwargs.get("reference"), group)
    assert kwargs.get("subject") == u"BAR-2 - hello world"
def test_notify_users_renders_interfaces(self, _send_mail):
    group = Group(
        id=2,
        first_seen=timezone.now(),
        last_seen=timezone.now(),
        project=self.project,
    )

    stacktrace = Mock(spec=Stacktrace)
    stacktrace.to_string.return_value = 'foo bar'
    stacktrace.get_title.return_value = 'Stacktrace'

    event = Event()
    event.group = group
    event.message = 'hello world'
    event.logger = 'root'
    event.site = None
    event.interfaces = {'sentry.interfaces.Stacktrace': stacktrace}

    with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
        self.plugin.notify_users(group, event)

    stacktrace.get_title.assert_called_once_with()
    stacktrace.to_string.assert_called_once_with(event)
def get(self, request):
    org = Organization(
        id=1,
        slug='organization',
        name='My Company',
    )
    project = Project(
        id=1,
        organization=org,
        slug='project',
        name='My Project',
    )
    group = next(
        make_group_generator(get_random(request), project),
    )
    event = Event(
        id=1,
        project=project,
        group=group,
        message=group.message,
        data=load_data('python'),
        datetime=datetime(2016, 6, 13, 3, 8, 24, tzinfo=timezone.utc),
    )
    activity = Activity(
        group=event.group,
        project=event.project,
        **self.get_activity(request, event)
    )

    return render_to_response(
        'sentry/debug/mail/preview.html', {
            'preview': ActivityMailPreview(request, activity),
            'format': request.GET.get('format'),
        }
    )
def test_event_hash_variant(insta_snapshot, config_name, test_name, log):
    with open(os.path.join(_fixture_path, test_name + '.json')) as f:
        input = json.load(f)

    grouping_config = {
        'id': config_name,
    }

    mgr = EventManager(data=input, grouping_config=grouping_config)
    mgr.normalize()
    data = mgr.get_data()

    evt = Event(data=data, platform=data['platform'])

    rv = []
    for (key, value) in sorted(evt.get_grouping_variants().items()):
        if rv:
            rv.append('-' * 74)
        rv.append('%s:' % key)
        dump_variant(value, rv, 1)
    output = '\n'.join(rv)

    log(repr(evt.get_hashes()))

    assert evt.get_grouping_config() == grouping_config

    insta_snapshot(output)
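# Hypothetical helper, not in the original tests: built from the same calls used
# above, it normalizes a raw payload under a given grouping config and returns the
# resulting hashes so two configs can be compared. The config ids in the usage
# comment are placeholders.
def hashes_for_config(payload, config_id):
    mgr = EventManager(data=payload, grouping_config={'id': config_id})
    mgr.normalize()
    data = mgr.get_data()
    evt = Event(data=data, platform=data['platform'])
    return evt.get_hashes()

# Usage sketch: check whether two configs group a payload the same way.
# assert hashes_for_config(payload, 'legacy:2019-03-12') == \
#     hashes_for_config(payload, 'newstyle:2019-05-08')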
def get_view_response(self, request, group):
    from sentry.models import Event

    self.selected = request.path == self.get_url(group)

    if not self.selected:
        return

    response = self.view(request, group)

    if not response:
        return

    if isinstance(response, HttpResponseRedirect):
        return response

    if not isinstance(response, Response):
        raise NotImplementedError("Use self.render() when returning responses.")

    event = group.get_latest_event() or Event()
    event.group = group

    request.access = access.from_request(request, group.organization)

    return response.respond(
        request,
        {
            "plugin": self,
            "project": group.project,
            "group": group,
            "event": event,
            "can_admin_event": request.access.has_scope("event:write"),
            "can_remove_event": request.access.has_scope("event:admin"),
        },
    )
def create_event(self, event_id=None, **kwargs):
    if event_id is None:
        event_id = uuid4().hex
    if 'group' not in kwargs:
        kwargs['group'] = self.group
    kwargs.setdefault('project', kwargs['group'].project)
    kwargs.setdefault('message', kwargs['group'].message)
    kwargs.setdefault('data', LEGACY_DATA.copy())

    if kwargs.get('tags'):
        tags = kwargs.pop('tags')
        if isinstance(tags, dict):
            tags = tags.items()
        kwargs['data']['tags'] = tags

    kwargs['data'].setdefault('errors', [{
        'type': EventError.INVALID_DATA,
        'name': 'foobar',
    }])

    event = Event(event_id=event_id, **kwargs)
    # emulate EventManager refs
    event.data.bind_ref(event)
    event.save()
    return event
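# Usage sketch (illustrative only, inside a test method on the same fixture class):
# the factory above fills in group, project, message and data defaults, so a test
# only passes the fields it cares about. The tag values here are arbitrary.
event = self.create_event(
    message='something broke',
    tags={'level': 'error', 'logger': 'root'},
)
assert event.group == self.group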
def get(self, request, organization, project, team, group_id, event_id_or_latest):
    try:
        # TODO(tkaemming): This should *actually* redirect, see similar
        # comment in ``GroupEndpoint.convert_args``.
        group, _ = get_group_with_redirect(
            group_id,
            queryset=Group.objects.filter(project=project),
        )
    except Group.DoesNotExist:
        raise Http404

    if event_id_or_latest == 'latest':
        # It's possible that a message would not be created under certain
        # circumstances (such as a post_save signal failing)
        event = group.get_latest_event() or Event(group=group)
    else:
        event = get_object_or_404(group.event_set, pk=event_id_or_latest)

    Event.objects.bind_nodes([event], 'data')
    GroupMeta.objects.populate_cache([group])

    return HttpResponse(json.dumps(event.as_dict()), content_type='application/json')
def test_stacktrace_wins_over_http(self, http_comp_hash, stack_comp_hash):
    # this was a regression, and a very important one
    http_comp_hash.return_value = [['baz']]
    stack_comp_hash.return_value = [['foo', 'bar']]
    event = Event(
        data={
            'sentry.interfaces.Stacktrace': {
                'frames': [{
                    'lineno': 1,
                    'filename': 'foo.py',
                }],
            },
            'sentry.interfaces.Http': {
                'url': 'http://example.com'
            },
        },
        message='Foo bar',
    )
    checksums = get_hashes_for_event(event)
    assert len(checksums) == 1
    checksum = checksums[0]
    stack_comp_hash.assert_called_once_with()
    assert not http_comp_hash.called
    assert checksum == '3858f62230ac3c915f300c664312c63f'
def test_notify_users_renders_interfaces_with_utf8(self, _send_mail):
    group = Group()
    group.first_seen = timezone.now()
    group.last_seen = group.first_seen
    group.id = 2
    group.project_id = 1

    stacktrace = Mock(spec=Stacktrace)
    stacktrace.to_string.return_value = u'רונית מגן'
    stacktrace.get_title.return_value = 'Stacktrace'

    event = Event()
    event.group = group
    event.message = 'hello world'
    event.logger = 'root'
    event.site = None
    event.interfaces = {'sentry.interfaces.Stacktrace': stacktrace}

    with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
        p = MailProcessor(send_to=['*****@*****.**'])
        p.notify_users(group, event)

    stacktrace.get_title.assert_called_once_with()
    stacktrace.to_string.assert_called_once_with(event)
def from_kwargs(self, project, **kwargs):
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    from sentry.models import Event, Project, View
    from sentry.views import View as ViewHandler

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = kwargs.pop('event_id', None)
    message = kwargs.pop('message', None)
    culprit = kwargs.pop('culprit', None)
    level = kwargs.pop('level', None)
    time_spent = kwargs.pop('time_spent', None)
    logger_name = kwargs.pop('logger', None) or settings.DEFAULT_LOGGER_NAME
    server_name = kwargs.pop('server_name', None)
    site = kwargs.pop('site', None)
    date = kwargs.pop('timestamp', None) or datetime.datetime.utcnow()
    checksum = kwargs.pop('checksum', None)

    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    date = utc_to_local(date)
    data = kwargs

    kwargs = {
        'level': level,
        'message': message,
    }

    event = Event(
        project=project,
        event_id=event_id,
        culprit=culprit,
        logger=logger_name,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)

    event.checksum = checksum

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    views = set()
    for viewhandler in ViewHandler.objects.all():
        try:
            if not viewhandler.should_store(event):
                continue

            path = '%s.%s' % (viewhandler.__module__, viewhandler.__class__.__name__)

            if not viewhandler.ref:
                # TODO: this should handle race conditions
                viewhandler.ref = View.objects.get_or_create(
                    path=path,
                    defaults=dict(
                        verbose_name=viewhandler.verbose_name,
                        verbose_name_plural=viewhandler.verbose_name_plural,
                    ),
                )[0]

            views.add(viewhandler.ref)

        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(exc)
            except Exception as exc:
                warnings.warn(exc)
def save(self, project, raw=False): from sentry.tasks.post_process import index_event_tags project = Project.objects.get_from_cache(id=project) data = self.data.copy() # First we pull out our top-level (non-data attr) kwargs event_id = data.pop('event_id') level = data.pop('level') culprit = data.pop('culprit', None) logger_name = data.pop('logger', None) server_name = data.pop('server_name', None) site = data.pop('site', None) checksum = data.pop('checksum', None) fingerprint = data.pop('fingerprint', None) platform = data.pop('platform', None) release = data.pop('release', None) environment = data.pop('environment', None) # unused time_spent = data.pop('time_spent', None) message = data.pop('message', '') if not culprit: # if we generate an implicit culprit, lets not call it a # transaction transaction_name = None culprit = generate_culprit(data, platform=platform) else: transaction_name = culprit date = datetime.fromtimestamp(data.pop('timestamp')) date = date.replace(tzinfo=timezone.utc) kwargs = { 'platform': platform, } event = Event( project_id=project.id, event_id=event_id, data=data, time_spent=time_spent, datetime=date, **kwargs ) # convert this to a dict to ensure we're only storing one value per key # as most parts of Sentry dont currently play well with multiple values tags = dict(data.get('tags') or []) tags['level'] = LOG_LEVELS[level] if logger_name: tags['logger'] = logger_name if server_name: tags['server_name'] = server_name if site: tags['site'] = site if environment: tags['environment'] = environment if transaction_name: tags['transaction'] = transaction_name if release: # dont allow a conflicting 'release' tag if 'release' in tags: del tags['release'] tags['sentry:release'] = release event_user = self._get_event_user(project, data) if event_user: # dont allow a conflicting 'user' tag if 'user' in tags: del tags['user'] tags['sentry:user'] = event_user.tag_value for plugin in plugins.for_project(project, version=None): added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False) if added_tags: # plugins should not override user provided tags for key, value in added_tags: tags.setdefault(key, value) # tags are stored as a tuple tags = tags.items() # XXX(dcramer): we're relying on mutation of the data object to ensure # this propagates into Event data['tags'] = tags data['fingerprint'] = fingerprint or ['{{ default }}'] for path, iface in six.iteritems(event.interfaces): data['tags'].extend(iface.iter_tags()) # Get rid of ephemeral interface data if iface.ephemeral: data.pop(iface.get_path(), None) # prioritize fingerprint over checksum as its likely the client defaulted # a checksum whereas the fingerprint was explicit if fingerprint: hashes = [ md5_from_hash(h) for h in get_hashes_from_fingerprint(event, fingerprint) ] elif checksum: hashes = [checksum] data['checksum'] = checksum else: hashes = [ md5_from_hash(h) for h in get_hashes_for_event(event) ] # TODO(dcramer): temp workaround for complexity data['message'] = message event_type = eventtypes.get(data.get('type', 'default'))(data) event_metadata = event_type.get_metadata() # TODO(dcramer): temp workaround for complexity del data['message'] data['type'] = event_type.key data['metadata'] = event_metadata # index components into ``Event.message`` # See GH-3248 if event_type.key != 'default': if 'sentry.interfaces.Message' in data and \ data['sentry.interfaces.Message']['message'] != message: message = u'{} {}'.format( message, data['sentry.interfaces.Message']['message'], ) if not message: message = '' 
elif not isinstance(message, six.string_types): message = force_text(message) for value in six.itervalues(event_metadata): value_u = force_text(value, errors='replace') if value_u not in message: message = u'{} {}'.format(message, value_u) if culprit and culprit not in message: culprit_u = force_text(culprit, errors='replace') message = u'{} {}'.format(message, culprit_u) message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH) event.message = message kwargs['message'] = message group_kwargs = kwargs.copy() group_kwargs.update({ 'culprit': culprit, 'logger': logger_name, 'level': level, 'last_seen': date, 'first_seen': date, 'active_at': date, 'data': { 'last_received': event.data.get('received') or float(event.datetime.strftime('%s')), 'type': event_type.key, # we cache the events metadata on the group to ensure its # accessible in the stream 'metadata': event_metadata, }, }) if release: release = Release.get_or_create( project=project, version=release, date_added=date, ) group_kwargs['first_release'] = release group, is_new, is_regression, is_sample = self._save_aggregate( event=event, hashes=hashes, release=release, **group_kwargs ) event.group = group # store a reference to the group id to guarantee validation of isolation event.data.bind_ref(event) try: with transaction.atomic(using=router.db_for_write(EventMapping)): EventMapping.objects.create( project=project, group=group, event_id=event_id) except IntegrityError: self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True) return event environment = Environment.get_or_create( project=project, name=environment, ) if release: ReleaseEnvironment.get_or_create( project=project, release=release, environment=environment, datetime=date, ) grouprelease = GroupRelease.get_or_create( group=group, release=release, environment=environment, datetime=date, ) counters = [ (tsdb.models.group, group.id), (tsdb.models.project, project.id), ] if release: counters.append((tsdb.models.release, release.id)) tsdb.incr_multi(counters, timestamp=event.datetime) frequencies = [ # (tsdb.models.frequent_projects_by_organization, { # project.organization_id: { # project.id: 1, # }, # }), # (tsdb.models.frequent_issues_by_project, { # project.id: { # group.id: 1, # }, # }) (tsdb.models.frequent_environments_by_group, { group.id: { environment.id: 1, }, }) ] if release: frequencies.append( (tsdb.models.frequent_releases_by_group, { group.id: { grouprelease.id: 1, }, }) ) tsdb.record_frequency_multi(frequencies, timestamp=event.datetime) UserReport.objects.filter( project=project, event_id=event_id, ).update(group=group) # save the event unless its been sampled if not is_sample: try: with transaction.atomic(using=router.db_for_write(Event)): event.save() except IntegrityError: self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True) return event index_event_tags.delay( project_id=project.id, group_id=group.id, event_id=event.id, tags=tags, ) if event_user: tsdb.record_multi(( (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)), (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)), ), timestamp=event.datetime) if is_new and release: buffer.incr(Release, {'new_groups': 1}, { 'id': release.id, }) safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False) if not raw: if not project.first_event: project.update(first_event=date) first_event_received.send(project=project, group=group, sender=Project) post_process_group.delay( group=group, event=event, 
is_new=is_new, is_sample=is_sample, is_regression=is_regression, ) else: self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id}) # TODO: move this to the queue if is_regression and not raw: regression_signal.send_robust(sender=Group, instance=group) return event
def group(request, team, project, group, event_id=None):
    # It's possible that a message would not be created under certain
    # circumstances (such as a post_save signal failing)
    if event_id:
        event = get_object_or_404(group.event_set, id=event_id)
    else:
        event = group.get_latest_event() or Event()

    Event.objects.bind_nodes([event], 'data')

    # bind params to group in case they get hit
    event.group = group
    event.project = project

    if request.POST.get('o') == 'note' and request.user.is_authenticated():
        add_note_form = NewNoteForm(request.POST)
        if add_note_form.is_valid():
            add_note_form.save(event, request.user)
            return HttpResponseRedirect(request.path)
    else:
        add_note_form = NewNoteForm()

    activity_qs = Activity.objects.order_by('-datetime').select_related('user')
    # if event_id:
    #     activity_qs = activity_qs.filter(
    #         Q(event=event) | Q(event__isnull=True),
    #     )

    if project in Project.objects.get_for_user(request.user, team=team, superuser=False):
        # update that the user has seen this group
        create_or_update(
            GroupSeen,
            group=group,
            user=request.user,
            project=project,
            defaults={
                'last_seen': timezone.now(),
            },
        )

    # filter out dupe activity items
    activity_items = set()
    activity = []
    for item in activity_qs.filter(group=group)[:20]:
        sig = (item.event_id, item.type, item.ident, item.user_id)
        # TODO: we could just generate a signature (hash(text)) for notes
        # so theres no special casing
        if item.type == Activity.NOTE:
            activity.append(item)
        elif sig not in activity_items:
            activity_items.add(sig)
            activity.append(item)

    activity.append(
        Activity(
            project=project,
            group=group,
            type=Activity.FIRST_SEEN,
            datetime=group.first_seen,
        )
    )

    # trim to latest 7
    activity = activity[:7]

    seen_by = sorted(
        filter(
            lambda ls: ls[0] != request.user and ls[0].email,
            [
                (gs.user, gs.last_seen)
                for gs in GroupSeen.objects.filter(group=group).select_related('user')
            ],
        ),
        key=lambda ls: ls[1],
        reverse=True,
    )
    seen_by_extra = len(seen_by) - 5
    if seen_by_extra < 0:
        seen_by_extra = 0
    seen_by_faces = seen_by[:5]

    context = {
        'add_note_form': add_note_form,
        'page': 'details',
        'activity': activity,
        'seen_by': seen_by,
        'seen_by_faces': seen_by_faces,
        'seen_by_extra': seen_by_extra,
    }

    is_public = group_is_public(group, request.user)

    if is_public:
        template = 'sentry/groups/public_details.html'
        context['PROJECT_LIST'] = [project]
    else:
        template = 'sentry/groups/details.html'

    return render_with_group_context(
        group, template, context, request,
        event=event, is_public=is_public,
    )
def group(request, team, project, group, event_id=None):
    # It's possible that a message would not be created under certain
    # circumstances (such as a post_save signal failing)
    activity_qs = Activity.objects.order_by('-datetime').select_related('user')

    if event_id:
        event = get_object_or_404(group.event_set, id=event_id)
        activity_qs = activity_qs.filter(
            Q(event=event) | Q(event__isnull=True),
        )
    else:
        event = group.get_latest_event() or Event()

    # bind params to group in case they get hit
    event.group = group
    event.project = project

    if project in Project.objects.get_for_user(request.user, team=team):
        # update that the user has seen this group
        create_or_update(
            GroupSeen,
            group=group,
            user=request.user,
            project=project,
            defaults={
                'last_seen': timezone.now(),
            },
        )

    # filter out dupe activity items
    activity_items = set()
    activity = []
    for item in activity_qs.filter(group=group)[:10]:
        sig = (item.event_id, item.type, item.ident, item.user_id)
        if sig not in activity_items:
            activity_items.add(sig)
            activity.append(item)

    # trim to latest 5
    activity = activity[:5]

    seen_by = sorted(
        filter(
            lambda ls: ls[0] != request.user and ls[0].email,
            [
                (gs.user, gs.last_seen)
                for gs in GroupSeen.objects.filter(group=group).select_related('user')
            ],
        ),
        key=lambda ls: ls[1],
    )
    seen_by_extra = len(seen_by) - 5
    if seen_by_extra < 0:
        seen_by_extra = 0
    seen_by_faces = seen_by[:5]

    context = {
        'page': 'details',
        'activity': activity,
        'seen_by': seen_by,
        'seen_by_faces': seen_by_faces,
        'seen_by_extra': seen_by_extra,
    }

    is_public = group_is_public(group, request.user)

    if is_public:
        template = 'sentry/groups/public_details.html'
        context['PROJECT_LIST'] = [project]
    else:
        template = 'sentry/groups/details.html'

    return render_with_group_context(
        group, template, context, request,
        event=event, is_public=is_public,
    )
def save(self, project, raw=False): from sentry.tasks.post_process import index_event_tags data = self.data project = Project.objects.get_from_cache(id=project) # Check to make sure we're not about to do a bunch of work that's # already been done if we've processed an event with this ID. (This # isn't a perfect solution -- this doesn't handle ``EventMapping`` and # there's a race condition between here and when the event is actually # saved, but it's an improvement. See GH-7677.) try: event = Event.objects.get( project_id=project.id, event_id=data['event_id'], ) except Event.DoesNotExist: pass else: self.logger.info('duplicate.found', exc_info=True, extra={ 'event_uuid': data['event_id'], 'project_id': project.id, 'model': Event.__name__, }) return event # First we pull out our top-level (non-data attr) kwargs event_id = data.pop('event_id') level = data.pop('level') culprit = data.pop('transaction', None) if not culprit: culprit = data.pop('culprit', None) logger_name = data.pop('logger', None) server_name = data.pop('server_name', None) site = data.pop('site', None) checksum = data.pop('checksum', None) fingerprint = data.pop('fingerprint', None) platform = data.pop('platform', None) release = data.pop('release', None) dist = data.pop('dist', None) environment = data.pop('environment', None) # unused time_spent = data.pop('time_spent', None) message = data.pop('message', '') if not culprit: # if we generate an implicit culprit, lets not call it a # transaction transaction_name = None culprit = generate_culprit(data, platform=platform) else: transaction_name = culprit culprit = force_text(culprit) recorded_timestamp = data.pop('timestamp') date = datetime.fromtimestamp(recorded_timestamp) date = date.replace(tzinfo=timezone.utc) kwargs = { 'platform': platform, } event = Event(project_id=project.id, event_id=event_id, data=data, time_spent=time_spent, datetime=date, **kwargs) event._project_cache = project # convert this to a dict to ensure we're only storing one value per key # as most parts of Sentry dont currently play well with multiple values tags = dict(data.get('tags') or []) tags['level'] = LOG_LEVELS[level] if logger_name: tags['logger'] = logger_name if server_name: tags['server_name'] = server_name if site: tags['site'] = site if environment: tags['environment'] = environment if transaction_name: tags['transaction'] = transaction_name if release: # dont allow a conflicting 'release' tag if 'release' in tags: del tags['release'] release = Release.get_or_create( project=project, version=release, date_added=date, ) tags['sentry:release'] = release.version if dist and release: dist = release.add_dist(dist, date) tags['sentry:dist'] = dist.name else: dist = None event_user = self._get_event_user(project, data) if event_user: # dont allow a conflicting 'user' tag if 'user' in tags: del tags['user'] tags['sentry:user'] = event_user.tag_value # At this point we want to normalize the in_app values in case the # clients did not set this appropriately so far. 
normalize_in_app(data) for plugin in plugins.for_project(project, version=None): added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False) if added_tags: # plugins should not override user provided tags for key, value in added_tags: tags.setdefault(key, value) for path, iface in six.iteritems(event.interfaces): for k, v in iface.iter_tags(): tags[k] = v # Get rid of ephemeral interface data if iface.ephemeral: data.pop(iface.get_path(), None) # tags are stored as a tuple tags = tags.items() data['tags'] = tags data['fingerprint'] = fingerprint or ['{{ default }}'] # prioritize fingerprint over checksum as its likely the client defaulted # a checksum whereas the fingerprint was explicit if fingerprint: hashes = [ md5_from_hash(h) for h in get_hashes_from_fingerprint(event, fingerprint) ] elif checksum: if HASH_RE.match(checksum): hashes = [checksum] else: hashes = [md5_from_hash([checksum]), checksum] data['checksum'] = checksum else: hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)] # TODO(dcramer): temp workaround for complexity data['message'] = message event_type = eventtypes.get(data.get('type', 'default'))(data) event_metadata = event_type.get_metadata() # TODO(dcramer): temp workaround for complexity del data['message'] data['type'] = event_type.key data['metadata'] = event_metadata # index components into ``Event.message`` # See GH-3248 if event_type.key != 'default': if 'sentry.interfaces.Message' in data and \ data['sentry.interfaces.Message']['message'] != message: message = u'{} {}'.format( message, data['sentry.interfaces.Message']['message'], ) if not message: message = '' elif not isinstance(message, six.string_types): message = force_text(message) for value in six.itervalues(event_metadata): value_u = force_text(value, errors='replace') if value_u not in message: message = u'{} {}'.format(message, value_u) if culprit and culprit not in message: culprit_u = force_text(culprit, errors='replace') message = u'{} {}'.format(message, culprit_u) message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH) event.message = message kwargs['message'] = message received_timestamp = event.data.get('received') or float( event.datetime.strftime('%s')) group_kwargs = kwargs.copy() group_kwargs.update({ 'culprit': culprit, 'logger': logger_name, 'level': level, 'last_seen': date, 'first_seen': date, 'active_at': date, 'data': { 'last_received': received_timestamp, 'type': event_type.key, # we cache the events metadata on the group to ensure its # accessible in the stream 'metadata': event_metadata, }, }) if release: group_kwargs['first_release'] = release try: group, is_new, is_regression, is_sample = self._save_aggregate( event=event, hashes=hashes, release=release, **group_kwargs) except HashDiscarded: event_discarded.send_robust( project=project, sender=EventManager, ) metrics.incr( 'events.discarded', skip_internal=True, tags={ 'organization_id': project.organization_id, 'platform': platform, }, ) raise else: event_saved.send_robust( project=project, sender=EventManager, ) event.group = group # store a reference to the group id to guarantee validation of isolation event.data.bind_ref(event) # When an event was sampled, the canonical source of truth # is the EventMapping table since we aren't going to be writing out an actual # Event row. Otherwise, if the Event isn't being sampled, we can safely # rely on the Event table itself as the source of truth and ignore # EventMapping since it's redundant information. 
if is_sample: try: with transaction.atomic( using=router.db_for_write(EventMapping)): EventMapping.objects.create(project=project, group=group, event_id=event_id) except IntegrityError: self.logger.info('duplicate.found', exc_info=True, extra={ 'event_uuid': event_id, 'project_id': project.id, 'group_id': group.id, 'model': EventMapping.__name__, }) return event environment = Environment.get_or_create( project=project, name=environment, ) group_environment, is_new_group_environment = GroupEnvironment.get_or_create( group_id=group.id, environment_id=environment.id, defaults={ 'first_release_id': release.id if release else None, }, ) if release: ReleaseEnvironment.get_or_create( project=project, release=release, environment=environment, datetime=date, ) ReleaseProjectEnvironment.get_or_create( project=project, release=release, environment=environment, datetime=date, ) grouprelease = GroupRelease.get_or_create( group=group, release=release, environment=environment, datetime=date, ) counters = [ (tsdb.models.group, group.id), (tsdb.models.project, project.id), ] if release: counters.append((tsdb.models.release, release.id)) tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id) frequencies = [ # (tsdb.models.frequent_projects_by_organization, { # project.organization_id: { # project.id: 1, # }, # }), # (tsdb.models.frequent_issues_by_project, { # project.id: { # group.id: 1, # }, # }) (tsdb.models.frequent_environments_by_group, { group.id: { environment.id: 1, }, }) ] if release: frequencies.append((tsdb.models.frequent_releases_by_group, { group.id: { grouprelease.id: 1, }, })) tsdb.record_frequency_multi(frequencies, timestamp=event.datetime) UserReport.objects.filter( project=project, event_id=event_id, ).update( group=group, environment=environment, ) # save the event unless its been sampled if not is_sample: try: with transaction.atomic(using=router.db_for_write(Event)): event.save() except IntegrityError: self.logger.info('duplicate.found', exc_info=True, extra={ 'event_uuid': event_id, 'project_id': project.id, 'group_id': group.id, 'model': Event.__name__, }) return event index_event_tags.delay( organization_id=project.organization_id, project_id=project.id, group_id=group.id, environment_id=environment.id, event_id=event.id, tags=tags, date_added=event.datetime, ) if event_user: tsdb.record_multi( ( (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )), (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )), ), timestamp=event.datetime, environment_id=environment.id, ) if release: if is_new: buffer.incr(ReleaseProject, {'new_groups': 1}, { 'release_id': release.id, 'project_id': project.id, }) if is_new_group_environment: buffer.incr(ReleaseProjectEnvironment, {'new_issues_count': 1}, { 'project_id': project.id, 'release_id': release.id, 'environment_id': environment.id, }) safe_execute(Group.objects.add_tags, group, environment, tags, _with_transaction=False) if not raw: if not project.first_event: project.update(first_event=date) first_event_received.send(project=project, group=group, sender=Project) post_process_group.delay( group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression, is_new_group_environment=is_new_group_environment, primary_hash=hashes[0], ) else: self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id}) metrics.timing( 'events.latency', received_timestamp - recorded_timestamp, tags={ 'project_id': project.id, }, ) return event
def test_to_string_returns_stacktrace(self, get_stacktrace):
    event = mock.Mock(spec=Event())
    interface = Stacktrace(frames=[])
    result = interface.to_string(event)
    get_stacktrace.assert_called_once_with(event, system_frames=False, max_frames=5)
    self.assertEquals(result, get_stacktrace.return_value)
def save(self, project, raw=False): project = Project.objects.get_from_cache(id=project) data = self.data.copy() # First we pull out our top-level (non-data attr) kwargs event_id = data.pop('event_id') message = data.pop('message') level = data.pop('level') culprit = data.pop('culprit', None) time_spent = data.pop('time_spent', None) logger_name = data.pop('logger', None) server_name = data.pop('server_name', None) site = data.pop('site', None) checksum = data.pop('checksum', None) fingerprint = data.pop('fingerprint', None) platform = data.pop('platform', None) release = data.pop('release', None) environment = data.pop('environment', None) if not culprit: culprit = generate_culprit(data) date = datetime.fromtimestamp(data.pop('timestamp')) date = date.replace(tzinfo=timezone.utc) kwargs = { 'message': message, 'platform': platform, } event = Event(project_id=project.id, event_id=event_id, data=data, time_spent=time_spent, datetime=date, **kwargs) tags = data.get('tags') or [] tags.append(('level', LOG_LEVELS[level])) if logger_name: tags.append(('logger', logger_name)) if server_name: tags.append(('server_name', server_name)) if site: tags.append(('site', site)) if release: # TODO(dcramer): we should ensure we create Release objects tags.append(('sentry:release', release)) if environment: tags.append(('environment', environment)) for plugin in plugins.for_project(project, version=None): added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False) if added_tags: tags.extend(added_tags) event_user = self._get_event_user(project, data) if event_user: tags.append(('sentry:user', event_user.tag_value)) # XXX(dcramer): we're relying on mutation of the data object to ensure # this propagates into Event data['tags'] = tags data['fingerprint'] = fingerprint or ['{{ default }}'] # prioritize fingerprint over checksum as its likely the client defaulted # a checksum whereas the fingerprint was explicit if fingerprint: hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint)) elif checksum: hashes = [checksum] else: hashes = map(md5_from_hash, get_hashes_for_event(event)) group_kwargs = kwargs.copy() group_kwargs.update({ 'culprit': culprit, 'logger': logger_name, 'level': level, 'last_seen': date, 'first_seen': date, 'time_spent_total': time_spent or 0, 'time_spent_count': time_spent and 1 or 0, }) if release: release = Release.get_or_create( project=project, version=release, date_added=date, ) group_kwargs['first_release'] = release group, is_new, is_regression, is_sample = self._save_aggregate( event=event, hashes=hashes, release=release, **group_kwargs) event.group = group # store a reference to the group id to guarantee validation of isolation event.data.bind_ref(event) try: with transaction.atomic(using=router.db_for_write(EventMapping)): EventMapping.objects.create(project=project, group=group, event_id=event_id) except IntegrityError: self.logger.info('Duplicate EventMapping found for event_id=%s', event_id, exc_info=True) return event UserReport.objects.filter( project=project, event_id=event_id, ).update(group=group) # save the event unless its been sampled if not is_sample: try: with transaction.atomic(using=router.db_for_write(Event)): event.save() except IntegrityError: self.logger.info('Duplicate Event found for event_id=%s', event_id, exc_info=True) return event if event_user: tsdb.record_multi(( (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )), (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )), ), 
timestamp=event.datetime) if is_new and release: buffer.incr(Release, {'new_groups': 1}, { 'id': release.id, }) safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False) if not raw: if not project.first_event: project.update(first_event=date) post_process_group.delay( group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression, ) else: self.logger.info( 'Raw event passed; skipping post process for event_id=%s', event_id) # TODO: move this to the queue if is_regression and not raw: regression_signal.send_robust(sender=Group, instance=group) return event
def digest(request):
    random = get_random(request)

    # TODO: Refactor all of these into something more manageable.
    org = Organization(
        id=1,
        slug='example',
        name='Example Organization',
    )
    team = Team(
        id=1,
        slug='example',
        name='Example Team',
        organization=org,
    )
    project = Project(
        id=1,
        slug='example',
        name='Example Project',
        team=team,
        organization=org,
    )

    rules = {
        i: Rule(
            id=i,
            project=project,
            label="Rule #%s" % (i,),
        ) for i in range(1, random.randint(2, 4))
    }

    state = {
        'project': project,
        'groups': {},
        'rules': rules,
        'event_counts': {},
        'user_counts': {},
    }

    records = []

    event_sequence = itertools.count(1)
    group_generator = make_group_generator(random, project)

    for i in range(random.randint(1, 30)):
        group = next(group_generator)
        state['groups'][group.id] = group

        offset = timedelta(seconds=0)
        for i in range(random.randint(1, 10)):
            offset += timedelta(seconds=random.random() * 120)
            event = Event(
                id=next(event_sequence),
                event_id=uuid.uuid4().hex,
                project=project,
                group=group,
                message=group.message,
                data=load_data('python'),
                datetime=to_datetime(
                    random.randint(
                        to_timestamp(group.first_seen),
                        to_timestamp(group.last_seen),
                    ),
                ),
            )

            records.append(
                Record(
                    event.event_id,
                    Notification(
                        event,
                        random.sample(state['rules'], random.randint(1, len(state['rules']))),
                    ),
                    to_timestamp(event.datetime),
                )
            )

            state['event_counts'][group.id] = random.randint(10, 1e4)
            state['user_counts'][group.id] = random.randint(10, 1e4)

    digest = build_digest(project, records, state)
    start, end, counts = get_digest_metadata(digest)

    return MailPreview(
        html_template='sentry/emails/digests/body.html',
        text_template='sentry/emails/digests/body.txt',
        context={
            'project': project,
            'counts': counts,
            'digest': digest,
            'start': start,
            'end': end,
        },
    ).render(request)
def digest(request):
    seed = request.GET.get('seed', str(time.time()))
    logger.debug('Using random seed value: %s', seed)
    random = Random(seed)

    now = datetime.utcnow().replace(tzinfo=pytz.utc)

    # TODO: Refactor all of these into something more manageable.
    org = Organization(
        id=1,
        slug='example',
        name='Example Organization',
    )
    team = Team(
        id=1,
        slug='example',
        name='Example Team',
        organization=org,
    )
    project = Project(
        id=1,
        slug='example',
        name='Example Project',
        team=team,
        organization=org,
    )

    rules = {
        i: Rule(
            id=i,
            project=project,
            label="Rule #%s" % (i,),
        ) for i in xrange(1, random.randint(2, 4))
    }

    state = {
        'project': project,
        'groups': {},
        'rules': rules,
        'event_counts': {},
        'user_counts': {},
    }

    records = []

    group_sequence = itertools.count(1)
    event_sequence = itertools.count(1)

    for i in xrange(random.randint(1, 30)):
        group_id = next(group_sequence)

        culprit = '{module} in {function}'.format(
            module='.'.join(
                ''.join(random.sample(WORDS, random.randint(1, int(random.paretovariate(2.2)))))
                for word in xrange(1, 4)
            ),
            function=random.choice(WORDS),
        )

        group = state['groups'][group_id] = Group(
            id=group_id,
            project=project,
            message=words(int(random.weibullvariate(8, 4)), common=False),
            culprit=culprit,
            level=random.choice(LOG_LEVELS.keys()),
        )

        offset = timedelta(seconds=0)
        for i in xrange(random.randint(1, 10)):
            offset += timedelta(seconds=random.random() * 120)
            event = Event(
                id=next(event_sequence),
                event_id=uuid.uuid4().hex,
                project=project,
                group=group,
                message=group.message,
                data=load_data('python'),
                datetime=now - offset,
            )

            records.append(
                Record(
                    event.event_id,
                    Notification(
                        event,
                        random.sample(state['rules'], random.randint(1, len(state['rules']))),
                    ),
                    to_timestamp(event.datetime),
                )
            )

            state['event_counts'][group_id] = random.randint(10, 1e4)
            state['user_counts'][group_id] = random.randint(10, 1e4)

    digest = build_digest(project, records, state)
    start, end, counts = get_digest_metadata(digest)

    return MailPreview(
        html_template='sentry/emails/digests/body.html',
        text_template='sentry/emails/digests/body.txt',
        context={
            'project': project,
            'counts': counts,
            'digest': digest,
            'start': start,
            'end': end,
        },
    ).render()
def new_event(request):
    platform = request.GET.get('platform', 'python')

    org = Organization(
        id=1,
        slug='example',
        name='Example',
    )
    team = Team(
        id=1,
        slug='example',
        name='Example',
        organization=org,
    )
    project = Project(
        id=1,
        slug='example',
        name='Example',
        team=team,
        organization=org,
    )

    random = get_random(request)
    group = next(
        make_group_generator(random, project),
    )

    event = Event(
        id=1,
        project=project,
        group=group,
        message=group.message,
        data=load_data(platform),
        datetime=to_datetime(
            random.randint(
                to_timestamp(group.first_seen),
                to_timestamp(group.last_seen),
            ),
        ),
    )

    rule = Rule(label="An example rule")

    interface_list = []
    for interface in six.itervalues(event.interfaces):
        body = interface.to_email_html(event)
        if not body:
            continue
        interface_list.append((interface.get_title(), mark_safe(body)))

    return MailPreview(
        html_template='sentry/emails/error.html',
        text_template='sentry/emails/error.txt',
        context={
            'rule': rule,
            'group': group,
            'event': event,
            'link': 'http://example.com/link',
            'interfaces': interface_list,
            'project_label': project.name,
            'tags': [
                ('logger', 'javascript'),
                ('environment', 'prod'),
                ('level', 'error'),
                ('device', 'Other'),
            ],
        },
    ).render(request)
def test_get_stacktrace_with_only_filename(self):
    event = mock.Mock(spec=Event())
    interface = Stacktrace(frames=[{'filename': 'foo'}, {'filename': 'bar'}])
    result = interface.get_stacktrace(event)
    self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo"\n File "bar"')
def test_get_stacktrace_with_module(self):
    event = mock.Mock(spec=Event())
    interface = Stacktrace.to_python(dict(frames=[{'module': 'foo'}, {'module': 'bar'}]))
    result = interface.get_stacktrace(event)
    self.assertEquals(result, 'Stacktrace (most recent call last):\n\n Module "foo"\n Module "bar"')
def test_get_group_creation_attributes(self):
    now = datetime(2017, 5, 3, 6, 6, 6, tzinfo=pytz.utc)
    events = [
        Event(
            platform='javascript',
            message='Hello from JavaScript',
            datetime=now,
            data={
                'type': 'default',
                'metadata': {},
                'tags': [
                    ['level', 'info'],
                    ['logger', 'javascript'],
                ],
            },
        ),
        Event(
            platform='python',
            message='Hello from Python',
            datetime=now - timedelta(hours=1),
            data={
                'type': 'default',
                'metadata': {},
                'tags': [
                    ['level', 'error'],
                    ['logger', 'python'],
                ],
            },
        ),
        Event(
            platform='java',
            message='Hello from Java',
            datetime=now - timedelta(hours=2),
            data={
                'type': 'default',
                'metadata': {},
                'tags': [
                    ['level', 'debug'],
                    ['logger', 'java'],
                ],
            },
        ),
    ]

    assert get_group_creation_attributes(
        get_caches(),
        events,
    ) == {
        'active_at': now - timedelta(hours=2),
        'first_seen': now - timedelta(hours=2),
        'last_seen': now,
        'platform': 'java',
        'message': 'Hello from JavaScript',
        'level': logging.INFO,
        'score': ScoreClause.calculate(3, now),
        'logger': 'java',
        'times_seen': 3,
        'first_release': None,
        'culprit': '',
        'data': {
            'type': 'default',
            'last_received': to_timestamp(now),
            'metadata': {},
        },
    }
def from_kwargs(self, project, **kwargs):
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    from sentry.models import Event, Project

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = kwargs.pop('event_id', None)
    message = kwargs.pop('message', None)
    culprit = kwargs.pop('culprit', None)
    level = kwargs.pop('level', None) or logging.ERROR
    time_spent = kwargs.pop('time_spent', None)
    logger_name = kwargs.pop('logger', None) or settings.DEFAULT_LOGGER_NAME
    server_name = kwargs.pop('server_name', None)
    site = kwargs.pop('site', None)
    date = kwargs.pop('timestamp', None) or timezone.now()
    checksum = kwargs.pop('checksum', None)
    tags = kwargs.pop('tags', [])

    # full support for dict syntax
    if isinstance(tags, dict):
        tags = tags.items()

    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    if dj_settings.TIME_ZONE:
        if not timezone.is_aware(date):
            date = date.replace(tzinfo=timezone.utc)
    elif timezone.is_aware(date):
        date = date.replace(tzinfo=None)

    data = kwargs

    kwargs = {
        'level': level,
        'message': message,
    }

    event = Event(
        project=project,
        event_id=event_id,
        culprit=culprit or '',
        logger=logger_name,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)

    event.checksum = checksum

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    views = self._get_views(event)

    try:
        group, is_new, is_sample = self._create_group(event, tags=tags, **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            warnings.warn(u'Unable to process log entry: %s' % (exc,))
def test_get_stacktrace_with_filename_and_function(self):
    event = mock.Mock(spec=Event())
    interface = Stacktrace.to_python(dict(frames=[
        {'filename': 'foo', 'function': 'biz'},
        {'filename': 'bar', 'function': 'baz'},
    ]))
    result = interface.get_stacktrace(event)
    self.assertEquals(
        result,
        'Stacktrace (most recent call last):\n\n File "foo", in biz\n File "bar", in baz'
    )
def test_get_stacktrace_with_filename_function_lineno_and_context(self):
    event = mock.Mock(spec=Event())
    interface = Stacktrace(frames=[
        {'filename': 'foo', 'function': 'biz', 'lineno': 3, 'context_line': ' def foo(r):'},
        {'filename': 'bar', 'function': 'baz', 'lineno': 5, 'context_line': ' return None'},
    ])
    result = interface.get_stacktrace(event)
    self.assertEquals(
        result,
        'Stacktrace (most recent call last):\n\n File "foo", line 3, in biz\n def foo(r):\n'
        ' File "bar", line 5, in baz\n return None'
    )