event_id=event_id, culprit=culprit, logger=logger_name, data=data, server_name=server_name, site=site, time_spent=time_spent, datetime=date, **kwargs ) # Calculcate the checksum from the first highest scoring interface if not checksum: checksum = get_checksum_from_event(event) event.checksum = checksum group_kwargs = kwargs.copy() group_kwargs.update({ 'last_seen': date, 'first_seen': date, 'time_spent_total': time_spent or 0, 'time_spent_count': time_spent and 1 or 0, }) views = set() for viewhandler in ViewHandler.objects.all(): try: if not viewhandler.should_store(event): continue
def from_kwargs(self, project, **kwargs):
    """Build an Event for ``project`` from raw keyword data and collect
    the Views that elect to store it.

    ``project`` is a primary key; kwargs are split into top-level Event
    attributes and a free-form ``data`` payload.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    from sentry.models import Event, Project, View
    from sentry.views import View as ViewHandler

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = kwargs.pop('event_id', None)
    message = kwargs.pop('message', None)
    culprit = kwargs.pop('culprit', None)
    level = kwargs.pop('level', None)
    time_spent = kwargs.pop('time_spent', None)
    logger_name = kwargs.pop('logger', None) or settings.DEFAULT_LOGGER_NAME
    server_name = kwargs.pop('server_name', None)
    site = kwargs.pop('site', None)
    date = kwargs.pop('timestamp', None) or datetime.datetime.utcnow()
    checksum = kwargs.pop('checksum', None)

    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    date = utc_to_local(date)

    # Everything not popped above becomes the event's data payload.
    data = kwargs

    kwargs = {
        'level': level,
        'message': message,
    }

    event = Event(
        project=project,
        event_id=event_id,
        culprit=culprit,
        logger=logger_name,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    views = set()
    for viewhandler in ViewHandler.objects.all():
        # Each handler failure is isolated so one bad view cannot stop
        # the others from being evaluated.
        try:
            if not viewhandler.should_store(event):
                continue

            path = '%s.%s' % (viewhandler.__module__, viewhandler.__class__.__name__)

            if not viewhandler.ref:
                # TODO: this should handle race conditions
                viewhandler.ref = View.objects.get_or_create(
                    path=path,
                    defaults=dict(
                        verbose_name=viewhandler.verbose_name,
                        verbose_name_plural=viewhandler.verbose_name_plural,
                    ),
                )[0]

            views.add(viewhandler.ref)
        except Exception, exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(exc)
            except Exception, exc:
                warnings.warn(exc)
def from_kwargs(self, project, **kwargs):
    """Create an Event for ``project`` from raw keyword data and fold it
    into its aggregate group via ``_create_group``.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    from sentry.models import Event, Project

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = kwargs.pop("event_id", None)
    message = kwargs.pop("message", None)
    culprit = kwargs.pop("culprit", None)
    level = kwargs.pop("level", None) or logging.ERROR
    time_spent = kwargs.pop("time_spent", None)
    logger_name = kwargs.pop("logger", None) or settings.DEFAULT_LOGGER_NAME
    server_name = kwargs.pop("server_name", None)
    site = kwargs.pop("site", None)
    date = kwargs.pop("timestamp", None) or timezone.now()
    checksum = kwargs.pop("checksum", None)
    tags = kwargs.pop("tags", [])

    # full support for dict syntax
    if isinstance(tags, dict):
        tags = tags.items()

    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    if dj_settings.TIME_ZONE:
        if not timezone.is_aware(date):
            date = date.replace(tzinfo=timezone.utc)
    elif timezone.is_aware(date):
        date = date.replace(tzinfo=None)

    # Everything not popped above becomes the event's data payload.
    data = kwargs
    data["tags"] = tags

    kwargs = {"level": level, "message": message}

    event = Event(
        project=project,
        event_id=event_id,
        culprit=culprit or "",
        logger=logger_name,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update(
        {
            "last_seen": date,
            "first_seen": date,
            "time_spent_total": time_spent or 0,
            "time_spent_count": 1 if time_spent else 0,
        }
    )

    try:
        group, is_new, is_sample = self._create_group(event, tags=tags, **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u"Unable to process log entry: %s", exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u"Unable to process log entry: %s" % exc)
def from_kwargs(self, project, **kwargs):
    """Create an Event for ``project`` from raw keyword data, resolve its
    views, and fold it into its aggregate group via ``_create_group``.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    from sentry.models import Event, Project

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = kwargs.pop('event_id', None)
    message = kwargs.pop('message', None)
    culprit = kwargs.pop('culprit', None)
    level = kwargs.pop('level', None) or logging.ERROR
    time_spent = kwargs.pop('time_spent', None)
    logger_name = kwargs.pop('logger', None) or settings.DEFAULT_LOGGER_NAME
    server_name = kwargs.pop('server_name', None)
    site = kwargs.pop('site', None)
    date = kwargs.pop('timestamp', None) or timezone.now()
    checksum = kwargs.pop('checksum', None)
    tags = kwargs.pop('tags', [])

    # full support for dict syntax
    if isinstance(tags, dict):
        tags = tags.items()

    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    if dj_settings.TIME_ZONE:
        if not timezone.is_aware(date):
            date = date.replace(tzinfo=timezone.utc)
    elif timezone.is_aware(date):
        date = date.replace(tzinfo=None)

    # Everything not popped above becomes the event's data payload.
    data = kwargs

    kwargs = {
        'level': level,
        'message': message,
    }

    event = Event(
        project=project,
        event_id=event_id,
        culprit=culprit or '',
        logger=logger_name,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    views = self._get_views(event)

    try:
        group, is_new, is_sample = self._create_group(event, tags=tags, **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
def save(self, project, raw=False):
    """Persist this event for ``project``: compute grouping hashes,
    aggregate into a Group, write Event/EventMapping rows and trigger
    post-processing.

    ``raw`` skips post-processing and the regression signal.
    Returns the saved Event, or None if aggregation failed.
    """
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    # NOTE(review): fromtimestamp() interprets the epoch seconds in the
    # server's local timezone before the UTC label is applied — confirm
    # the host runs with a UTC clock/TIME_ZONE.
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    else:
        hashes = get_hashes_for_event(event)

    # TODO(dcramer): remove checksum usage
    event.checksum = hashes[0]

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    # Attach the standard tag set, then let project plugins extend it.
    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    # safe_execute returns None on failure, in which case we bail out
    # without persisting anything.
    result = safe_execute(
        self._save_aggregate,
        event=event,
        tags=tags,
        hashes=hashes,
        **group_kwargs
    )
    if result is None:
        return

    group, is_new, is_regression, is_sample = result

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        # Savepoints let a duplicate-key race abort just this insert
        # instead of poisoning the surrounding transaction.
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(
            project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def from_kwargs(self, project, **kwargs):
    """Create an Event (with platform support) for ``project`` from raw
    keyword data and fold it into its aggregate group.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.models import Event, Project

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = kwargs.pop('event_id', None)
    message = kwargs.pop('message', None)
    culprit = kwargs.pop('culprit', None)
    level = kwargs.pop('level', None) or logging.ERROR
    time_spent = kwargs.pop('time_spent', None)
    logger_name = kwargs.pop('logger', None) or settings.DEFAULT_LOGGER_NAME
    server_name = kwargs.pop('server_name', None)
    site = kwargs.pop('site', None)
    date = kwargs.pop('timestamp', None) or timezone.now()
    checksum = kwargs.pop('checksum', None)
    tags = kwargs.pop('tags', [])
    platform = kwargs.pop('platform', None)

    # full support for dict syntax
    if isinstance(tags, dict):
        tags = tags.items()

    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    if dj_settings.TIME_ZONE:
        if not timezone.is_aware(date):
            date = date.replace(tzinfo=timezone.utc)
    elif timezone.is_aware(date):
        date = date.replace(tzinfo=None)

    # Everything not popped above becomes the event's data payload.
    data = kwargs
    data['tags'] = tags

    kwargs = {
        'level': level,
        'message': message,
        'platform': platform,
        'culprit': culprit or '',
        'logger': logger_name,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    try:
        group, is_new, is_sample = self._create_group(event, tags=tags, **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
def save_data(self, project, data, raw=False):
    """Persist raw ``data`` as an Event for ``project``: normalize the
    exception interface, aggregate into a Group, write Event/EventMapping
    rows and trigger group processors.

    ``raw`` skips group processors and the regression signal.
    Returns the saved Event, or None if grouping failed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(id=project)

    Raven.tags_context({'project': project.id})

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    culprit = data.pop('culprit')
    level = data.pop('level')
    time_spent = data.pop('time_spent')
    logger_name = data.pop('logger')
    server_name = data.pop('server_name')
    site = data.pop('site')
    date = data.pop('timestamp')
    checksum = data.pop('checksum')
    platform = data.pop('platform')

    # Normalize the single-exception form into the multi-value form.
    if 'sentry.interfaces.Exception' in data:
        if 'values' not in data['sentry.interfaces.Exception']:
            data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

        # convert stacktrace + exception into expanded exception
        if 'sentry.interfaces.Stacktrace' in data:
            data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

    kwargs = {
        'level': level,
        'message': message,
        'platform': platform,
        'culprit': culprit or '',
        'logger': logger_name,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    # Attach the standard tag set, then let project plugins extend it.
    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    # BUG FIX: the original tested the module-level ``logger`` object
    # (always truthy) instead of the event's logger name, mirroring the
    # server_name/site checks below.
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    try:
        group, is_new, is_sample = self._create_group(
            event=event,
            tags=data['tags'],
            **group_kwargs
        )
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
        return

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        # Savepoints let a duplicate-key race abort just this insert
        # instead of poisoning the surrounding transaction.
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(
            project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        send_group_processors(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample
        )

    if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
        index_event.delay(event)

    # TODO: move this to the queue
    if is_new and not raw:
        regression_signal.send_robust(sender=self.model, instance=group)

    return event
def save_data(self, project, data):
    """Persist raw ``data`` as an Event for ``project`` and fold it into
    its aggregate group via ``_create_group``.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    culprit = data.pop('culprit')
    level = data.pop('level')
    time_spent = data.pop('time_spent')
    logger_name = data.pop('logger')
    server_name = data.pop('server_name')
    site = data.pop('site')
    date = data.pop('timestamp')
    checksum = data.pop('checksum')
    platform = data.pop('platform')

    kwargs = {
        'level': level,
        'message': message,
        'platform': platform,
        'culprit': culprit or '',
        'logger': logger_name,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    # Attach the standard tag set, then let project plugins extend it.
    tags = data['tags']
    tags.append(('level', LOG_LEVELS_DICT[level]))
    # BUG FIX: the original tested the module-level ``logger`` object
    # (always truthy) instead of the event's logger name, mirroring the
    # server_name/site checks below.
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    try:
        group, is_new, is_sample = self._create_group(event=event, tags=data['tags'], **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
        return
def save_data(self, project, data, raw=False):
    """Persist raw ``data`` as an Event for ``project``: normalize the
    exception interface and fold the event into its aggregate group.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(id=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    culprit = data.pop('culprit')
    level = data.pop('level')
    time_spent = data.pop('time_spent')
    logger_name = data.pop('logger')
    server_name = data.pop('server_name')
    site = data.pop('site')
    date = data.pop('timestamp')
    checksum = data.pop('checksum')
    platform = data.pop('platform')

    # Normalize the single-exception form into the multi-value form.
    if 'sentry.interfaces.Exception' in data:
        if 'values' not in data['sentry.interfaces.Exception']:
            data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

        # convert stacktrace + exception into expanded exception
        if 'sentry.interfaces.Stacktrace' in data:
            data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

    kwargs = {
        'level': level,
        'message': message,
        'platform': platform,
        'culprit': culprit or '',
        'logger': logger_name,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    # Attach the standard tag set, then let project plugins extend it.
    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    # BUG FIX: the original tested the module-level ``logger`` object
    # (always truthy) instead of the event's logger name, mirroring the
    # server_name/site checks below.
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    try:
        group, is_new, is_sample = self._create_group(
            event=event,
            tags=data['tags'],
            **group_kwargs
        )
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
        return
def save(self, project, raw=False):
    """Persist this event for ``project``: compute grouping hashes,
    aggregate into a Group, write Event/EventMapping rows and trigger
    post-processing.

    ``raw`` skips post-processing and the regression signal.
    Returns the saved Event, or None if aggregation failed.
    """
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    # NOTE(review): fromtimestamp() interprets the epoch seconds in the
    # server's local timezone before the UTC label is applied — confirm
    # the host runs with a UTC clock/TIME_ZONE.
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    else:
        hashes = get_hashes_for_event(event)

    # TODO(dcramer): remove checksum usage
    event.checksum = hashes[0]

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    # Attach the standard tag set, then let project plugins extend it.
    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    # safe_execute returns None on failure, in which case we bail out
    # without persisting anything.
    result = safe_execute(
        self._save_aggregate,
        event=event,
        tags=tags,
        hashes=hashes,
        **group_kwargs
    )
    if result is None:
        return

    group, is_new, is_regression, is_sample = result

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        # Savepoints let a duplicate-key race abort just this insert
        # instead of poisoning the surrounding transaction.
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(
            project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save_data(self, project, data):
    """Persist raw ``data`` as an Event for ``project`` and fold it into
    its aggregate group via ``_create_group``.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    culprit = data.pop('culprit')
    level = data.pop('level')
    time_spent = data.pop('time_spent')
    logger_name = data.pop('logger')
    server_name = data.pop('server_name')
    site = data.pop('site')
    date = data.pop('timestamp')
    checksum = data.pop('checksum')
    platform = data.pop('platform')

    kwargs = {
        'level': level,
        'message': message,
        'platform': platform,
        'culprit': culprit or '',
        'logger': logger_name,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    try:
        group, is_new, is_sample = self._create_group(
            event=event,
            tags=data['tags'],
            **group_kwargs
        )
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
        return
def save_data(self, project, data, raw=False):
    """Persist raw ``data`` as an Event for ``project``: aggregate into a
    Group (tracking regressions), write Event/EventMapping rows and
    trigger group processors.

    ``raw`` skips group processors and the regression signal.
    Returns the saved Event, or None if grouping failed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(id=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    culprit = data.pop('culprit') or ''
    level = data.pop('level')
    time_spent = data.pop('time_spent')
    logger_name = data.pop('logger')
    server_name = data.pop('server_name')
    site = data.pop('site')
    checksum = data.pop('checksum')
    platform = data.pop('platform')

    # NOTE(review): fromtimestamp() interprets the epoch seconds in the
    # server's local timezone before the UTC label is applied — confirm
    # the host runs with a UTC clock/TIME_ZONE.
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    # Attach the standard tag set, then let project plugins extend it.
    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    # BUG FIX: the original tested the module-level ``logger`` object
    # (always truthy) instead of the event's logger name, mirroring the
    # server_name/site checks below.
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    try:
        group, is_new, is_regression, is_sample = self._create_group(
            event=event, tags=data['tags'], **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
        return

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        # Savepoints let a duplicate-key race abort just this insert
        # instead of poisoning the surrounding transaction.
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        send_group_processors(
            group=group,
            event=event,
            is_new=is_new or is_regression,  # backwards compat
            is_sample=is_sample,
            is_regression=is_regression,
        )

    index_event.delay(event)

    # TODO: move this to the queue
    if is_new and not raw:
        regression_signal.send_robust(sender=self.model, instance=group)

    return event
def from_kwargs(self, project, **kwargs):
    """Create an Event for ``project`` from raw keyword data, resolve its
    views, and fold it into its aggregate group via ``_create_group``.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    from sentry.models import Event, Project

    project = Project.objects.get_from_cache(pk=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = kwargs.pop('event_id', None)
    message = kwargs.pop('message', None)
    culprit = kwargs.pop('culprit', None)
    level = kwargs.pop('level', None) or logging.ERROR
    time_spent = kwargs.pop('time_spent', None)
    logger_name = kwargs.pop('logger', None) or settings.DEFAULT_LOGGER_NAME
    server_name = kwargs.pop('server_name', None)
    site = kwargs.pop('site', None)
    date = kwargs.pop('timestamp', None) or datetime.datetime.utcnow()
    checksum = kwargs.pop('checksum', None)
    tags = kwargs.pop('tags', [])

    # We must convert date to local time so Django doesn't mess it up
    # based on TIME_ZONE
    date = utc_to_local(date)

    # Everything not popped above becomes the event's data payload.
    data = kwargs

    kwargs = {
        'level': level,
        'message': message,
    }

    event = Event(
        project=project,
        event_id=event_id,
        culprit=culprit or '',
        logger=logger_name,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': 1 if time_spent else 0,
    })

    views = self._get_views(event)

    try:
        group, is_new, is_sample = self._create_group(event, tags=tags, **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u'Unable to process log entry: %s' % exc)
def save_data(self, project, data, raw=False):
    """Persist raw ``data`` as an Event for ``project``: normalize the
    exception interface and fold the event into its aggregate group.

    Grouping failures are logged (or warned) and swallowed.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(id=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop("event_id")
    message = data.pop("message")
    culprit = data.pop("culprit")
    level = data.pop("level")
    time_spent = data.pop("time_spent")
    logger_name = data.pop("logger")
    server_name = data.pop("server_name")
    site = data.pop("site")
    date = data.pop("timestamp")
    checksum = data.pop("checksum")
    platform = data.pop("platform")

    # Normalize the single-exception form into the multi-value form.
    if "sentry.interfaces.Exception" in data:
        if "values" not in data["sentry.interfaces.Exception"]:
            data["sentry.interfaces.Exception"] = {"values": [data["sentry.interfaces.Exception"]]}

        # convert stacktrace + exception into expanded exception
        if "sentry.interfaces.Stacktrace" in data:
            data["sentry.interfaces.Exception"]["values"][0]["stacktrace"] = data.pop(
                "sentry.interfaces.Stacktrace"
            )

    kwargs = {
        "level": level,
        "message": message,
        "platform": platform,
        "culprit": culprit or "",
        "logger": logger_name,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)
    event.checksum = checksum

    # Seed values for the aggregate group; a missing time_spent counts
    # as "no timing sample" rather than a zero-duration sample.
    group_kwargs = kwargs.copy()
    group_kwargs.update(
        {
            "last_seen": date,
            "first_seen": date,
            "time_spent_total": time_spent or 0,
            "time_spent_count": 1 if time_spent else 0,
        }
    )

    # Attach the standard tag set, then let project plugins extend it.
    tags = data["tags"]
    tags.append(("level", LOG_LEVELS[level]))
    # BUG FIX: the original tested the module-level ``logger`` object
    # (always truthy) instead of the event's logger name, mirroring the
    # server_name/site checks below.
    if logger_name:
        tags.append(("logger", logger_name))
    if server_name:
        tags.append(("server_name", server_name))
    if site:
        tags.append(("site", site))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    try:
        group, is_new, is_sample = self._create_group(event=event, tags=data["tags"], **group_kwargs)
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u"Unable to process log entry: %s", exc)
        except Exception as exc:
            # BUG FIX: warnings.warn() takes no lazy %-style args — its
            # second positional is the warning *category*, so passing the
            # exception there raised TypeError. Format the message first.
            warnings.warn(u"Unable to process log entry: %s" % exc)
        return