def save(self, project, raw=False):
    """Normalize, aggregate and persist the event held in ``self.data``.

    ``project`` is a project id (resolved through the cache); ``raw``
    skips post-processing. Returns the saved (or deduplicated) ``Event``.

    Fix: the default fingerprint must be a *list* of components
    (``['{{ default }}']``), matching what ``get_hashes_from_fingerprint``
    and later revisions of this method expect — a bare string would be
    iterated character-by-character by fingerprint consumers.
    """
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    if not culprit:
        # derive a culprit from the event payload when the client sent none
        culprit = generate_culprit(data)

    # client sends a POSIX timestamp; store as an aware UTC datetime
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    # BUGFIX: default fingerprint is a list of components, not a string
    data['fingerprint'] = fingerprint or ['{{ default }}']

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash,
                     get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        # resolve the version string into a Release row and record activity
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=release,
            data={'version': release},
            datetime=date,
        )

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event,
        hashes=hashes,
        release=release,
        **group_kwargs
    )

    event.group = group
    event.group_id = group.id
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    # EventMapping enforces uniqueness; an IntegrityError means we already
    # processed this event_id, so bail out early
    try:
        with transaction.atomic():
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic():
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id)
            return event

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save_data(self, project, data, raw=False):
    """Persist a normalized event ``data`` dict for ``project`` (an id).

    Returns the saved ``Event``, or ``None`` when group creation fails.

    Fixes:
    - ``if logger:`` tested the module-level logger object (always truthy)
      instead of the per-event ``logger_name``, so a 'logger' tag was
      appended even when the client sent none.
    - ``warnings.warn(msg, exc)`` passed the exception as the *category*
      argument (which must be a Warning subclass); the message is now
      formatted with ``%`` instead.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager

    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(id=project)

    Raven.tags_context({'project': project.id})

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    culprit = data.pop('culprit')
    level = data.pop('level')
    time_spent = data.pop('time_spent')
    logger_name = data.pop('logger')
    server_name = data.pop('server_name')
    site = data.pop('site')
    date = data.pop('timestamp')
    checksum = data.pop('checksum')
    platform = data.pop('platform')

    if 'sentry.interfaces.Exception' in data:
        # normalize single-exception payloads into the multi-value form
        if 'values' not in data['sentry.interfaces.Exception']:
            data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

        # convert stacktrace + exception into expanded exception
        if 'sentry.interfaces.Stacktrace' in data:
            data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

    kwargs = {
        'level': level,
        'message': message,
        'platform': platform,
        'culprit': culprit or '',
        'logger': logger_name,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        server_name=server_name,
        site=site,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)

    event.checksum = checksum

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    # BUGFIX: was ``if logger:`` which tested the module-level logger object
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    try:
        group, is_new, is_sample = self._create_group(
            event=event,
            tags=data['tags'],
            **group_kwargs
        )
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # BUGFIX: warnings.warn's second positional arg is the category;
            # interpolate the exception into the message instead
            warnings.warn(u'Unable to process log entry: %s' % exc)
        return

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            # duplicate event_id -- roll back and return the existing work
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(
            project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        send_group_processors(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample
        )

    if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
        index_event.delay(event)

    # TODO: move this to the queue
    if is_new and not raw:
        regression_signal.send_robust(sender=self.model, instance=group)

    return event
def save(self, project, raw=False):
    """Normalize, aggregate and persist the event held in ``self.data``.

    ``project`` is a project id (resolved through the cache); ``raw``
    skips post-processing. Returns the saved ``Event``, or ``None`` when
    aggregation fails inside ``safe_execute``.
    """
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    # client sends a POSIX timestamp; store as an aware UTC datetime
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    else:
        hashes = get_hashes_for_event(event)

    # TODO(dcramer): remove checksum usage
    event.checksum = hashes[0]

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    # NOTE(review): mutates data['tags'] in place so the additions are
    # persisted with the event payload
    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    # safe_execute swallows exceptions and returns None on failure,
    # hence the explicit guard before unpacking
    result = safe_execute(
        self._save_aggregate,
        event=event,
        tags=tags,
        hashes=hashes,
        **group_kwargs
    )
    if result is None:
        return

    group, is_new, is_regression, is_sample = result

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            # duplicate event_id -- roll back and return without re-saving
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(
            project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    """Normalize, aggregate and persist the event held in ``self.data``.

    ``project`` is a project id (resolved through the cache); ``raw``
    skips post-processing. Returns the saved ``Event``, or ``None`` when
    aggregation fails inside ``safe_execute``.

    Fix: ``safe_execute`` returns ``None`` when the wrapped call raises,
    so unpacking its result directly raised ``TypeError: 'NoneType' is
    not iterable``; the result is now guarded before unpacking (matching
    the sibling version of this method). The unused local ``using`` was
    also removed.
    """
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    # client sends a POSIX timestamp; store as an aware UTC datetime
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    else:
        hashes = get_hashes_for_event(event)

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        group_kwargs['first_release'] = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=release,
            data={'version': release},
            datetime=date,
        )

    # BUGFIX: safe_execute returns None on swallowed exceptions; guard
    # before unpacking instead of crashing with a TypeError
    result = safe_execute(
        self._save_aggregate,
        event=event,
        hashes=hashes,
        **group_kwargs
    )
    if result is None:
        return

    group, is_new, is_regression, is_sample = result

    event.group = group

    # EventMapping enforces uniqueness; an IntegrityError means we already
    # processed this event_id, so bail out early
    try:
        with transaction.atomic():
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic():
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id)
            return event

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    """Normalize, aggregate and persist the event held in ``self.data``.

    ``project`` is a project id (resolved through the cache); ``raw``
    skips post-processing. Returns the saved (or deduplicated) ``Event``.
    """
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    if not culprit:
        # derive a culprit from the event payload when the client sent none
        culprit = generate_culprit(data)

    # client sends a POSIX timestamp; store as an aware UTC datetime
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(project_id=project.id, event_id=event_id, data=data,
                  time_spent=time_spent, datetime=date, **kwargs)

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    # default fingerprint is a single '{{ default }}' component
    data['fingerprint'] = fingerprint or ['{{ default }}']

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash,
                     get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        # resolve the version string into a Release row
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs)

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    # EventMapping enforces uniqueness; an IntegrityError means we already
    # processed this event_id, so bail out early
    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s',
                         event_id, exc_info=True)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s',
                             event_id, exc_info=True)
            return event

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info(
            'Raw event passed; skipping post process for event_id=%s', event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
class GroupManager(BaseManager, ChartMixin):
    """Manager responsible for normalizing raw event payloads and
    persisting them as Event/Group rows.

    Fixes applied:
    - ``if logger:`` in ``save_data`` tested the module-level logger object
      (always truthy) instead of ``logger_name``.
    - Python-2-only ``except Exception, exc:`` normalized to the ``as exc``
      form already used two lines earlier in the same handler.
    - ``warnings.warn(msg, exc)`` passed the exception as the *category*
      argument; the message is now formatted with ``%`` instead.
    """
    use_for_related_fields = True

    def normalize_event_data(self, data):
        """Fill in defaults and coerce fields of a raw event dict in place.

        Returns the same (mutated) ``data`` dict.
        """
        # First we pull out our top-level (non-data attr) kwargs
        if not data.get('level') or data['level'] not in LOG_LEVELS_DICT:
            data['level'] = logging.ERROR

        if not data.get('logger'):
            data['logger'] = settings.DEFAULT_LOGGER_NAME

        timestamp = data.get('timestamp')
        if not timestamp:
            timestamp = timezone.now()

        if not data.get('culprit'):
            data['culprit'] = ''

        # We must convert date to local time so Django doesn't mess it up
        # based on TIME_ZONE
        if dj_settings.TIME_ZONE:
            if not timezone.is_aware(timestamp):
                timestamp = timestamp.replace(tzinfo=timezone.utc)
        elif timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=None)
        data['timestamp'] = timestamp

        if not data.get('event_id'):
            data['event_id'] = uuid.uuid4().hex

        data.setdefault('message', None)
        data.setdefault('time_spent', None)
        data.setdefault('server_name', None)
        data.setdefault('site', None)
        data.setdefault('checksum', None)
        data.setdefault('platform', None)

        tags = data.get('tags')
        if not tags:
            tags = []
        # full support for dict syntax
        elif isinstance(tags, dict):
            tags = tags.items()
        else:
            tags = list(tags)

        data['tags'] = tags

        if 'sentry.interfaces.Exception' in data:
            # normalize single-exception payloads into the multi-value form
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

        return data

    def from_kwargs(self, project, **kwargs):
        """Normalize keyword-argument event data and persist it."""
        data = self.normalize_event_data(kwargs)
        return self.save_data(project, data)

    @transaction.commit_on_success
    def save_data(self, project, data):
        """Persist a normalized event ``data`` dict for ``project`` (an id).

        Returns the saved ``Event``, or ``None`` when group creation fails.
        """
        # TODO: this function is way too damn long and needs refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager

        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(pk=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            server_name=server_name,
            site=site,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS_DICT[level]))
        # BUGFIX: was ``if logger:`` which tested the module-level logger
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(
                event=event,
                tags=data['tags'],
                **group_kwargs
            )
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            except Exception as exc:
                # BUGFIX: interpolate the exception into the message rather
                # than passing it as warnings.warn's category argument
                warnings.warn(u'Unable to process log entry: %s' % exc)
            return

        using = group._state.db

        event.group = group

        # save the event unless its been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                # duplicate event_id -- roll back and return existing work
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        send_group_processors(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample
        )

        if settings.USE_SEARCH:
            index_event.delay(event)

        # TODO: move this to the queue
        if is_new:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
try: maybe_delay(index_event, event) except Exception, e: transaction.rollback_unless_managed(using=group._state.db) logger.exception(u'Error indexing document: %s', e) if settings.SCRAPE_JAVASCRIPT_CONTEXT and event.platform == 'javascript' and not is_sample: try: maybe_delay(fetch_javascript_source, event) except Exception, e: transaction.rollback_unless_managed(using=group._state.db) logger.exception(u'Error fetching javascript source: %s', e) if is_new: try: regression_signal.send_robust(sender=self.model, instance=group) except Exception, e: transaction.rollback_unless_managed(using=group._state.db) logger.exception(u'Error sending regression signal: %s', e) send_group_processors(group=group, event=event, is_new=is_new, is_sample=is_sample) return event def _create_group(self, event, tags=None, **kwargs): from sentry.models import ProjectCountByMinute, GroupCountByMinute date = event.datetime time_spent = event.time_spent project = event.project
except IntegrityError: transaction.rollback_unless_managed(using=group._state.db) return event transaction.commit_unless_managed(using=group._state.db) if settings.USE_SEARCH: try: index_event.delay(event) except Exception, e: transaction.rollback_unless_managed(using=group._state.db) logger.exception(u'Error indexing document: %s', e) if is_new: try: regression_signal.send_robust(sender=self.model, instance=group) except Exception, e: transaction.rollback_unless_managed(using=group._state.db) logger.exception(u'Error sending regression signal: %s', e) send_group_processors(group=group, event=event, is_new=is_new, is_sample=is_sample) return event def should_sample(self, group, event): if not settings.SAMPLE_DATA: return False
def save(self, project, raw=False):
    """Normalize, aggregate and persist the event held in ``self.data``.

    ``project`` is a project id (resolved through the cache); ``raw``
    skips post-processing. Returns the saved (or deduplicated) ``Event``.
    Raises ``HashDiscarded`` when the aggregate decides to drop the event.
    """
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    # Check to make sure we're not about to do a bunch of work that's
    # already been done if we've processed an event with this ID. (This
    # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
    # there's a race condition between here and when the event is actually
    # saved, but it's an improvement. See GH-7677.)
    try:
        event = Event.objects.get(
            project_id=project.id,
            event_id=self.data['event_id'],
        )
    except Event.DoesNotExist:
        pass
    else:
        self.logger.info('duplicate.found', exc_info=True, extra={
            'event_uuid': self.data['event_id'],
            'project_id': project.id,
            'model': Event.__name__,
        })
        return event

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')

    # 'transaction' takes precedence over the legacy 'culprit' key
    culprit = data.pop('transaction', None)
    if not culprit:
        culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    dist = data.pop('dist', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        # if we generate an implicit culprit, lets not call it a
        # transaction
        transaction_name = None
        culprit = generate_culprit(data, platform=platform)
    else:
        transaction_name = culprit

    culprit = force_text(culprit)

    # client sends a POSIX timestamp; store as an aware UTC datetime
    recorded_timestamp = data.pop('timestamp')
    date = datetime.fromtimestamp(recorded_timestamp)
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(project_id=project.id, event_id=event_id, data=data,
                  time_spent=time_spent, datetime=date, **kwargs)
    event._project_cache = project

    # convert this to a dict to ensure we're only storing one value per key
    # as most parts of Sentry dont currently play well with multiple values
    tags = dict(data.get('tags') or [])
    tags['level'] = LOG_LEVELS[level]
    if logger_name:
        tags['logger'] = logger_name
    if server_name:
        tags['server_name'] = server_name
    if site:
        tags['site'] = site
    if environment:
        tags['environment'] = environment
    if transaction_name:
        tags['transaction'] = transaction_name

    if release:
        # dont allow a conflicting 'release' tag
        if 'release' in tags:
            del tags['release']
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )
        tags['sentry:release'] = release.version

    if dist and release:
        dist = release.add_dist(dist, date)
        tags['sentry:dist'] = dist.name
    else:
        dist = None

    event_user = self._get_event_user(project, data)
    if event_user:
        # dont allow a conflicting 'user' tag
        if 'user' in tags:
            del tags['user']
        tags['sentry:user'] = event_user.tag_value

    # At this point we want to normalize the in_app values in case the
    # clients did not set this appropriately so far.
    normalize_in_app(data)

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            # plugins should not override user provided tags
            for key, value in added_tags:
                tags.setdefault(key, value)

    for path, iface in six.iteritems(event.interfaces):
        for k, v in iface.iter_tags():
            tags[k] = v
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # tags are stored as a tuple
    tags = tags.items()

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags
    data['fingerprint'] = fingerprint or ['{{ default }}']

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = [
            md5_from_hash(h)
            for h in get_hashes_from_fingerprint(event, fingerprint)
        ]
    elif checksum:
        # a raw md5-looking checksum is used as-is; anything else is
        # hashed but the original value is kept alongside for lookup
        if HASH_RE.match(checksum):
            hashes = [checksum]
        else:
            hashes = [md5_from_hash([checksum]), checksum]
        data['checksum'] = checksum
    else:
        hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

    # TODO(dcramer): temp workaround for complexity
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, six.string_types):
        message = force_text(message)

    # fold metadata values and the culprit into the searchable message
    for value in six.itervalues(event_metadata):
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    if culprit and culprit not in message:
        culprit_u = force_text(culprit, errors='replace')
        message = u'{} {}'.format(message, culprit_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    received_timestamp = event.data.get('received') or float(
        event.datetime.strftime('%s'))
    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'active_at': date,
        'data': {
            'last_received': received_timestamp,
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        group_kwargs['first_release'] = release

    try:
        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs)
    except HashDiscarded:
        # the aggregate chose to drop this event; record it and re-raise
        event_discarded.send_robust(
            project=project,
            sender=EventManager,
        )
        metrics.incr(
            'events.discarded',
            skip_internal=True,
            tags={
                'organization_id': project.organization_id,
                'platform': platform,
            },
        )
        raise
    else:
        event_saved.send_robust(
            project=project,
            sender=EventManager,
        )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    # When an event was sampled, the canonical source of truth
    # is the EventMapping table since we aren't going to be writing out an actual
    # Event row. Otherwise, if the Event isn't being sampled, we can safely
    # rely on the Event table itself as the source of truth and ignore
    # EventMapping since it's redundant information.
    if is_sample:
        try:
            with transaction.atomic(
                    using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('duplicate.found', exc_info=True, extra={
                'event_uuid': event_id,
                'project_id': project.id,
                'group_id': group.id,
                'model': EventMapping.__name__,
            })
            return event

    environment = Environment.get_or_create(
        project=project,
        name=environment,
    )

    group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
        group_id=group.id,
        environment_id=environment.id,
        defaults={
            'first_release_id': release.id if release else None,
        },
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        ReleaseProjectEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    counters = [
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ]

    if release:
        counters.append((tsdb.models.release, release.id))

    tsdb.incr_multi(counters, timestamp=event.datetime,
                    environment_id=environment.id)

    frequencies = [
        # (tsdb.models.frequent_projects_by_organization, {
        #     project.organization_id: {
        #         project.id: 1,
        #     },
        # }),
        # (tsdb.models.frequent_issues_by_project, {
        #     project.id: {
        #         group.id: 1,
        #     },
        # })
        (tsdb.models.frequent_environments_by_group, {
            group.id: {
                environment.id: 1,
            },
        })
    ]

    if release:
        frequencies.append((tsdb.models.frequent_releases_by_group, {
            group.id: {
                grouprelease.id: 1,
            },
        }))

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(
        group=group,
        environment=environment,
    )

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('duplicate.found', exc_info=True, extra={
                'event_uuid': event_id,
                'project_id': project.id,
                'group_id': group.id,
                'model': Event.__name__,
            })
            return event

        index_event_tags.delay(
            organization_id=project.organization_id,
            project_id=project.id,
            group_id=group.id,
            environment_id=environment.id,
            event_id=event.id,
            tags=tags,
            date_added=event.datetime,
        )

    if event_user:
        tsdb.record_multi(
            (
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
            ),
            timestamp=event.datetime,
            environment_id=environment.id,
        )

    if release:
        if is_new:
            buffer.incr(ReleaseProject, {'new_groups': 1}, {
                'release_id': release.id,
                'project_id': project.id,
            })
        if is_new_group_environment:
            buffer.incr(ReleaseProjectEnvironment, {'new_issues_count': 1}, {
                'project_id': project.id,
                'release_id': release.id,
                'environment_id': environment.id,
            })

    safe_execute(Group.objects.add_tags, group, environment, tags,
                 _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
        )
    else:
        self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    metrics.timing(
        'events.latency',
        received_timestamp - recorded_timestamp,
        tags={
            'project_id': project.id,
        },
    )

    return event
class GroupManager(BaseManager, ChartMixin):
    """Manager responsible for normalizing raw event payloads and
    persisting them as Event/Group rows.
    """
    use_for_related_fields = True

    def normalize_event_data(self, data):
        """Fill defaults and trim/clean an incoming event ``data`` dict in place.

        Returns the same dict, with level/logger/timestamp/event_id
        defaulted, tags coerced to a list of 2-tuples, and oversized
        interface payloads trimmed.
        """
        # TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
        # First we pull out our top-level (non-data attr) kwargs
        if not data.get('level') or data['level'] not in LOG_LEVELS:
            data['level'] = logging.ERROR

        if not data.get('logger'):
            data['logger'] = DEFAULT_LOGGER_NAME
        else:
            data['logger'] = trim(data['logger'], 64)

        timestamp = data.get('timestamp')
        if not timestamp:
            timestamp = timezone.now()

        # We must convert date to local time so Django doesn't mess it up
        # based on TIME_ZONE
        if settings.TIME_ZONE:
            if not timezone.is_aware(timestamp):
                timestamp = timestamp.replace(tzinfo=timezone.utc)
        elif timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=None)
        data['timestamp'] = timestamp

        if not data.get('event_id'):
            data['event_id'] = uuid.uuid4().hex

        data.setdefault('message', None)
        data.setdefault('culprit', None)
        data.setdefault('time_spent', None)
        data.setdefault('server_name', None)
        data.setdefault('site', None)
        data.setdefault('checksum', None)
        data.setdefault('platform', None)
        data.setdefault('extra', {})

        tags = data.get('tags')
        if not tags:
            tags = []
        # full support for dict syntax
        elif isinstance(tags, dict):
            tags = tags.items()
        # prevent [tag, tag, tag] (invalid) syntax
        elif not all(len(t) == 2 for t in tags):
            tags = []
        else:
            tags = list(tags)
        data['tags'] = tags

        data['message'] = strip(data['message'])
        data['culprit'] = strip(data['culprit'])

        if not isinstance(data['extra'], dict):
            # throw it away
            data['extra'] = {}

        trim_dict(data['extra'], max_size=MAX_EXTRA_VARIABLE_SIZE)

        if 'sentry.interfaces.Exception' in data:
            # normalize a bare exception payload into the {'values': [...]} form
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = \
                    data.pop('sentry.interfaces.Stacktrace')

            for exc_data in data['sentry.interfaces.Exception']['values']:
                for key in ('type', 'module', 'value'):
                    value = exc_data.get(key)
                    if value:
                        exc_data[key] = trim(value)

                if exc_data.get('stacktrace'):
                    trim_frames(exc_data['stacktrace'])
                    for frame in exc_data['stacktrace']['frames']:
                        stack_vars = frame.get('vars', {})
                        trim_dict(stack_vars)

        if 'sentry.interfaces.Stacktrace' in data:
            trim_frames(data['sentry.interfaces.Stacktrace'])
            for frame in data['sentry.interfaces.Stacktrace']['frames']:
                stack_vars = frame.get('vars', {})
                trim_dict(stack_vars)

        if 'sentry.interfaces.Message' in data:
            msg_data = data['sentry.interfaces.Message']
            trim(msg_data['message'], 1024)
            if msg_data.get('params'):
                msg_data['params'] = trim(msg_data['params'])

        if 'sentry.interfaces.Http' in data:
            http_data = data['sentry.interfaces.Http']
            for key in ('cookies', 'querystring', 'headers', 'env', 'url'):
                value = http_data.get(key)
                if not value:
                    continue

                if type(value) == dict:
                    trim_dict(value)
                else:
                    http_data[key] = trim(value)

            value = http_data.get('data')
            if value:
                http_data['data'] = trim(value, 2048)

            # default the culprit to the url
            if not data['culprit']:
                data['culprit'] = trim(strip(http_data.get('url')), MAX_CULPRIT_LENGTH)

        return data

    def from_kwargs(self, project, **kwargs):
        """Normalize raw kwargs into an event payload and persist it."""
        data = self.normalize_event_data(kwargs)
        return self.save_data(project, data)

    @transaction.commit_on_success
    def save_data(self, project, data, raw=False):
        """Persist a normalized event payload for ``project``.

        Returns the created Event, or None if grouping failed.
        """
        # TODO: this function is way too damn long and needs refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager
        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        project = Project.objects.get_from_cache(id=project)

        # First we pull out our top-level (non-data attr) kwargs
        event_id = data.pop('event_id')
        message = data.pop('message')
        culprit = data.pop('culprit')
        level = data.pop('level')
        time_spent = data.pop('time_spent')
        logger_name = data.pop('logger')
        server_name = data.pop('server_name')
        site = data.pop('site')
        date = data.pop('timestamp')
        checksum = data.pop('checksum')
        platform = data.pop('platform')

        if 'sentry.interfaces.Exception' in data:
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = \
                    data.pop('sentry.interfaces.Stacktrace')

        kwargs = {
            'level': level,
            'message': message,
            'platform': platform,
            'culprit': culprit or '',
            'logger': logger_name,
        }

        event = Event(
            project=project,
            event_id=event_id,
            data=data,
            server_name=server_name,
            site=site,
            time_spent=time_spent,
            datetime=date,
            **kwargs
        )

        # Calculate the checksum from the first highest scoring interface
        if not checksum:
            checksum = get_checksum_from_event(event)

        event.checksum = checksum

        group_kwargs = kwargs.copy()
        group_kwargs.update({
            'last_seen': date,
            'first_seen': date,
            'time_spent_total': time_spent or 0,
            'time_spent_count': time_spent and 1 or 0,
        })

        tags = data['tags']
        tags.append(('level', LOG_LEVELS[level]))
        # FIX: was `if logger:` — the module-level logger object is always
        # truthy, so the tag was appended even when no logger name was set;
        # test the popped value instead (matches the other save paths).
        if logger_name:
            tags.append(('logger', logger_name))
        if server_name:
            tags.append(('server_name', server_name))
        if site:
            tags.append(('site', site))

        for plugin in plugins.for_project(project):
            added_tags = safe_execute(plugin.get_tags, event)
            if added_tags:
                tags.extend(added_tags)

        try:
            group, is_new, is_sample = self._create_group(
                event=event,
                tags=data['tags'],
                **group_kwargs
            )
        except Exception as exc:
            # TODO: should we mail admins when there are failures?
            try:
                logger.exception(u'Unable to process log entry: %s', exc)
            # FIX: was `except Exception, exc:` (Python-2-only comma syntax,
            # inconsistent with the `as` form used elsewhere in this file)
            except Exception as exc:
                # FIX: warnings.warn(msg, exc) passed the exception instance
                # as the `category` argument, which itself raises TypeError;
                # format it into the message instead.
                warnings.warn(u'Unable to process log entry: %s' % (exc,))
            return

        using = group._state.db

        event.group = group

        # save the event unless its been sampled
        if not is_sample:
            sid = transaction.savepoint(using=using)
            try:
                event.save()
            except IntegrityError:
                # duplicate event_id: another worker already stored this event
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)

        sid = transaction.savepoint(using=using)
        try:
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)
        transaction.commit_unless_managed(using=using)

        if not raw:
            send_group_processors(group=group, event=event, is_new=is_new, is_sample=is_sample)

        if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
            index_event.delay(event)

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        return event
def save(self, project, raw=False):
    """Persist this event's payload for ``project`` and return the Event.

    Aggregates into a Group via ``_save_aggregate``, records the
    event-id -> group mapping, and (unless ``raw``) kicks off
    post-processing.
    """
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    elif fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

        # NOTE(review): `release` is a Release instance at this point —
        # confirm that Activity.ident / data['version'] are meant to
        # receive the object rather than the version string.
        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=release,
            data={'version': release},
            datetime=date,
        )

    # FIX: was wrapped in safe_execute(), which returns None when the
    # callee raises — the 4-way unpack then failed with an unrelated
    # TypeError and masked the real error. Call directly so failures
    # propagate (consistent with the other save paths in this file).
    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event,
        hashes=hashes,
        **group_kwargs
    )

    event.group = group

    try:
        with transaction.atomic():
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        # this event_id was already recorded; treat as a duplicate delivery
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
        return event

    UserReport.objects.filter(
        project=project,
        event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic():
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id)
            return event

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save_data(self, project, data, raw=False):
    """Persist a normalized event payload for ``project``.

    Returns the created Event, or None when group creation fails.
    """
    # TODO: this function is way too damn long and needs refactored
    # the inner imports also suck so let's try to move it away from
    # the objects manager
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    from sentry.plugins import plugins
    from sentry.models import Event, Project, EventMapping

    project = Project.objects.get_from_cache(id=project)

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    culprit = data.pop('culprit') or ''
    level = data.pop('level')
    time_spent = data.pop('time_spent')
    logger_name = data.pop('logger')
    server_name = data.pop('server_name')
    site = data.pop('site')
    checksum = data.pop('checksum')
    platform = data.pop('platform')

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if not checksum:
        checksum = get_checksum_from_event(event)

    event.checksum = checksum

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    # FIX: was `if logger:` — that names the module-level logger object
    # (always truthy), so the tag was appended even when no logger name
    # was supplied; test the popped value instead.
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    try:
        group, is_new, is_regression, is_sample = self._create_group(
            event=event,
            tags=data['tags'],
            **group_kwargs
        )
    except Exception as exc:
        # TODO: should we mail admins when there are failures?
        try:
            logger.exception(u'Unable to process log entry: %s', exc)
        except Exception as exc:
            # FIX: warnings.warn(msg, exc) passed the exception instance as
            # the `category` argument (must be a Warning subclass), which
            # raises TypeError; format it into the message instead.
            warnings.warn(u'Unable to process log entry: %s' % (exc,))
        return

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            # duplicate event_id: another worker already stored this event
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        send_group_processors(
            group=group,
            event=event,
            is_new=is_new or is_regression,  # backwards compat
            is_sample=is_sample,
            is_regression=is_regression,
        )

    index_event.delay(event)

    # TODO: move this to the queue
    if is_new and not raw:
        regression_signal.send_robust(sender=self.model, instance=group)

    return event
def save(self, project, raw=False):
    """Persist this event's payload for ``project`` and return the Event.

    Builds tags, hashes the event (fingerprint > checksum > default),
    aggregates it into a Group via ``_save_aggregate``, stores the
    event-id mapping, records TSDB counters, and (unless ``raw``)
    schedules post-processing.
    """
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop("event_id")
    message = data.pop("message")
    level = data.pop("level")
    culprit = data.pop("culprit", None)
    logger_name = data.pop("logger", None)
    server_name = data.pop("server_name", None)
    site = data.pop("site", None)
    checksum = data.pop("checksum", None)
    fingerprint = data.pop("fingerprint", None)
    platform = data.pop("platform", None)
    release = data.pop("release", None)
    environment = data.pop("environment", None)

    # unused
    time_spent = data.pop("time_spent", None)

    if not culprit:
        # derive a culprit from the payload (e.g. stacktrace) when the
        # client did not send one
        culprit = generate_culprit(data, platform=platform)

    # client timestamps arrive as epoch seconds; normalize to aware UTC
    date = datetime.fromtimestamp(data.pop("timestamp"))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {"message": message, "platform": platform}

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get("tags") or []
    tags.append(("level", LOG_LEVELS[level]))
    if logger_name:
        tags.append(("logger", logger_name))
    if server_name:
        tags.append(("server_name", server_name))
    if site:
        tags.append(("site", site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(("sentry:release", release))
    if environment:
        tags.append(("environment", environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(("sentry:user", event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data["tags"] = tags
    data["fingerprint"] = fingerprint or ["{{ default }}"]

    # Get rid of ephemeral interface data
    for interface_class, _ in iter_interfaces():
        interface = interface_class()
        if interface.ephemeral:
            data.pop(interface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    # TODO(dcramer): temp workaround for complexity
    # message must be visible to the event-type classifier, so it is put
    # back into data just for this computation and removed below
    data["message"] = message
    event_type = eventtypes.get(data.get("type", "default"))(data)

    group_kwargs = kwargs.copy()
    group_kwargs.update(
        {
            "culprit": culprit,
            "logger": logger_name,
            "level": level,
            "last_seen": date,
            "first_seen": date,
            "data": {
                "last_received": event.data.get("received") or float(event.datetime.strftime("%s")),
                "type": event_type.key,
                # we cache the events metadata on the group to ensure its
                # accessible in the stream
                "metadata": event_type.get_metadata(),
            },
        }
    )

    # TODO(dcramer): temp workaround for complexity
    del data["message"]

    if release:
        release = Release.get_or_create(project=project, version=release, date_added=date)

        group_kwargs["first_release"] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        # this event_id was already recorded: duplicate delivery, bail out
        self.logger.info("Duplicate EventMapping found for event_id=%s", event_id, exc_info=True)
        return event

    UserReport.objects.filter(project=project, event_id=event_id).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info("Duplicate Event found for event_id=%s", event_id, exc_info=True)
            return event

    index_event_tags.delay(project_id=project.id, event_id=event.id, tags=tags)

    if event_user:
        tsdb.record_multi(
            (
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
            ),
            timestamp=event.datetime,
        )

    if is_new and release:
        buffer.incr(Release, {"new_groups": 1}, {"id": release.id})

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression
        )
    else:
        self.logger.info("Raw event passed; skipping post process for event_id=%s", event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    """Persist this event's payload for ``project`` and return the Event.

    Tags are deduplicated via a dict, the searchable ``Event.message``
    is assembled from the raw message plus event-type metadata, the
    event is aggregated into a Group, and environment/release TSDB
    counters are recorded before post-processing is scheduled.
    """
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        # if we generate an implicit culprit, lets not call it a
        # transaction
        transaction_name = None
        culprit = generate_culprit(data, platform=platform)
    else:
        transaction_name = culprit

    # client timestamps arrive as epoch seconds; normalize to aware UTC
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # convert this to a dict to ensure we're only storing one value per key
    # as most parts of Sentry dont currently play well with multiple values
    tags = dict(data.get('tags') or [])
    tags['level'] = LOG_LEVELS[level]
    if logger_name:
        tags['logger'] = logger_name
    if server_name:
        tags['server_name'] = server_name
    if site:
        tags['site'] = site
    if environment:
        tags['environment'] = environment
    if transaction_name:
        tags['transaction'] = transaction_name

    if release:
        # dont allow a conflicting 'release' tag
        if 'release' in tags:
            del tags['release']
        tags['sentry:release'] = release

    event_user = self._get_event_user(project, data)
    if event_user:
        # dont allow a conflicting 'user' tag
        if 'user' in tags:
            del tags['user']
        tags['sentry:user'] = event_user.tag_value

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            # plugins should not override user provided tags
            for key, value in added_tags:
                tags.setdefault(key, value)

    # tags are stored as a tuple
    tags = tags.items()

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags
    data['fingerprint'] = fingerprint or ['{{ default }}']

    for path, iface in six.iteritems(event.interfaces):
        data['tags'].extend(iface.iter_tags())
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = [
            md5_from_hash(h)
            for h in get_hashes_from_fingerprint(event, fingerprint)
        ]
    elif checksum:
        hashes = [checksum]
        data['checksum'] = checksum
    else:
        hashes = [
            md5_from_hash(h)
            for h in get_hashes_for_event(event)
        ]

    # TODO(dcramer): temp workaround for complexity
    # the event-type classifier needs the message inside data; it is
    # removed again right after classification
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, six.string_types):
        message = force_text(message)

    # append each metadata value not already present, so search can match it
    for value in six.itervalues(event_metadata):
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    if culprit and culprit not in message:
        culprit_u = force_text(culprit, errors='replace')
        message = u'{} {}'.format(message, culprit_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'active_at': date,
        'data': {
            'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event,
        hashes=hashes,
        release=release,
        **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        # this event_id was already recorded: duplicate delivery, bail out
        self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True)
        return event

    environment = Environment.get_or_create(
        project=project,
        name=environment,
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    counters = [
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ]

    if release:
        counters.append((tsdb.models.release, release.id))

    tsdb.incr_multi(counters, timestamp=event.datetime)

    frequencies = [
        # (tsdb.models.frequent_projects_by_organization, {
        #     project.organization_id: {
        #         project.id: 1,
        #     },
        # }),
        # (tsdb.models.frequent_issues_by_project, {
        #     project.id: {
        #         group.id: 1,
        #     },
        # })
        (tsdb.models.frequent_environments_by_group, {
            group.id: {
                environment.id: 1,
            },
        })
    ]

    if release:
        frequencies.append(
            (tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            })
        )

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    UserReport.objects.filter(
        project=project,
        event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True)
            return event

    index_event_tags.delay(
        project_id=project.id,
        group_id=group.id,
        event_id=event.id,
        tags=tags,
    )

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    """Persist this event's payload for ``project`` and return the Event.

    Builds list-style tags, assembles a searchable ``Event.message``
    from event-type metadata, aggregates the event into a Group, and
    records TSDB counters/frequencies before scheduling post-processing.
    """
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        # derive a culprit from the payload when the client sent none
        culprit = generate_culprit(data, platform=platform)

    # client timestamps arrive as epoch seconds; normalize to aware UTC
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags
    data['fingerprint'] = fingerprint or ['{{ default }}']

    for path, iface in event.interfaces.iteritems():
        data['tags'].extend(iface.iter_tags())
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    # TODO(dcramer): temp workaround for complexity
    # the event-type classifier needs the message inside data; it is
    # removed again right after classification
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, basestring):
        message = force_text(message)

    # append each metadata value not already present, so search can match it
    for value in event_metadata.itervalues():
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'data': {
            'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event,
        hashes=hashes,
        release=release,
        **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        # this event_id was already recorded: duplicate delivery, bail out
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id, exc_info=True)
        return event

    if release:
        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    tsdb.incr_multi([
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ], timestamp=event.datetime)

    frequencies = [
        (tsdb.models.frequent_projects_by_organization, {
            project.organization_id: {
                project.id: 1,
            },
        }),
        (tsdb.models.frequent_issues_by_project, {
            project.id: {
                group.id: 1,
            },
        })
    ]
    if release:
        frequencies.append(
            # NOTE(review): other versions of this routine in the file use
            # `frequent_releases_by_group` (singular) — confirm this plural
            # model name exists on tsdb.models.
            (tsdb.models.frequent_releases_by_groups, {
                group.id: {
                    grouprelease.id: 1,
                },
            })
        )

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    UserReport.objects.filter(
        project=project,
        event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id, exc_info=True)
            return event

    index_event_tags.delay(
        project_id=project.id,
        group_id=group.id,
        event_id=event.id,
        tags=tags,
    )

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    """Normalize, persist, and aggregate this event into a Group.

    ``project`` is a project id (resolved via the project cache).  When
    ``raw`` is True, post-processing (and the regression signal) is
    skipped.

    Returns the saved ``Event``.  On a duplicate ``event_id`` (an
    ``IntegrityError`` creating the ``EventMapping`` or saving the
    ``Event``) the partially processed event is returned early, so
    side effects after that point do not run for duplicates.
    """
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    # work on a copy so the manager's own payload is left untouched
    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')

    # an explicit 'transaction' wins over a client-supplied 'culprit'
    culprit = data.pop('transaction', None)
    if not culprit:
        culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    dist = data.pop('dist', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        # if we generate an implicit culprit, lets not call it a
        # transaction
        transaction_name = None
        culprit = generate_culprit(data, platform=platform)
    else:
        transaction_name = culprit

    # NOTE(review): datetime.fromtimestamp() converts using the server's
    # local timezone before tzinfo is overwritten with UTC — assumes the
    # server runs in UTC; confirm.
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )
    # prime the cached project so later attribute access avoids a lookup
    event._project_cache = project

    # convert this to a dict to ensure we're only storing one value per key
    # as most parts of Sentry dont currently play well with multiple values
    tags = dict(data.get('tags') or [])
    tags['level'] = LOG_LEVELS[level]
    if logger_name:
        tags['logger'] = logger_name
    if server_name:
        tags['server_name'] = server_name
    if site:
        tags['site'] = site
    if environment:
        tags['environment'] = environment
    if transaction_name:
        tags['transaction'] = transaction_name

    if release:
        # dont allow a conflicting 'release' tag
        if 'release' in tags:
            del tags['release']
        # `release` is rebound from a version string to a Release model
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )
        tags['sentry:release'] = release.version

    # a dist is only meaningful when attached to a release
    if dist and release:
        dist = release.add_dist(dist, date)
        tags['sentry:dist'] = dist.name
    else:
        dist = None

    event_user = self._get_event_user(project, data)
    if event_user:
        # dont allow a conflicting 'user' tag
        if 'user' in tags:
            del tags['user']
        tags['sentry:user'] = event_user.tag_value

    # At this point we want to normalize the in_app values in case the
    # clients did not set this appropriately so far.
    normalize_in_app(data)

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            # plugins should not override user provided tags
            for key, value in added_tags:
                tags.setdefault(key, value)

    # tags are stored as a tuple
    # NOTE(review): on Python 2 .items() yields a list; on Python 3 this
    # would be a view — downstream .extend() relies on list semantics.
    tags = tags.items()

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags
    data['fingerprint'] = fingerprint or ['{{ default }}']

    for path, iface in six.iteritems(event.interfaces):
        data['tags'].extend(iface.iter_tags())
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = [md5_from_hash(h) for h in get_hashes_from_fingerprint(event, fingerprint)]
    elif checksum:
        hashes = [checksum]
        data['checksum'] = checksum
    else:
        hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

    # TODO(dcramer): temp workaround for complexity
    # event types read the message out of `data`, so stash it briefly
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, six.string_types):
        message = force_text(message)

    # append metadata values / culprit so they become searchable via message
    for value in six.itervalues(event_metadata):
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    if culprit and culprit not in message:
        culprit_u = force_text(culprit, errors='replace')
        message = u'{} {}'.format(message, culprit_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update(
        {
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': {
                'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
                'type': event_type.key,
                # we cache the events metadata on the group to ensure its
                # accessible in the stream
                'metadata': event_metadata,
            },
        }
    )

    if release:
        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    # EventMapping doubles as the dedupe guard: a unique violation here
    # means this event_id was already processed, so bail out early.
    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info(
            'duplicate.found',
            exc_info=True,
            extra={
                'event_uuid': event_id,
                'project_id': project.id,
                'group_id': group.id,
                'model': EventMapping.__name__,
            }
        )
        return event

    # `environment` is rebound from a name string to an Environment model
    environment = Environment.get_or_create(
        project=project,
        name=environment,
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    counters = [
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ]

    if release:
        counters.append((tsdb.models.release, release.id))

    tsdb.incr_multi(counters, timestamp=event.datetime)

    frequencies = [
        # (tsdb.models.frequent_projects_by_organization, {
        #     project.organization_id: {
        #         project.id: 1,
        #     },
        # }),
        # (tsdb.models.frequent_issues_by_project, {
        #     project.id: {
        #         group.id: 1,
        #     },
        # })
        (tsdb.models.frequent_environments_by_group, {
            group.id: {
                environment.id: 1,
            },
        })
    ]

    if release:
        frequencies.append(
            (tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            })
        )

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    # attach any user feedback filed before the event itself arrived
    UserReport.objects.filter(
        project=project,
        event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info(
                'duplicate.found',
                exc_info=True,
                extra={
                    'event_uuid': event_id,
                    'project_id': project.id,
                    'group_id': group.id,
                    'model': Event.__name__,
                }
            )
            return event

        # tag indexing only makes sense for events that were persisted
        index_event_tags.delay(
            organization_id=project.organization_id,
            project_id=project.id,
            group_id=group.id,
            event_id=event.id,
            tags=tags,
        )

    if event_user:
        tsdb.record_multi(
            (
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
            ),
            timestamp=event.datetime
        )

    if is_new and release:
        buffer.incr(
            ReleaseProject, {'new_groups': 1}, {
                'release_id': release.id,
                'project_id': project.id,
            }
        )

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    """Normalize, persist, and aggregate this event into a Group.

    ``project`` is a project id (resolved via the project cache).  When
    ``raw`` is True, post-processing (and the regression signal) is
    skipped.

    Returns the saved ``Event``.  On a duplicate ``event_id`` (an
    ``IntegrityError`` creating the ``EventMapping`` or saving the
    ``Event``) the partially processed event is returned early.

    NOTE(review): uses ``basestring`` and ``dict.itervalues`` — this
    variant is Python 2 only.
    """
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    # work on a copy so the manager's own payload is left untouched
    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        culprit = generate_culprit(data, platform=platform)

    # NOTE(review): datetime.fromtimestamp() converts using the server's
    # local timezone before tzinfo is overwritten with UTC — assumes the
    # server runs in UTC; confirm.
    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(project_id=project.id, event_id=event_id,
                  data=data, time_spent=time_spent,
                  datetime=date, **kwargs)

    # tags are accumulated as (key, value) pairs; duplicates are possible
    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags
    data['fingerprint'] = fingerprint or ['{{ default }}']

    # Get rid of ephemeral interface data
    for interface_class, _ in iter_interfaces():
        interface = interface_class()
        if interface.ephemeral:
            data.pop(interface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    # TODO(dcramer): temp workaround for complexity
    # event types read the message out of `data`, so stash it briefly
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, basestring):
        message = force_text(message)

    # append metadata values so they become searchable via message
    for value in event_metadata.itervalues():
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'data': {
            'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        # `release` is rebound from a version string to a Release model
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs)

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    # EventMapping doubles as the dedupe guard: a unique violation here
    # means this event_id was already processed, so bail out early.
    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id, exc_info=True)
        return event

    # attach any user feedback filed before the event itself arrived
    UserReport.objects.filter(
        project=project,
        event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id, exc_info=True)
            return event

        # tag indexing only makes sense for events that were persisted
        index_event_tags.delay(
            project_id=project.id,
            group_id=group.id,
            event_id=event.id,
            tags=tags,
        )

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info(
            'Raw event passed; skipping post process for event_id=%s', event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event