def _dispatch_post_process_group_task(
    self,
    event_id: str,
    project_id: int,
    group_id: Optional[int],
    is_new: bool,
    is_regression: bool,
    is_new_group_environment: bool,
    primary_hash: Optional[str],
    skip_consume: bool = False,
) -> None:
    if skip_consume:
        logger.info("post_process.skip.raw_event", extra={"event_id": event_id})
    else:
        cache_key = cache_key_for_event({"project": project_id, "event_id": event_id})

        post_process_group.delay(
            is_new=is_new,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=primary_hash,
            cache_key=cache_key,
            group_id=group_id,
        )
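# --- Illustrative sketch (not part of the original source) ---
# The dispatcher above enqueues work by cache key instead of a pickled event,
# keeping the Celery payload small and letting the worker fetch the event body
# from the processing cache. A minimal stand-in for the key scheme, assuming a
# simple "{prefix}:{event_id}:{project}" layout (an assumption; the real
# helper lives elsewhere in Sentry):
def _sketch_cache_key_for_event(data):
    # Hypothetical layout; only the "small, deterministic key" property matters.
    return u"e:{1}:{0}".format(data["project"], data["event_id"])

assert _sketch_cache_key_for_event({"project": 1, "event_id": "abc"}) == u"e:abc:1"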
def _dispatch_post_process_group_task(
    self,
    event,
    is_new,
    is_regression,
    is_new_group_environment,
    primary_hash,
    skip_consume=False,
):
    if skip_consume:
        logger.info("post_process.skip.raw_event", extra={"event_id": event.event_id})
    else:
        cache_key = cache_key_for_event({"project": event.project_id, "event_id": event.event_id})

        post_process_group.delay(
            is_new=is_new,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=primary_hash,
            cache_key=cache_key,
            group_id=event.group_id,
        )
def relay(self, consumer_group, commit_log_topic, synchronize_commit_group,
          commit_batch_size=100, initial_offset_reset='latest'):
    consumer = SynchronizedConsumer(
        bootstrap_servers=self.producer_configuration['bootstrap.servers'],
        consumer_group=consumer_group,
        commit_log_topic=commit_log_topic,
        synchronize_commit_group=synchronize_commit_group,
        initial_offset_reset=initial_offset_reset,
    )

    consumer.subscribe([self.publish_topic])

    offsets = {}

    def commit_offsets():
        consumer.commit(
            offsets=[
                TopicPartition(topic, partition, offset)
                for (topic, partition), offset in offsets.items()
            ],
            asynchronous=False,
        )

    try:
        i = 0
        while True:
            message = consumer.poll(0.1)
            if message is None:
                continue

            error = message.error()
            if error is not None:
                raise Exception(error)

            i = i + 1
            offsets[(message.topic(), message.partition())] = message.offset() + 1

            payload = parse_event_message(message.value())
            if payload is not None:
                post_process_group.delay(**payload)

            if i % commit_batch_size == 0:
                commit_offsets()
    except KeyboardInterrupt:
        pass

    logger.info('Committing offsets and closing consumer...')

    if offsets:
        commit_offsets()

    consumer.close()
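# --- Illustrative sketch (not part of the original source) ---
# Why `message.offset() + 1` above: Kafka commits store the offset of the
# *next* message to consume, so recording last-processed + 1 lets a restarted
# consumer resume without reprocessing. A self-contained model of that
# bookkeeping (names here are hypothetical):
offsets_sketch = {}

def record_processed(topic, partition, message_offset):
    # Store the resume position, not the position of the processed message.
    offsets_sketch[(topic, partition)] = message_offset + 1

record_processed("events", 0, 41)
record_processed("events", 0, 42)
assert offsets_sketch[("events", 0)] == 43  # a restarted consumer resumes at 43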
def publish(self, group, event, is_new, is_sample, is_regression,
            is_new_group_environment, primary_hash, skip_consume=False):
    if skip_consume:
        logger.info('post_process.skip.raw_event', extra={'event_id': event.id})
    else:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=primary_hash,
        )
def insert(self, group, event, is_new, is_sample, is_regression,
           is_new_group_environment, primary_hash, skip_consume=False):
    if skip_consume:
        logger.info('post_process.skip.raw_event', extra={'event_id': event.id})
    else:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=primary_hash,
        )
def _dispatch_post_process_group_task(
    self,
    event,
    is_new,
    is_regression,
    is_new_group_environment,
    primary_hash,
    skip_consume=False,
):
    if skip_consume:
        logger.info("post_process.skip.raw_event", extra={"event_id": event.event_id})
    else:
        post_process_group.delay(
            event=event,
            is_new=is_new,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=primary_hash,
        )
def _dispatch_post_process_group_task(
    self,
    event,
    is_new,
    is_regression,
    is_new_group_environment,
    primary_hash,
    skip_consume=False,
):
    if skip_consume:
        logger.info("post_process.skip.raw_event", extra={"event_id": event.event_id})
    else:
        random_val = random.random()
        cache_key = cache_key_for_event({"project": event.project_id, "event_id": event.event_id})
        if options.get("postprocess.use-cache-key") > random_val:
            post_process_group.delay(
                event=None,
                is_new=is_new,
                is_regression=is_regression,
                is_new_group_environment=is_new_group_environment,
                primary_hash=primary_hash,
                cache_key=cache_key,
                group_id=event.group_id,
            )
        else:
            # Pass the cache key here to ensure that the processing cache is removed.
            post_process_group.delay(
                event=event,
                is_new=is_new,
                is_regression=is_regression,
                is_new_group_environment=is_new_group_environment,
                primary_hash=primary_hash,
                cache_key=cache_key,
            )
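# --- Illustrative sketch (not part of the original source) ---
# The option check above is a percentage rollout: comparing a float option in
# [0, 1] against random.random() routes roughly that fraction of events down
# the new cache-key path. A self-contained demonstration, with the option
# store faked as a plain float:
import random

def use_new_path(rollout_rate):
    # rollout_rate=0.0 disables the new path; 1.0 sends all traffic to it.
    return rollout_rate > random.random()

hits = sum(use_new_path(0.25) for _ in range(100000))
print("fraction on new path: %.2f" % (hits / 100000.0))  # ~0.25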
def save(self, project, raw=False):
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    else:
        hashes = get_hashes_for_event(event)

    # TODO(dcramer): remove checksum usage
    event.checksum = hashes[0]

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    tags = data['tags']
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project):
        added_tags = safe_execute(plugin.get_tags, event)
        if added_tags:
            tags.extend(added_tags)

    result = safe_execute(
        self._save_aggregate,
        event=event,
        tags=tags,
        hashes=hashes,
        **group_kwargs
    )
    if result is None:
        return

    group, is_new, is_regression, is_sample = result

    using = group._state.db

    event.group = group

    # save the event unless its been sampled
    if not is_sample:
        sid = transaction.savepoint(using=using)
        try:
            event.save()
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            return event
        transaction.savepoint_commit(sid, using=using)

    sid = transaction.savepoint(using=using)
    try:
        EventMapping.objects.create(
            project=project, group=group, event_id=event_id)
    except IntegrityError:
        transaction.savepoint_rollback(sid, using=using)
        return event
    transaction.savepoint_commit(sid, using=using)
    transaction.commit_unless_managed(using=using)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
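# --- Illustrative sketch (not part of the original source) ---
# The savepoint dance above tolerates races on unique constraints: if a
# concurrent worker already inserted the row, only the failed insert is rolled
# back, not the surrounding transaction. A generic version of the pattern,
# assuming Django's low-level savepoint API (the function name is hypothetical):
from django.db import IntegrityError, transaction

def create_ignoring_duplicates(model, using=None, **fields):
    sid = transaction.savepoint(using=using)
    try:
        obj = model.objects.create(**fields)
    except IntegrityError:
        # A concurrent writer won the race; discard just this insert.
        transaction.savepoint_rollback(sid, using=using)
        return None
    transaction.savepoint_commit(sid, using=using)
    return obj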
def save(self, project, raw=False):
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    else:
        hashes = get_hashes_for_event(event)

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        group_kwargs['first_release'] = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=release,
            data={'version': release},
            datetime=date,
        )

    group, is_new, is_regression, is_sample = safe_execute(
        self._save_aggregate,
        event=event,
        hashes=hashes,
        **group_kwargs
    )

    using = group._state.db

    event.group = group

    try:
        with transaction.atomic():
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic():
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id)
            return event

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def send_group_processors(group, event, **kwargs):
    post_process_group.delay(event=event, **kwargs)
def save(self, project, raw=False):
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        culprit = generate_culprit(data, platform=platform)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    data['fingerprint'] = fingerprint or ['{{ default }}']

    for path, iface in event.interfaces.iteritems():
        data['tags'].extend(iface.iter_tags())
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    # TODO(dcramer): temp workaround for complexity
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, basestring):
        message = force_text(message)

    for value in event_metadata.itervalues():
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'data': {
            'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event,
        hashes=hashes,
        release=release,
        **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id,
                         exc_info=True)
        return event

    if release:
        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    tsdb.incr_multi([
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ], timestamp=event.datetime)

    frequencies = [
        (tsdb.models.frequent_projects_by_organization, {
            project.organization_id: {
                project.id: 1,
            },
        }),
        (tsdb.models.frequent_issues_by_project, {
            project.id: {
                group.id: 1,
            },
        }),
    ]

    if release:
        frequencies.append(
            (tsdb.models.frequent_releases_by_groups, {
                group.id: {
                    grouprelease.id: 1,
                },
            })
        )

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id,
                             exc_info=True)
            return event

        index_event_tags.delay(
            project_id=project.id,
            group_id=group.id,
            event_id=event.id,
            tags=tags,
        )

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        # if we generate an implicit culprit, lets not call it a
        # transaction
        transaction_name = None
        culprit = generate_culprit(data, platform=platform)
    else:
        transaction_name = culprit

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    # convert this to a dict to ensure we're only storing one value per key
    # as most parts of Sentry dont currently play well with multiple values
    tags = dict(data.get('tags') or [])
    tags['level'] = LOG_LEVELS[level]
    if logger_name:
        tags['logger'] = logger_name
    if server_name:
        tags['server_name'] = server_name
    if site:
        tags['site'] = site
    if environment:
        tags['environment'] = environment
    if transaction_name:
        tags['transaction'] = transaction_name

    if release:
        # dont allow a conflicting 'release' tag
        if 'release' in tags:
            del tags['release']
        tags['sentry:release'] = release

    event_user = self._get_event_user(project, data)
    if event_user:
        # dont allow a conflicting 'user' tag
        if 'user' in tags:
            del tags['user']
        tags['sentry:user'] = event_user.tag_value

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            # plugins should not override user provided tags
            for key, value in added_tags:
                tags.setdefault(key, value)

    # tags are stored as a tuple
    tags = tags.items()

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    data['fingerprint'] = fingerprint or ['{{ default }}']

    for path, iface in six.iteritems(event.interfaces):
        data['tags'].extend(iface.iter_tags())
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = [
            md5_from_hash(h)
            for h in get_hashes_from_fingerprint(event, fingerprint)
        ]
    elif checksum:
        hashes = [checksum]
        data['checksum'] = checksum
    else:
        hashes = [
            md5_from_hash(h)
            for h in get_hashes_for_event(event)
        ]

    # TODO(dcramer): temp workaround for complexity
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, six.string_types):
        message = force_text(message)

    for value in six.itervalues(event_metadata):
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    if culprit and culprit not in message:
        culprit_u = force_text(culprit, errors='replace')
        message = u'{} {}'.format(message, culprit_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'active_at': date,
        'data': {
            'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event,
        hashes=hashes,
        release=release,
        **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True)
        return event

    environment = Environment.get_or_create(
        project=project,
        name=environment,
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    counters = [
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ]

    if release:
        counters.append((tsdb.models.release, release.id))

    tsdb.incr_multi(counters, timestamp=event.datetime)

    frequencies = [
        # (tsdb.models.frequent_projects_by_organization, {
        #     project.organization_id: {
        #         project.id: 1,
        #     },
        # }),
        # (tsdb.models.frequent_issues_by_project, {
        #     project.id: {
        #         group.id: 1,
        #     },
        # })
        (tsdb.models.frequent_environments_by_group, {
            group.id: {
                environment.id: 1,
            },
        })
    ]

    if release:
        frequencies.append(
            (tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            })
        )

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('duplicate.found', extra={'event_id': event.id}, exc_info=True)
            return event

        index_event_tags.delay(
            project_id=project.id,
            group_id=group.id,
            event_id=event.id,
            tags=tags,
        )

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
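# --- Illustrative sketch (not part of the original source) ---
# The hashing branches above prefer an explicit fingerprint over a checksum.
# A rough stand-in for md5_from_hash, assuming it reduces a list of hash
# components to a hex digest (an assumption; the real helper handles
# non-string components and encoding edge cases):
import hashlib

def sketch_md5_from_hash(hash_bits):
    result = hashlib.md5()
    for bit in hash_bits:
        result.update(str(bit).encode("utf-8"))
    return result.hexdigest()

assert sketch_md5_from_hash(["{{ default }}", "ValueError"]) == \
    hashlib.md5(b"{{ default }}ValueError").hexdigest()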
def save(self, project, raw=False):
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    if not culprit:
        culprit = generate_culprit(data)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    data['fingerprint'] = fingerprint or '{{ default }}'

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=release,
            data={'version': release},
            datetime=date,
        )

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event,
        hashes=hashes,
        release=release,
        **group_kwargs
    )

    event.group = group
    event.group_id = group.id
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic():
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic():
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id)
            return event

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    if not culprit:
        culprit = generate_culprit(data)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    data['fingerprint'] = fingerprint or ['{{ default }}']

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs)

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id,
                         exc_info=True)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id,
                             exc_info=True)
            return event

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info(
            'Raw event passed; skipping post process for event_id=%s', event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    # TODO: culprit should default to "most recent" frame in stacktraces when
    # it's not provided.
    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    message = data.pop('message')
    level = data.pop('level')
    culprit = data.pop('culprit', None) or ''
    time_spent = data.pop('time_spent', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'message': message,
        'platform': platform,
    }

    event = Event(
        project=project,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    # Calculate the checksum from the first highest scoring interface
    if checksum:
        hashes = [checksum]
    elif fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'time_spent_total': time_spent or 0,
        'time_spent_count': time_spent and 1 or 0,
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=release,
            data={'version': release},
            datetime=date,
        )

    group, is_new, is_regression, is_sample = safe_execute(
        self._save_aggregate, event=event, hashes=hashes, **group_kwargs)

    using = group._state.db

    event.group = group

    try:
        with transaction.atomic():
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic():
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id)
            return event

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info(
            'Raw event passed; skipping post process for event_id=%s', event_id)

    index_event.delay(event)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop("event_id")
    message = data.pop("message")
    level = data.pop("level")
    culprit = data.pop("culprit", None)
    logger_name = data.pop("logger", None)
    server_name = data.pop("server_name", None)
    site = data.pop("site", None)
    checksum = data.pop("checksum", None)
    fingerprint = data.pop("fingerprint", None)
    platform = data.pop("platform", None)
    release = data.pop("release", None)
    environment = data.pop("environment", None)

    # unused
    time_spent = data.pop("time_spent", None)

    if not culprit:
        culprit = generate_culprit(data, platform=platform)

    date = datetime.fromtimestamp(data.pop("timestamp"))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {"message": message, "platform": platform}

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get("tags") or []
    tags.append(("level", LOG_LEVELS[level]))
    if logger_name:
        tags.append(("logger", logger_name))
    if server_name:
        tags.append(("server_name", server_name))
    if site:
        tags.append(("site", site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(("sentry:release", release))
    if environment:
        tags.append(("environment", environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(("sentry:user", event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data["tags"] = tags

    data["fingerprint"] = fingerprint or ["{{ default }}"]

    # Get rid of ephemeral interface data
    for interface_class, _ in iter_interfaces():
        interface = interface_class()
        if interface.ephemeral:
            data.pop(interface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    # TODO(dcramer): temp workaround for complexity
    data["message"] = message
    event_type = eventtypes.get(data.get("type", "default"))(data)

    group_kwargs = kwargs.copy()
    group_kwargs.update(
        {
            "culprit": culprit,
            "logger": logger_name,
            "level": level,
            "last_seen": date,
            "first_seen": date,
            "data": {
                "last_received": event.data.get("received") or float(event.datetime.strftime("%s")),
                "type": event_type.key,
                # we cache the events metadata on the group to ensure its
                # accessible in the stream
                "metadata": event_type.get_metadata(),
            },
        }
    )

    # TODO(dcramer): temp workaround for complexity
    del data["message"]

    if release:
        release = Release.get_or_create(project=project, version=release, date_added=date)

        group_kwargs["first_release"] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info("Duplicate EventMapping found for event_id=%s", event_id, exc_info=True)
        return event

    UserReport.objects.filter(project=project, event_id=event_id).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info("Duplicate Event found for event_id=%s", event_id, exc_info=True)
            return event

        index_event_tags.delay(project_id=project.id, event_id=event.id, tags=tags)

    if event_user:
        tsdb.record_multi(
            (
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
            ),
            timestamp=event.datetime,
        )

    if is_new and release:
        buffer.incr(Release, {"new_groups": 1}, {"id": release.id})

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression
        )
    else:
        self.logger.info("Raw event passed; skipping post process for event_id=%s", event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def send_group_processors(group, **kwargs):
    post_process_group.delay(group=group, **kwargs)
def save(self, project, raw=False):
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')
    culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        culprit = generate_culprit(data, platform=platform)

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )

    tags = data.get('tags') or []
    tags.append(('level', LOG_LEVELS[level]))
    if logger_name:
        tags.append(('logger', logger_name))
    if server_name:
        tags.append(('server_name', server_name))
    if site:
        tags.append(('site', site))
    if release:
        # TODO(dcramer): we should ensure we create Release objects
        tags.append(('sentry:release', release))
    if environment:
        tags.append(('environment', environment))

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            tags.extend(added_tags)

    event_user = self._get_event_user(project, data)
    if event_user:
        tags.append(('sentry:user', event_user.tag_value))

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags

    data['fingerprint'] = fingerprint or ['{{ default }}']

    # Get rid of ephemeral interface data
    for interface_class, _ in iter_interfaces():
        interface = interface_class()
        if interface.ephemeral:
            data.pop(interface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
    elif checksum:
        hashes = [checksum]
    else:
        hashes = map(md5_from_hash, get_hashes_for_event(event))

    # TODO(dcramer): temp workaround for complexity
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, basestring):
        message = force_text(message)

    for value in event_metadata.itervalues():
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'data': {
            'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )

        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs)

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(
                project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info('Duplicate EventMapping found for event_id=%s', event_id,
                         exc_info=True)
        return event

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('Duplicate Event found for event_id=%s', event_id,
                             exc_info=True)
            return event

        index_event_tags.delay(
            project_id=project.id,
            group_id=group.id,
            event_id=event.id,
            tags=tags,
        )

    if event_user:
        tsdb.record_multi((
            (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
            (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
        ), timestamp=event.datetime)

    if is_new and release:
        buffer.incr(Release, {'new_groups': 1}, {
            'id': release.id,
        })

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info(
            'Raw event passed; skipping post process for event_id=%s', event_id)

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
def save(self, project, raw=False):
    from sentry.tasks.post_process import index_event_tags

    data = self.data

    project = Project.objects.get_from_cache(id=project)

    # Check to make sure we're not about to do a bunch of work that's
    # already been done if we've processed an event with this ID. (This
    # isn't a perfect solution -- this doesn't handle ``EventMapping`` and
    # there's a race condition between here and when the event is actually
    # saved, but it's an improvement. See GH-7677.)
    try:
        event = Event.objects.get(
            project_id=project.id,
            event_id=data['event_id'],
        )
    except Event.DoesNotExist:
        pass
    else:
        self.logger.info('duplicate.found', exc_info=True, extra={
            'event_uuid': data['event_id'],
            'project_id': project.id,
            'model': Event.__name__,
        })
        return event

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')
    culprit = data.pop('transaction', None)
    if not culprit:
        culprit = data.pop('culprit', None)
    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    dist = data.pop('dist', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        # if we generate an implicit culprit, lets not call it a
        # transaction
        transaction_name = None
        culprit = generate_culprit(data, platform=platform)
    else:
        transaction_name = culprit

    culprit = force_text(culprit)

    recorded_timestamp = data.pop('timestamp')
    date = datetime.fromtimestamp(recorded_timestamp)
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )
    event._project_cache = project

    # convert this to a dict to ensure we're only storing one value per key
    # as most parts of Sentry dont currently play well with multiple values
    tags = dict(data.get('tags') or [])
    tags['level'] = LOG_LEVELS[level]
    if logger_name:
        tags['logger'] = logger_name
    if server_name:
        tags['server_name'] = server_name
    if site:
        tags['site'] = site
    if environment:
        tags['environment'] = environment
    if transaction_name:
        tags['transaction'] = transaction_name

    if release:
        # dont allow a conflicting 'release' tag
        if 'release' in tags:
            del tags['release']
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )
        tags['sentry:release'] = release.version

    if dist and release:
        dist = release.add_dist(dist, date)
        tags['sentry:dist'] = dist.name
    else:
        dist = None

    event_user = self._get_event_user(project, data)
    if event_user:
        # dont allow a conflicting 'user' tag
        if 'user' in tags:
            del tags['user']
        tags['sentry:user'] = event_user.tag_value

    # At this point we want to normalize the in_app values in case the
    # clients did not set this appropriately so far.
    normalize_in_app(data)

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            # plugins should not override user provided tags
            for key, value in added_tags:
                tags.setdefault(key, value)

    for path, iface in six.iteritems(event.interfaces):
        for k, v in iface.iter_tags():
            tags[k] = v
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # tags are stored as a tuple
    tags = tags.items()

    data['tags'] = tags
    data['fingerprint'] = fingerprint or ['{{ default }}']

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = [
            md5_from_hash(h)
            for h in get_hashes_from_fingerprint(event, fingerprint)
        ]
    elif checksum:
        if HASH_RE.match(checksum):
            hashes = [checksum]
        else:
            hashes = [md5_from_hash([checksum]), checksum]
        data['checksum'] = checksum
    else:
        hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

    # TODO(dcramer): temp workaround for complexity
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, six.string_types):
        message = force_text(message)

    for value in six.itervalues(event_metadata):
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    if culprit and culprit not in message:
        culprit_u = force_text(culprit, errors='replace')
        message = u'{} {}'.format(message, culprit_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    received_timestamp = event.data.get('received') or float(event.datetime.strftime('%s'))

    group_kwargs = kwargs.copy()
    group_kwargs.update({
        'culprit': culprit,
        'logger': logger_name,
        'level': level,
        'last_seen': date,
        'first_seen': date,
        'active_at': date,
        'data': {
            'last_received': received_timestamp,
            'type': event_type.key,
            # we cache the events metadata on the group to ensure its
            # accessible in the stream
            'metadata': event_metadata,
        },
    })

    if release:
        group_kwargs['first_release'] = release

    try:
        group, is_new, is_regression, is_sample = self._save_aggregate(
            event=event, hashes=hashes, release=release, **group_kwargs)
    except HashDiscarded:
        event_discarded.send_robust(
            project=project,
            sender=EventManager,
        )

        metrics.incr(
            'events.discarded',
            skip_internal=True,
            tags={
                'organization_id': project.organization_id,
                'platform': platform,
            },
        )
        raise
    else:
        event_saved.send_robust(
            project=project,
            sender=EventManager,
        )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    # When an event was sampled, the canonical source of truth
    # is the EventMapping table since we aren't going to be writing out an actual
    # Event row. Otherwise, if the Event isn't being sampled, we can safely
    # rely on the Event table itself as the source of truth and ignore
    # EventMapping since it's redundant information.
    if is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(EventMapping)):
                EventMapping.objects.create(project=project, group=group, event_id=event_id)
        except IntegrityError:
            self.logger.info('duplicate.found', exc_info=True, extra={
                'event_uuid': event_id,
                'project_id': project.id,
                'group_id': group.id,
                'model': EventMapping.__name__,
            })
            return event

    environment = Environment.get_or_create(
        project=project,
        name=environment,
    )

    group_environment, is_new_group_environment = GroupEnvironment.get_or_create(
        group_id=group.id,
        environment_id=environment.id,
        defaults={
            'first_release_id': release.id if release else None,
        },
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        ReleaseProjectEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    counters = [
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ]

    if release:
        counters.append((tsdb.models.release, release.id))

    tsdb.incr_multi(counters, timestamp=event.datetime, environment_id=environment.id)

    frequencies = [
        # (tsdb.models.frequent_projects_by_organization, {
        #     project.organization_id: {
        #         project.id: 1,
        #     },
        # }),
        # (tsdb.models.frequent_issues_by_project, {
        #     project.id: {
        #         group.id: 1,
        #     },
        # })
        (tsdb.models.frequent_environments_by_group, {
            group.id: {
                environment.id: 1,
            },
        })
    ]

    if release:
        frequencies.append((tsdb.models.frequent_releases_by_group, {
            group.id: {
                grouprelease.id: 1,
            },
        }))

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    UserReport.objects.filter(
        project=project, event_id=event_id,
    ).update(
        group=group,
        environment=environment,
    )

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info('duplicate.found', exc_info=True, extra={
                'event_uuid': event_id,
                'project_id': project.id,
                'group_id': group.id,
                'model': Event.__name__,
            })
            return event

        index_event_tags.delay(
            organization_id=project.organization_id,
            project_id=project.id,
            group_id=group.id,
            environment_id=environment.id,
            event_id=event.id,
            tags=tags,
            date_added=event.datetime,
        )

    if event_user:
        tsdb.record_multi(
            (
                (tsdb.models.users_affected_by_group, group.id, (event_user.tag_value, )),
                (tsdb.models.users_affected_by_project, project.id, (event_user.tag_value, )),
            ),
            timestamp=event.datetime,
            environment_id=environment.id,
        )

    if release:
        if is_new:
            buffer.incr(ReleaseProject, {'new_groups': 1}, {
                'release_id': release.id,
                'project_id': project.id,
            })
        if is_new_group_environment:
            buffer.incr(ReleaseProjectEnvironment, {'new_issues_count': 1}, {
                'project_id': project.id,
                'release_id': release.id,
                'environment_id': environment.id,
            })

    safe_execute(Group.objects.add_tags, group, environment, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=hashes[0],
        )
    else:
        self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

    metrics.timing(
        'events.latency',
        received_timestamp - recorded_timestamp,
        tags={
            'project_id': project.id,
        },
    )

    return event
def run_post_process_forwarder(self, consumer_group, commit_log_topic,
                               synchronize_commit_group, commit_batch_size=100,
                               initial_offset_reset='latest'):
    logger.debug('Starting post-process forwarder...')

    consumer = SynchronizedConsumer(
        bootstrap_servers=self.producer_configuration['bootstrap.servers'],
        consumer_group=consumer_group,
        commit_log_topic=commit_log_topic,
        synchronize_commit_group=synchronize_commit_group,
        initial_offset_reset=initial_offset_reset,
    )

    # maps (topic, partition) -> next offset to commit; ``None`` marks a
    # partition that has been assigned but not yet processed
    owned_partition_offsets = {}

    def commit(partitions):
        results = consumer.commit(offsets=partitions, asynchronous=False)

        # materialize the filter so len() works under both Python 2 and 3
        errors = list(filter(lambda i: i.error is not None, results))
        if errors:
            raise Exception(
                'Failed to commit %s/%s partitions: %r' % (
                    len(errors), len(partitions), errors))

        return results

    def on_assign(consumer, partitions):
        logger.debug('Received partition assignment: %r', partitions)

        for i in partitions:
            if i.offset == OFFSET_INVALID:
                updated_offset = None
            elif i.offset < 0:
                raise Exception(
                    'Received unexpected negative offset during partition assignment: %r' % (i, ))
            else:
                updated_offset = i.offset

            key = (i.topic, i.partition)
            previous_offset = owned_partition_offsets.get(key, None)
            if previous_offset is not None and previous_offset != updated_offset:
                logger.warning(
                    'Received new offset for owned partition %r, will overwrite previous stored offset %r with %r.',
                    key, previous_offset, updated_offset)

            owned_partition_offsets[key] = updated_offset

    def on_revoke(consumer, partitions):
        logger.debug('Revoked partition assignment: %r', partitions)

        offsets_to_commit = []

        for i in partitions:
            key = (i.topic, i.partition)

            try:
                offset = owned_partition_offsets.pop(key)
            except KeyError:
                logger.warning(
                    'Received unexpected partition revocation for unowned partition: %r',
                    i, exc_info=True)
                continue

            if offset is None:
                logger.debug('Skipping commit of unprocessed partition: %r', i)
                continue

            offsets_to_commit.append(TopicPartition(i.topic, i.partition, offset))

        if offsets_to_commit:
            logger.debug(
                'Committing offset(s) for %s revoked partition(s): %r',
                len(offsets_to_commit), offsets_to_commit)
            commit(offsets_to_commit)

    consumer.subscribe(
        [self.publish_topic],
        on_assign=on_assign,
        on_revoke=on_revoke,
    )

    def commit_offsets():
        offsets_to_commit = []
        for (topic, partition), offset in owned_partition_offsets.items():
            if offset is None:
                logger.debug(
                    'Skipping commit of unprocessed partition: %r',
                    (topic, partition))
                continue

            offsets_to_commit.append(TopicPartition(topic, partition, offset))

        if offsets_to_commit:
            logger.debug(
                'Committing offset(s) for %s owned partition(s): %r',
                len(offsets_to_commit), offsets_to_commit)
            commit(offsets_to_commit)

    try:
        i = 0
        while True:
            message = consumer.poll(0.1)
            if message is None:
                continue

            error = message.error()
            if error is not None:
                raise Exception(error)

            key = (message.topic(), message.partition())
            if key not in owned_partition_offsets:
                logger.warning('Skipping message for unowned partition: %r', key)
                continue

            i = i + 1
            owned_partition_offsets[key] = message.offset() + 1

            task_kwargs = get_task_kwargs_for_message(message.value())
            if task_kwargs is not None:
                post_process_group.delay(**task_kwargs)

            if i % commit_batch_size == 0:
                commit_offsets()
    except KeyboardInterrupt:
        pass

    logger.debug('Committing offsets and closing consumer...')

    commit_offsets()

    consumer.close()
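# NOTE: illustration only, not part of the original module. A stub-based
# sketch of the commit-batching behaviour in the forwarder loop above:
# offsets are tracked as "last seen + 1", flushed every commit_batch_size
# messages, and flushed once more on shutdown. FakeMessage and drain() are
# hypothetical stand-ins; no Kafka connection is involved.
class FakeMessage(object):
    def __init__(self, offset):
        self._offset = offset

    def offset(self):
        return self._offset


def drain(messages, commit_batch_size, commit):
    next_offset = None
    for i, message in enumerate(messages, 1):
        next_offset = message.offset() + 1
        if i % commit_batch_size == 0:
            commit(next_offset)
    # mirror the final commit_offsets() call after KeyboardInterrupt
    if next_offset is not None:
        commit(next_offset)


committed = []
drain([FakeMessage(o) for o in range(10)], 4, committed.append)
assert committed == [4, 8, 10]  # two batched commits, then the final flush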
def save(self, project, raw=False):
    from sentry.tasks.post_process import index_event_tags

    project = Project.objects.get_from_cache(id=project)

    data = self.data.copy()

    # First we pull out our top-level (non-data attr) kwargs
    event_id = data.pop('event_id')
    level = data.pop('level')

    culprit = data.pop('transaction', None)
    if not culprit:
        culprit = data.pop('culprit', None)

    logger_name = data.pop('logger', None)
    server_name = data.pop('server_name', None)
    site = data.pop('site', None)
    checksum = data.pop('checksum', None)
    fingerprint = data.pop('fingerprint', None)
    platform = data.pop('platform', None)
    release = data.pop('release', None)
    dist = data.pop('dist', None)
    environment = data.pop('environment', None)

    # unused
    time_spent = data.pop('time_spent', None)
    message = data.pop('message', '')

    if not culprit:
        # if we generate an implicit culprit, lets not call it a
        # transaction
        transaction_name = None
        culprit = generate_culprit(data, platform=platform)
    else:
        transaction_name = culprit

    date = datetime.fromtimestamp(data.pop('timestamp'))
    date = date.replace(tzinfo=timezone.utc)

    kwargs = {
        'platform': platform,
    }

    event = Event(
        project_id=project.id,
        event_id=event_id,
        data=data,
        time_spent=time_spent,
        datetime=date,
        **kwargs
    )
    event._project_cache = project

    # convert this to a dict to ensure we're only storing one value per key
    # as most parts of Sentry dont currently play well with multiple values
    tags = dict(data.get('tags') or [])
    tags['level'] = LOG_LEVELS[level]
    if logger_name:
        tags['logger'] = logger_name
    if server_name:
        tags['server_name'] = server_name
    if site:
        tags['site'] = site
    if environment:
        tags['environment'] = environment
    if transaction_name:
        tags['transaction'] = transaction_name

    if release:
        # dont allow a conflicting 'release' tag
        if 'release' in tags:
            del tags['release']
        release = Release.get_or_create(
            project=project,
            version=release,
            date_added=date,
        )
        tags['sentry:release'] = release.version

    if dist and release:
        dist = release.add_dist(dist, date)
        tags['sentry:dist'] = dist.name
    else:
        dist = None

    event_user = self._get_event_user(project, data)
    if event_user:
        # dont allow a conflicting 'user' tag
        if 'user' in tags:
            del tags['user']
        tags['sentry:user'] = event_user.tag_value

    # At this point we want to normalize the in_app values in case the
    # clients did not set this appropriately so far.
    normalize_in_app(data)

    for plugin in plugins.for_project(project, version=None):
        added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False)
        if added_tags:
            # plugins should not override user provided tags
            for key, value in added_tags:
                tags.setdefault(key, value)

    # tags are stored as a tuple
    tags = tags.items()

    # XXX(dcramer): we're relying on mutation of the data object to ensure
    # this propagates into Event
    data['tags'] = tags
    data['fingerprint'] = fingerprint or ['{{ default }}']

    for path, iface in six.iteritems(event.interfaces):
        data['tags'].extend(iface.iter_tags())
        # Get rid of ephemeral interface data
        if iface.ephemeral:
            data.pop(iface.get_path(), None)

    # prioritize fingerprint over checksum as its likely the client defaulted
    # a checksum whereas the fingerprint was explicit
    if fingerprint:
        hashes = [
            md5_from_hash(h)
            for h in get_hashes_from_fingerprint(event, fingerprint)
        ]
    elif checksum:
        hashes = [checksum]
        data['checksum'] = checksum
    else:
        hashes = [md5_from_hash(h) for h in get_hashes_for_event(event)]

    # TODO(dcramer): temp workaround for complexity
    data['message'] = message
    event_type = eventtypes.get(data.get('type', 'default'))(data)
    event_metadata = event_type.get_metadata()
    # TODO(dcramer): temp workaround for complexity
    del data['message']

    data['type'] = event_type.key
    data['metadata'] = event_metadata

    # index components into ``Event.message``
    # See GH-3248
    if event_type.key != 'default':
        if 'sentry.interfaces.Message' in data and \
                data['sentry.interfaces.Message']['message'] != message:
            message = u'{} {}'.format(
                message,
                data['sentry.interfaces.Message']['message'],
            )

    if not message:
        message = ''
    elif not isinstance(message, six.string_types):
        message = force_text(message)

    for value in six.itervalues(event_metadata):
        value_u = force_text(value, errors='replace')
        if value_u not in message:
            message = u'{} {}'.format(message, value_u)

    if culprit and culprit not in message:
        culprit_u = force_text(culprit, errors='replace')
        message = u'{} {}'.format(message, culprit_u)

    message = trim(message.strip(), settings.SENTRY_MAX_MESSAGE_LENGTH)

    event.message = message
    kwargs['message'] = message

    group_kwargs = kwargs.copy()
    group_kwargs.update(
        {
            'culprit': culprit,
            'logger': logger_name,
            'level': level,
            'last_seen': date,
            'first_seen': date,
            'active_at': date,
            'data': {
                'last_received':
                event.data.get('received') or float(event.datetime.strftime('%s')),
                'type': event_type.key,
                # we cache the events metadata on the group to ensure its
                # accessible in the stream
                'metadata': event_metadata,
            },
        }
    )

    if release:
        group_kwargs['first_release'] = release

    group, is_new, is_regression, is_sample = self._save_aggregate(
        event=event, hashes=hashes, release=release, **group_kwargs
    )

    event.group = group
    # store a reference to the group id to guarantee validation of isolation
    event.data.bind_ref(event)

    try:
        with transaction.atomic(using=router.db_for_write(EventMapping)):
            EventMapping.objects.create(project=project, group=group, event_id=event_id)
    except IntegrityError:
        self.logger.info(
            'duplicate.found',
            exc_info=True,
            extra={
                'event_uuid': event_id,
                'project_id': project.id,
                'group_id': group.id,
                'model': EventMapping.__name__,
            }
        )
        return event

    environment = Environment.get_or_create(
        project=project,
        name=environment,
    )

    if release:
        ReleaseEnvironment.get_or_create(
            project=project,
            release=release,
            environment=environment,
            datetime=date,
        )

        grouprelease = GroupRelease.get_or_create(
            group=group,
            release=release,
            environment=environment,
            datetime=date,
        )

    counters = [
        (tsdb.models.group, group.id),
        (tsdb.models.project, project.id),
    ]

    if release:
        counters.append((tsdb.models.release, release.id))

    tsdb.incr_multi(counters, timestamp=event.datetime)

    frequencies = [
        # (tsdb.models.frequent_projects_by_organization, {
        #     project.organization_id: {
        #         project.id: 1,
        #     },
        # }),
        # (tsdb.models.frequent_issues_by_project, {
        #     project.id: {
        #         group.id: 1,
        #     },
        # })
        (tsdb.models.frequent_environments_by_group, {
            group.id: {
                environment.id: 1,
            },
        })
    ]

    if release:
        frequencies.append(
            (tsdb.models.frequent_releases_by_group, {
                group.id: {
                    grouprelease.id: 1,
                },
            })
        )

    tsdb.record_frequency_multi(frequencies, timestamp=event.datetime)

    UserReport.objects.filter(
        project=project,
        event_id=event_id,
    ).update(group=group)

    # save the event unless its been sampled
    if not is_sample:
        try:
            with transaction.atomic(using=router.db_for_write(Event)):
                event.save()
        except IntegrityError:
            self.logger.info(
                'duplicate.found',
                exc_info=True,
                extra={
                    'event_uuid': event_id,
                    'project_id': project.id,
                    'group_id': group.id,
                    'model': Event.__name__,
                }
            )
            return event

        index_event_tags.delay(
            organization_id=project.organization_id,
            project_id=project.id,
            group_id=group.id,
            event_id=event.id,
            tags=tags,
        )

    if event_user:
        tsdb.record_multi(
            (
                (tsdb.models.users_affected_by_group, group.id,
                 (event_user.tag_value, )),
                (tsdb.models.users_affected_by_project, project.id,
                 (event_user.tag_value, )),
            ),
            timestamp=event.datetime
        )

    if is_new and release:
        buffer.incr(
            ReleaseProject, {'new_groups': 1}, {
                'release_id': release.id,
                'project_id': project.id,
            }
        )

    safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False)

    if not raw:
        if not project.first_event:
            project.update(first_event=date)
            first_event_received.send(project=project, group=group, sender=Project)

        post_process_group.delay(
            group=group,
            event=event,
            is_new=is_new,
            is_sample=is_sample,
            is_regression=is_regression,
        )
    else:
        self.logger.info('post_process.skip.raw_event', extra={'event_id': event.id})

    # TODO: move this to the queue
    if is_regression and not raw:
        regression_signal.send_robust(sender=Group, instance=group)

    return event
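# NOTE: illustration only, not part of the original module. A condensed
# sketch of the message-indexing step shared by both `save` variants above:
# event-type metadata values and the culprit are appended to the searchable
# message only when not already present, then the result is trimmed.
# build_search_message and max_length are hypothetical; the real code also
# coerces values through force_text(errors='replace') and trims with
# settings.SENTRY_MAX_MESSAGE_LENGTH.
def build_search_message(message, event_metadata, culprit, max_length=1024):
    message = message or ''
    for value in event_metadata.values():
        if value not in message:
            message = u'{} {}'.format(message, value)
    if culprit and culprit not in message:
        message = u'{} {}'.format(message, culprit)
    return message.strip()[:max_length]


assert build_search_message(
    'boom', {'value': 'division by zero'}, 'app.views.index',
) == 'boom division by zero app.views.index'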