def validate_and_normalize(report, client_ip=None):
    manager = EventManager(report, client_ip=client_ip)
    manager.normalize()
    return manager.get_data()
def test_marks_as_unresolved_with_new_release_with_integration(
    self,
    plugin_is_regression,
    mock_send_activity_notifications_delay,
    mock_sync_status_outbound,
):
    plugin_is_regression.return_value = True

    old_release = Release.objects.create(
        version="a",
        organization_id=self.project.organization_id,
        date_added=timezone.now() - timedelta(minutes=30),
    )
    old_release.add_project(self.project)

    manager = EventManager(
        make_event(
            event_id="a" * 32,
            checksum="a" * 32,
            timestamp=time() - 50000,  # need to work around active_at
            release=old_release.version,
        )
    )
    event = manager.save(1)

    group = event.group
    org = group.organization

    integration = Integration.objects.create(provider="example", name="Example")
    integration.add_organization(org, self.user)
    OrganizationIntegration.objects.filter(
        integration_id=integration.id, organization_id=group.organization.id
    ).update(
        config={
            "sync_comments": True,
            "sync_status_outbound": True,
            "sync_status_inbound": True,
            "sync_assignee_outbound": True,
            "sync_assignee_inbound": True,
        }
    )

    external_issue = ExternalIssue.objects.get_or_create(
        organization_id=org.id, integration_id=integration.id, key="APP-%s" % group.id
    )[0]

    GroupLink.objects.get_or_create(
        group_id=group.id,
        project_id=group.project_id,
        linked_type=GroupLink.LinkedType.issue,
        linked_id=external_issue.id,
        relationship=GroupLink.Relationship.references,
    )[0]

    group.update(status=GroupStatus.RESOLVED)

    resolution = GroupResolution.objects.create(release=old_release, group=group)
    activity = Activity.objects.create(
        group=group,
        project=group.project,
        type=Activity.SET_RESOLVED_IN_RELEASE,
        ident=resolution.id,
        data={"version": ""},
    )

    manager = EventManager(
        make_event(
            event_id="b" * 32, checksum="a" * 32, timestamp=time(), release=old_release.version
        )
    )

    with self.tasks():
        with self.feature({"organizations:integrations-issue-sync": True}):
            event = manager.save(1)
            assert event.group_id == group.id

            group = Group.objects.get(id=group.id)
            assert group.status == GroupStatus.RESOLVED

            activity = Activity.objects.get(id=activity.id)
            assert activity.data["version"] == ""

            assert GroupResolution.objects.filter(group=group).exists()

            manager = EventManager(
                make_event(event_id="c" * 32, checksum="a" * 32, timestamp=time(), release="b")
            )
            event = manager.save(1)
            mock_sync_status_outbound.assert_called_once_with(
                external_issue, False, event.group.project_id
            )
            assert event.group_id == group.id

            group = Group.objects.get(id=group.id)
            assert group.status == GroupStatus.UNRESOLVED

            activity = Activity.objects.get(id=activity.id)
            assert activity.data["version"] == "b"

            assert not GroupResolution.objects.filter(group=group).exists()

            activity = Activity.objects.get(group=group, type=Activity.SET_REGRESSION)
            mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
def test_transaction_as_culprit(self):
    manager = EventManager(make_event(transaction="foobar"))
    manager.normalize()
    event = manager.save(1)
    assert event.transaction == "foobar"
    assert event.culprit == "foobar"
def test_event_user(self):
    manager = EventManager(make_event(
        event_id='a',
        environment='totally unique environment',
        **{'sentry.interfaces.User': {
            'id': '1',
        }}
    ))
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    environment_id = Environment.get_for_organization_id(
        event.project.organization_id,
        'totally unique environment',
    ).id

    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        (event.group.id, ),
        event.datetime,
        event.datetime,
    ) == {
        event.group.id: 1,
    }
    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_project,
        (event.project.id, ),
        event.datetime,
        event.datetime,
    ) == {
        event.project.id: 1,
    }
    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_group,
        (event.group.id, ),
        event.datetime,
        event.datetime,
        environment_id=environment_id,
    ) == {
        event.group.id: 1,
    }
    assert tsdb.get_distinct_counts_totals(
        tsdb.models.users_affected_by_project,
        (event.project.id, ),
        event.datetime,
        event.datetime,
        environment_id=environment_id,
    ) == {
        event.project.id: 1,
    }

    euser = EventUser.objects.get(
        project_id=self.project.id,
        ident='1',
    )
    assert event.get_tag('sentry:user') == euser.tag_value

    # ensure event user is mapped to tags in second attempt
    manager = EventManager(
        make_event(
            event_id='b',
            **{'sentry.interfaces.User': {
                'id': '1',
                'name': 'jane',
            }}
        )
    )
    manager.normalize()
    with self.tasks():
        event = manager.save(self.project.id)

    euser = EventUser.objects.get(id=euser.id)
    assert event.get_tag('sentry:user') == euser.tag_value
    assert euser.name == 'jane'
    assert euser.ident == '1'
def from_kwargs(self, project, **kwargs):
    from sentry.event_manager import EventManager

    manager = EventManager(kwargs)
    manager.normalize()
    return manager.save(project)
def validate_and_normalize(data):
    manager = EventManager(data)
    manager.normalize()
    return manager.get_data()
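# Note: the validate_and_normalize helpers above capture the
# normalize-then-inspect flow, while the save-based snippets elsewhere in
# this listing persist the event. A minimal sketch of both flows, assuming
# only what these examples already show (EventManager, normalize, get_data,
# save); the helper names and the dict-shaped payload are illustrative
# assumptions, not part of the source.
from sentry.event_manager import EventManager

def normalize_only(payload):
    manager = EventManager(payload)
    manager.normalize()        # validate and canonicalize the payload in place
    return manager.get_data()  # normalized data; nothing is persisted

def normalize_and_save(payload, project_id):
    manager = EventManager(payload)
    manager.normalize()
    return manager.save(project_id)  # persist and return the saved event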
def make_release_event(self, release_name, project_id):
    manager = EventManager(make_event(release=release_name))
    manager.normalize()
    event = manager.save(project_id)
    return event
def test_marks_as_unresolved_only_with_new_release(self, plugin_is_regression):
    plugin_is_regression.return_value = True

    old_release = Release.objects.create(
        version='a',
        project=self.project,
        date_added=timezone.now() - timedelta(minutes=30),
    )

    manager = EventManager(self.make_event(
        event_id='a' * 32,
        checksum='a' * 32,
        timestamp=time() - 50000,  # need to work around active_at
        release=old_release.version,
    ))
    event = manager.save(1)

    group = event.group
    group.update(status=GroupStatus.RESOLVED)

    resolution = GroupResolution.objects.create(
        release=old_release,
        group=group,
    )
    activity = Activity.objects.create(
        group=group,
        project=group.project,
        type=Activity.SET_RESOLVED_IN_RELEASE,
        ident=resolution.id,
        data={'version': ''},
    )

    manager = EventManager(self.make_event(
        event_id='b' * 32,
        checksum='a' * 32,
        timestamp=time(),
        release=old_release.version,
    ))
    event = manager.save(1)
    assert event.group_id == group.id

    group = Group.objects.get(id=group.id)
    assert group.status == GroupStatus.RESOLVED

    activity = Activity.objects.get(id=activity.id)
    assert activity.data['version'] == ''

    assert GroupResolution.objects.filter(group=group).exists()

    manager = EventManager(self.make_event(
        event_id='c' * 32,
        checksum='a' * 32,
        timestamp=time(),
        release='b',
    ))
    event = manager.save(1)
    assert event.group_id == group.id

    group = Group.objects.get(id=group.id)
    assert group.status == GroupStatus.UNRESOLVED

    activity = Activity.objects.get(id=activity.id)
    assert activity.data['version'] == 'b'

    assert not GroupResolution.objects.filter(group=group).exists()

    assert Activity.objects.filter(
        group=group,
        type=Activity.SET_REGRESSION,
    ).exists()
def test_long_message(self):
    manager = EventManager(self.make_event(
        message='x' * (settings.SENTRY_MAX_MESSAGE_LENGTH + 1),
    ))
    data = manager.normalize()
    assert len(data['message']) == settings.SENTRY_MAX_MESSAGE_LENGTH
def alert(request): platform = request.GET.get("platform", "python") org = Organization(id=1, slug="example", name="Example") project = Project(id=1, slug="example", name="Example", organization=org) random = get_random(request) group = next(make_group_generator(random, project)) data = dict(load_data(platform)) data["message"] = group.message data["event_id"] = "44f1419e73884cd2b45c79918f4b6dc4" data.pop("logentry", None) data["environment"] = "prod" data["tags"] = [ ("logger", "javascript"), ("environment", "prod"), ("level", "error"), ("device", "Other"), ] event_manager = EventManager(data) event_manager.normalize() data = event_manager.get_data() event = event_manager.save(project.id) # Prevent CI screenshot from constantly changing event.data["timestamp"] = 1504656000.0 # datetime(2017, 9, 6, 0, 0) event_type = get_event_type(event.data) group.message = event.search_message group.data = { "type": event_type.key, "metadata": event_type.get_metadata(data) } rule = Rule(label="An example rule") # XXX: this interface_list code needs to be the same as in # src/sentry/mail/adapter.py interface_list = [] for interface in six.itervalues(event.interfaces): body = interface.to_email_html(event) if not body: continue text_body = interface.to_string(event) interface_list.append( (interface.get_title(), mark_safe(body), text_body)) return MailPreview( html_template="sentry/emails/error.html", text_template="sentry/emails/error.txt", context={ "rule": rule, "group": group, "event": event, "link": "http://example.com/link", "interfaces": interface_list, "tags": event.tags, "project_label": project.slug, "commits": [{ # TODO(dcramer): change to use serializer "repository": { "status": "active", "name": "Example Repo", "url": "https://github.com/example/example", "dateCreated": "2018-02-28T23:39:22.402Z", "provider": { "id": "github", "name": "GitHub" }, "id": "1", }, "score": 2, "subject": "feat: Do something to raven/base.py", "message": "feat: Do something to raven/base.py\naptent vivamus vehicula tempus volutpat hac tortor", "id": "1b17483ffc4a10609e7921ee21a8567bfe0ed006", "shortId": "1b17483", "author": { "username": "******", "isManaged": False, "lastActive": "2018-03-01T18:25:28.149Z", "id": "1", "isActive": True, "has2fa": False, "name": "*****@*****.**", "avatarUrl": "https://secure.gravatar.com/avatar/51567a4f786cd8a2c41c513b592de9f9?s=32&d=mm", "dateJoined": "2018-02-27T22:04:32.847Z", "emails": [{ "is_verified": False, "id": "1", "email": "*****@*****.**" }], "avatar": { "avatarUuid": None, "avatarType": "letter_avatar" }, "lastLogin": "******", "email": "*****@*****.**", }, }], }, ).render(request)
def digest(request):
    random = get_random(request)

    # TODO: Refactor all of these into something more manageable.
    org = Organization(id=1, slug="example", name="Example Organization")
    project = Project(id=1, slug="example", name="Example Project", organization=org)

    rules = {
        i: Rule(id=i, project=project, label="Rule #%s" % (i,))
        for i in range(1, random.randint(2, 4))
    }
    state = {
        "project": project,
        "groups": {},
        "rules": rules,
        "event_counts": {},
        "user_counts": {},
    }

    records = []
    group_generator = make_group_generator(random, project)
    for i in range(random.randint(1, 30)):
        group = next(group_generator)
        state["groups"][group.id] = group

        offset = timedelta(seconds=0)
        for i in range(random.randint(1, 10)):
            offset += timedelta(seconds=random.random() * 120)

            data = dict(load_data("python"))
            data["message"] = group.message
            data.pop("logentry", None)

            event_manager = EventManager(data)
            event_manager.normalize()
            data = event_manager.get_data()

            data["timestamp"] = random.randint(
                to_timestamp(group.first_seen), to_timestamp(group.last_seen)
            )

            event = eventstore.create_event(
                event_id=uuid.uuid4().hex,
                group_id=group.id,
                project_id=project.id,
                data=data.data,
            )

            records.append(
                Record(
                    event.event_id,
                    Notification(
                        event,
                        random.sample(
                            list(state["rules"].keys()),
                            random.randint(1, len(state["rules"])),
                        ),
                    ),
                    to_timestamp(event.datetime),
                )
            )

            state["event_counts"][group.id] = random.randint(10, 1e4)
            state["user_counts"][group.id] = random.randint(10, 1e4)

    digest = build_digest(project, records, state)
    start, end, counts = get_digest_metadata(digest)

    context = {
        "project": project,
        "counts": counts,
        "digest": digest,
        "start": start,
        "end": end,
        "referrer": "digest_email",
    }
    add_unsubscribe_link(context)

    return MailPreview(
        html_template="sentry/emails/digests/body.html",
        text_template="sentry/emails/digests/body.txt",
        context=context,
    ).render(request)
def _do_save_event(
    cache_key=None, data=None, start_time=None, event_id=None, project_id=None, **kwargs
):
    """
    Saves an event to the database.
    """
    set_current_project(project_id)

    from sentry.event_manager import EventManager, HashDiscarded

    event_type = "none"

    if cache_key and data is None:
        with metrics.timer("tasks.store.do_save_event.get_cache") as metric_tags:
            data = event_processing_store.get(cache_key)
            if data is not None:
                metric_tags["event_type"] = event_type = data.get("type") or "none"

    with metrics.global_tags(event_type=event_type):
        if data is not None:
            data = CanonicalKeyDict(data)

        if event_id is None and data is not None:
            event_id = data["event_id"]

        # only when we come from reprocessing we get a project_id sent into
        # the task.
        if project_id is None:
            project_id = data.pop("project")
            set_current_project(project_id)

        # We only need to delete raw events for events that support
        # reprocessing. If the data cannot be found we want to assume
        # that we need to delete the raw event.
        if not data or reprocessing.event_supports_reprocessing(data):
            with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
                delete_raw_event(project_id, event_id, allow_hint_clear=True)

        # This covers two cases: where data is None because we did not manage
        # to fetch it from the default cache or the empty dictionary was
        # stored in the default cache. The former happens if the event
        # expired while being on the queue, the second happens on reprocessing
        # if the raw event was deleted concurrently while we held on to
        # it. This causes the node store to delete the data and we end up
        # fetching an empty dict. We could in theory not invoke `save_event`
        # in those cases but it's important that we always clean up the
        # reprocessing reports correctly or they will screw up the UI. So
        # to future proof this correctly we just handle this case here.
        if not data:
            metrics.incr(
                "events.failed", tags={"reason": "cache", "stage": "post"}, skip_internal=False
            )
            return

        try:
            with metrics.timer("tasks.store.do_save_event.event_manager.save"):
                manager = EventManager(data)
                # event.project.organization is populated after this statement.
                manager.save(
                    project_id, assume_normalized=True, start_time=start_time, cache_key=cache_key
                )
        except HashDiscarded:
            pass
        finally:
            if cache_key:
                with metrics.timer("tasks.store.do_save_event.delete_cache"):
                    event_processing_store.delete_by_key(cache_key)

                with metrics.timer("tasks.store.do_save_event.delete_attachment_cache"):
                    attachment_cache.delete(cache_key)

            if start_time:
                metrics.timing(
                    "events.time-to-process", time() - start_time, instance=data["platform"]
                )
def process(self, request, project, key, auth, helper, data, attachments=None, **kwargs):
    metrics.incr('events.total')

    if not data:
        raise APIError('No JSON data was found')

    remote_addr = request.META['REMOTE_ADDR']

    event_manager = EventManager(
        data,
        project=project,
        key=key,
        auth=auth,
        client_ip=remote_addr,
        user_agent=helper.context.agent,
        version=auth.version,
        content_encoding=request.META.get('HTTP_CONTENT_ENCODING', ''),
    )
    del data

    self.pre_normalize(event_manager, helper)
    event_manager.normalize()

    agent = request.META.get('HTTP_USER_AGENT')

    # TODO: Some form of coordination between the Kafka consumer
    # and this method (the 'relay') to decide whether a 429 should
    # be returned here.

    # Everything before this will eventually be done in the relay.
    if (kafka_publisher is not None and not attachments
            and random.random() < options.get('store.kafka-sample-rate')):
        process_in_kafka = options.get('store.process-in-kafka')

        try:
            kafka_publisher.publish(
                channel=getattr(settings, 'KAFKA_EVENTS_PUBLISHER_TOPIC', 'store-events'),
                # Relay will (eventually) need to produce a Kafka message
                # with this JSON format.
                value=json.dumps({
                    'data': event_manager.get_data(),
                    'project_id': project.id,
                    'auth': {
                        'sentry_client': auth.client,
                        'sentry_version': auth.version,
                        'sentry_secret': auth.secret_key,
                        'sentry_key': auth.public_key,
                        'is_public': auth.is_public,
                    },
                    'remote_addr': remote_addr,
                    'agent': agent,
                    # Whether or not the Kafka consumer is in charge
                    # of actually processing this event.
                    'should_process': process_in_kafka,
                })
            )
        except Exception as e:
            logger.exception("Cannot publish event to Kafka: {}".format(e.message))
        else:
            if process_in_kafka:
                # This event will be processed by the Kafka consumer, so we
                # shouldn't double process it here.
                return event_manager.get_data()['event_id']

    # Everything after this will eventually be done in a Kafka consumer.
    return process_event(event_manager, project, key, remote_addr, helper, attachments)
def send(self, **kwargs):
    # TODO(dcramer): this should respect rate limits/etc and use the normal
    # pipeline

    # Report the issue to an upstream Sentry if active
    # NOTE: we don't want to check self.is_enabled() like normal, since
    # is_enabled behavior is overridden in this class. We explicitly
    # want to check if the remote is active.
    if self.remote.is_active():
        from sentry import options

        # Append some extra tags that are useful for remote reporting
        super_kwargs = copy.deepcopy(kwargs)
        super_kwargs['tags']['install-id'] = options.get('sentry:install-id')
        super(SentryInternalClient, self).send(**super_kwargs)

    if not is_current_event_safe():
        return

    from sentry.app import tsdb
    from sentry.coreapi import ClientApiHelper
    from sentry.event_manager import EventManager
    from sentry.models import Project

    helper = ClientApiHelper(
        agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
        project_id=settings.SENTRY_PROJECT,
        version=self.protocol_version,
    )

    try:
        project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
    except DatabaseError:
        self.error_logger.error('Unable to fetch internal project', exc_info=True)
        return
    except Project.DoesNotExist:
        self.error_logger.error('Internal project (id=%s) does not exist',
                                settings.SENTRY_PROJECT)
        return
    except Exception:
        self.error_logger.error(
            'Unable to fetch internal project for some unknown reason', exc_info=True)
        return

    helper.context.bind_project(project)

    metrics.incr('events.total')

    kwargs['project'] = project.id
    try:
        manager = EventManager(kwargs)
        data = manager.normalize()
        tsdb.incr_multi([
            (tsdb.models.project_total_received, project.id),
            (tsdb.models.organization_total_received, project.organization_id),
        ])
        helper.insert_data_to_database(data)
    except Exception as e:
        if self.raise_send_errors:
            raise
        self.error_logger.error(
            'Unable to record event: %s\nEvent was: %r',
            e,
            kwargs['message'],
            exc_info=True,
        )
def test_default_version(self):
    manager = EventManager(self.make_event())
    data = manager.normalize()
    assert data['version'] == '5'
def test_bad_logger(self):
    manager = EventManager(self.make_event(logger='foo bar'))
    data = manager.normalize()
    assert data['logger'] == DEFAULT_LOGGER_NAME
def test_explicit_version(self):
    manager = EventManager(self.make_event(), '6')
    data = manager.normalize()
    assert data['version'] == '6'
def alert(request):
    platform = request.GET.get('platform', 'python')
    org = Organization(
        id=1,
        slug='example',
        name='Example',
    )
    project = Project(
        id=1,
        slug='example',
        name='Example',
        organization=org,
    )
    random = get_random(request)
    group = next(make_group_generator(random, project))

    data = dict(load_data(platform))
    data['message'] = group.message
    data['event_id'] = '44f1419e73884cd2b45c79918f4b6dc4'
    data.pop('logentry', None)
    data['environment'] = 'prod'
    data['tags'] = [('logger', 'javascript'),
                    ('environment', 'prod'),
                    ('level', 'error'),
                    ('device', 'Other')]

    event_manager = EventManager(data)
    event_manager.normalize()
    data = event_manager.get_data()
    event = event_manager.save(project.id)
    # Prevent Percy screenshot from constantly changing
    event.datetime = datetime(2017, 9, 6, 0, 0)
    event.save()
    event_type = event_manager.get_event_type()

    group.message = event_manager.get_search_message()
    group.data = {
        'type': event_type.key,
        'metadata': event_type.get_metadata(data),
    }

    rule = Rule(label="An example rule")

    interface_list = []
    for interface in six.itervalues(event.interfaces):
        body = interface.to_email_html(event)
        if not body:
            continue
        interface_list.append((interface.get_title(), mark_safe(body)))

    return MailPreview(
        html_template='sentry/emails/error.html',
        text_template='sentry/emails/error.txt',
        context={
            'rule': rule,
            'group': group,
            'event': event,
            'link': 'http://example.com/link',
            'interfaces': interface_list,
            'tags': event.get_tags(),
            'project_label': project.slug,
            'commits': [{
                # TODO(dcramer): change to use serializer
                "repository": {
                    "status": "active",
                    "name": "Example Repo",
                    "url": "https://github.com/example/example",
                    "dateCreated": "2018-02-28T23:39:22.402Z",
                    "provider": {"id": "github", "name": "GitHub"},
                    "id": "1"
                },
                "score": 2,
                "subject": "feat: Do something to raven/base.py",
                "message": "feat: Do something to raven/base.py\naptent vivamus vehicula tempus volutpat hac tortor",
                "id": "1b17483ffc4a10609e7921ee21a8567bfe0ed006",
                "shortId": "1b17483",
                "author": {
                    "username": "******",
                    "isManaged": False,
                    "lastActive": "2018-03-01T18:25:28.149Z",
                    "id": "1",
                    "isActive": True,
                    "has2fa": False,
                    "name": "*****@*****.**",
                    "avatarUrl": "https://secure.gravatar.com/avatar/51567a4f786cd8a2c41c513b592de9f9?s=32&d=mm",
                    "dateJoined": "2018-02-27T22:04:32.847Z",
                    "emails": [{
                        "is_verified": False,
                        "id": "1",
                        "email": "*****@*****.**"
                    }],
                    "avatar": {"avatarUuid": None, "avatarType": "letter_avatar"},
                    "lastLogin": "******",
                    "email": "*****@*****.**"
                }
            }],
        },
    ).render(request)
def _do_save_event(cache_key=None, data=None, start_time=None, event_id=None,
                   project_id=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas
    # Project is used in the HashDiscarded handler below, so it is imported
    # here alongside ProjectKey.
    from sentry.models import Project, ProjectKey
    from sentry.utils.outcomes import Outcome, track_outcome
    from sentry.ingest.outcomes_consumer import mark_signal_sent

    event_type = "none"

    if cache_key and data is None:
        with metrics.timer("tasks.store.do_save_event.get_cache") as metric_tags:
            data = default_cache.get(cache_key)
            if data is not None:
                metric_tags["event_type"] = event_type = data.get("type") or "none"

    with metrics.global_tags(event_type=event_type):
        if data is not None:
            data = CanonicalKeyDict(data)

        if event_id is None and data is not None:
            event_id = data["event_id"]

        # only when we come from reprocessing we get a project_id sent into
        # the task.
        if project_id is None:
            project_id = data.pop("project")

        key_id = None if data is None else data.get("key_id")
        if key_id is not None:
            key_id = int(key_id)
        timestamp = to_datetime(start_time) if start_time is not None else None

        # We only need to delete raw events for events that support
        # reprocessing. If the data cannot be found we want to assume
        # that we need to delete the raw event.
        if not data or reprocessing.event_supports_reprocessing(data):
            with metrics.timer("tasks.store.do_save_event.delete_raw_event"):
                delete_raw_event(project_id, event_id, allow_hint_clear=True)

        # This covers two cases: where data is None because we did not manage
        # to fetch it from the default cache or the empty dictionary was
        # stored in the default cache. The former happens if the event
        # expired while being on the queue, the second happens on reprocessing
        # if the raw event was deleted concurrently while we held on to
        # it. This causes the node store to delete the data and we end up
        # fetching an empty dict. We could in theory not invoke `save_event`
        # in those cases but it's important that we always clean up the
        # reprocessing reports correctly or they will screw up the UI. So
        # to future proof this correctly we just handle this case here.
        if not data:
            metrics.incr(
                "events.failed", tags={"reason": "cache", "stage": "post"}, skip_internal=False
            )
            return

        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        event = None
        try:
            with metrics.timer("tasks.store.do_save_event.event_manager.save"):
                manager = EventManager(data)
                # event.project.organization is populated after this statement.
                event = manager.save(project_id, assume_normalized=True, cache_key=cache_key)

            with metrics.timer("tasks.store.do_save_event.track_outcome"):
                # This is where we can finally say that we have accepted the event.
                track_outcome(
                    event.project.organization_id,
                    event.project.id,
                    key_id,
                    Outcome.ACCEPTED,
                    None,
                    timestamp,
                    event_id,
                )
        except HashDiscarded:
            project = Project.objects.get_from_cache(id=project_id)
            reason = FilterStatKeys.DISCARDED_HASH
            project_key = None
            try:
                if key_id is not None:
                    project_key = ProjectKey.objects.get_from_cache(id=key_id)
            except ProjectKey.DoesNotExist:
                pass

            quotas.refund(project, key=project_key, timestamp=start_time)

            # There is no signal supposed to be sent for this particular
            # outcome-reason combination. Prevent the outcome consumer from
            # emitting it for now.
            #
            # XXX(markus): Revisit decision about signals once outcomes consumer is stable.
            mark_signal_sent(project_id, event_id)

            track_outcome(
                project.organization_id,
                project_id,
                key_id,
                Outcome.FILTERED,
                reason,
                timestamp,
                event_id,
            )
        finally:
            if cache_key:
                with metrics.timer("tasks.store.do_save_event.delete_cache"):
                    default_cache.delete(cache_key)

                with metrics.timer("tasks.store.do_save_event.delete_attachment_cache"):
                    # For the unlikely case that we did not manage to persist the
                    # event we also delete the key always.
                    if event is None or features.has(
                        "organizations:event-attachments", event.project.organization, actor=None
                    ):
                        attachment_cache.delete(cache_key)

            if start_time:
                metrics.timing(
                    "events.time-to-process", time() - start_time, instance=data["platform"]
                )
def process(self, request, project, auth, data, **kwargs):
    event_received.send_robust(ip=request.META['REMOTE_ADDR'], sender=type(self))

    # TODO: improve this API (e.g. make RateLimit act on __ne__)
    rate_limit = safe_execute(app.quotas.is_rate_limited, project=project)
    if isinstance(rate_limit, bool):
        rate_limit = RateLimit(is_limited=rate_limit, retry_after=None)

    if rate_limit is not None and rate_limit.is_limited:
        raise APIRateLimited(rate_limit.retry_after)

    result = plugins.first('has_perm', request.user, 'create_event', project)
    if result is False:
        raise APIForbidden('Creation of this event was blocked')

    content_encoding = request.META.get('HTTP_CONTENT_ENCODING', '')

    if content_encoding == 'gzip':
        data = decompress_gzip(data)
    elif content_encoding == 'deflate':
        data = decompress_deflate(data)
    elif not data.startswith('{'):
        data = decode_and_decompress_data(data)
    data = safely_load_json_string(data)

    try:
        # mutates data
        validate_data(project, data, auth.client)
    except InvalidData as e:
        raise APIError(u'Invalid data: %s (%s)' % (six.text_type(e), type(e)))

    # mutates data
    manager = EventManager(data, version=auth.version)
    data = manager.normalize()

    # insert IP address if not available
    if auth.is_public:
        ensure_has_ip(data, request.META['REMOTE_ADDR'])

    event_id = data['event_id']

    # TODO(dcramer): ideally we'd only validate this if the event_id was
    # supplied by the user
    cache_key = 'ev:%s:%s' % (project.id, event_id,)

    if cache.get(cache_key) is not None:
        logger.warning('Discarded recent duplicate event from project %s/%s (id=%s)',
                       project.team.slug, project.slug, event_id)
        raise InvalidRequest('An event with the same ID already exists.')

    # We filter data immediately before it ever gets into the queue
    inst = SensitiveDataFilter()
    inst.apply(data)

    # mutates data (strips a lot of context if not queued)
    insert_data_to_database(data)

    cache.set(cache_key, '', 60 * 5)

    logger.debug('New event from project %s/%s (id=%s)',
                 project.team.slug, project.slug, event_id)

    return event_id
def test_key_id_remains_in_data(self):
    manager = EventManager(make_event(key_id=12345))
    manager.normalize()
    assert manager.get_data()['key_id'] == 12345
    event = manager.save(1)
    assert event.data['key_id'] == 12345
def test_tags_as_dict(self):
    manager = EventManager(self.make_event(tags={'foo': 'bar'}))
    data = manager.normalize()
    assert data['tags'] == [('foo', 'bar')]
def test_default_fingerprint(self):
    manager = EventManager(make_event())
    manager.normalize()
    event = manager.save(self.project.id)
    assert event.data.get('fingerprint') == ['{{ default }}']
def test_interface_is_relabeled(self):
    manager = EventManager(self.make_event(user={'id': '1'}))
    data = manager.normalize()
    assert data['sentry.interfaces.User'] == {'id': '1'}
    assert 'user' not in data
def test_marks_as_unresolved_with_new_release(
    self, plugin_is_regression, mock_send_activity_notifications_delay
):
    plugin_is_regression.return_value = True

    old_release = Release.objects.create(
        version="a",
        organization_id=self.project.organization_id,
        date_added=timezone.now() - timedelta(minutes=30),
    )
    old_release.add_project(self.project)

    manager = EventManager(
        make_event(
            event_id="a" * 32,
            checksum="a" * 32,
            timestamp=time() - 50000,  # need to work around active_at
            release=old_release.version,
        )
    )
    event = manager.save(1)

    group = event.group
    group.update(status=GroupStatus.RESOLVED)

    resolution = GroupResolution.objects.create(release=old_release, group=group)
    activity = Activity.objects.create(
        group=group,
        project=group.project,
        type=Activity.SET_RESOLVED_IN_RELEASE,
        ident=resolution.id,
        data={"version": ""},
    )

    manager = EventManager(
        make_event(
            event_id="b" * 32, checksum="a" * 32, timestamp=time(), release=old_release.version
        )
    )
    event = manager.save(1)
    assert event.group_id == group.id

    group = Group.objects.get(id=group.id)
    assert group.status == GroupStatus.RESOLVED

    activity = Activity.objects.get(id=activity.id)
    assert activity.data["version"] == ""

    assert GroupResolution.objects.filter(group=group).exists()

    manager = EventManager(
        make_event(event_id="c" * 32, checksum="a" * 32, timestamp=time(), release="b")
    )
    event = manager.save(1)
    assert event.group_id == group.id

    group = Group.objects.get(id=group.id)
    assert group.status == GroupStatus.UNRESOLVED

    activity = Activity.objects.get(id=activity.id)
    assert activity.data["version"] == "b"

    assert not GroupResolution.objects.filter(group=group).exists()

    activity = Activity.objects.get(group=group, type=Activity.SET_REGRESSION)
    mock_send_activity_notifications_delay.assert_called_once_with(activity.id)
def test_long_culprit(self):
    manager = EventManager(
        self.make_event(culprit='x' * (MAX_CULPRIT_LENGTH + 1))
    )
    data = manager.normalize()
    assert len(data['culprit']) == MAX_CULPRIT_LENGTH
def test_invalid_transaction(self):
    dict_input = {"messages": "foo"}
    manager = EventManager(make_event(transaction=dict_input))
    manager.normalize()
    event = manager.save(1)
    assert event.transaction is None
def test_long_transaction(self):
    manager = EventManager(
        self.make_event(transaction='x' * (MAX_CULPRIT_LENGTH + 1))
    )
    data = manager.normalize()
    assert len(data['transaction']) == MAX_CULPRIT_LENGTH
def test_culprit_is_not_transaction(self):
    manager = EventManager(make_event(culprit="foobar"))
    manager.normalize()
    event1 = manager.save(1)
    assert event1.transaction is None
    assert event1.culprit == "foobar"
def test_inferred_culprit_from_empty_stacktrace(self):
    manager = EventManager(make_event(stacktrace={"frames": []}))
    manager.normalize()
    event = manager.save(1)
    assert event.culprit == ''