def convert_args(self, request, issue_id, *args, **kwargs):
    """Resolve ``issue_id`` to a visible Group and bind it into ``kwargs``.

    Follows merge redirects, checks object permissions, tags the raven
    context, and hides groups whose status is in ``EXCLUDED_STATUSES``.
    Raises ``ResourceDoesNotExist`` for unknown or excluded groups.
    """
    # TODO(tkaemming): Ideally, this would return a 302 response, rather
    # than just returning the data that is bound to the new group. (It
    # technically shouldn't be a 301, since the response could change again
    # as the result of another merge operation that occurs later. This
    # wouldn't break anything though -- it will just be a "permanent"
    # redirect to *another* permanent redirect.) This would require
    # rebuilding the URL in one of two ways: either by hacking it in with
    # string replacement, or making the endpoint aware of the URL pattern
    # that caused it to be dispatched, and reversing it with the correct
    # `issue_id` keyword argument.
    try:
        # Follow merge redirects so stale issue ids keep resolving.
        group, _ = get_group_with_redirect(
            issue_id,
            queryset=Group.objects.select_related('project', 'project__organization'),
        )
    except Group.DoesNotExist:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, group)

    raven.tags_context({
        'project': group.project_id,
        'organization': group.project.organization_id,
    })

    # Groups in an excluded status are presented as missing.
    if group.status in EXCLUDED_STATUSES:
        raise ResourceDoesNotExist

    # Expose the organization on the underlying Django request for
    # downstream middleware/handlers.
    request._request.organization = group.project.organization

    kwargs['group'] = group
    return (args, kwargs)
def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
    """Dispatch a CSP report submission (POST-only, querystring auth).

    Binds project and auth context onto ``helper`` and raven, then defers
    to the base ``dispatch``. Raises ``APIError`` on bad content type or
    a project mismatch between URL and auth key.
    """
    # A CSP report is sent as a POST request with no Origin or Referer
    # header. What we're left with is a 'document-uri' key which is
    # inside of the JSON body of the request. This 'document-uri' value
    # should be treated as an origin check since it refers to the page
    # that triggered the report. The Content-Type is supposed to be
    # `application/csp-report`, but FireFox sends it as `application/json`.
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])

    if request.META.get('CONTENT_TYPE') not in self.content_types:
        raise APIError('Invalid Content-Type')

    request.user = AnonymousUser()

    project = self._get_project_from_id(project_id)
    helper.context.bind_project(project)
    raven.tags_context(helper.context.get_tags_context())

    # This is yanking the auth from the querystring since it's not
    # in the POST body. This means we expect a `sentry_key` and
    # `sentry_version` to be set in querystring
    auth = helper.auth_from_request(request)

    key = helper.project_key_from_auth(auth)
    if key.project_id != project.id:
        raise APIError('Two different projects were specified')

    helper.context.bind_auth(auth)
    raven.tags_context(helper.context.get_tags_context())

    return super(APIView, self).dispatch(
        request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
    )
def index_event_tags(organization_id, project_id, event_id, tags,
                     group_id, environment_id, date_added=None, **kwargs):
    """Persist an event's tag key/value pairs into the tag store."""
    from sentry import tagstore

    raven.tags_context({
        'project': project_id,
    })

    # Only forward `date_added` when the caller supplied one, so the
    # tagstore's own default applies otherwise.
    extra_kwargs = {}
    if date_added is not None:
        extra_kwargs['date_added'] = date_added

    metrics.timing(
        'tagstore.tags_per_event',
        len(tags),
        tags={
            'organization_id': organization_id,
        },
    )

    tagstore.create_event_tags(
        project_id=project_id,
        group_id=group_id,
        environment_id=environment_id,
        event_id=event_id,
        tags=tags,
        **extra_kwargs
    )
def _do_preprocess_event(cache_key, data, start_time, event_id, process_event):
    """First pipeline stage: decide whether an event needs processing.

    Loads the event payload (from the default cache when ``cache_key`` is
    given) and either enqueues ``process_event`` or schedules ``save_event``
    directly when no preprocessing applies.
    """
    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        # Payload expired or was never cached; count and bail.
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'})
        error_logger.error('preprocess.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project = data['project']
    raven.tags_context({
        'project': project,
    })

    if should_process(data):
        process_event.delay(cache_key=cache_key, start_time=start_time, event_id=event_id)
        return

    # If we get here, that means the event had no preprocessing needed to be done
    # so we can jump directly to save_event
    if cache_key:
        # Payload already lives in the cache; avoid serializing it into
        # the task arguments.
        data = None
    save_event.delay(
        cache_key=cache_key, data=data, start_time=start_time, event_id=event_id,
        project_id=project
    )
def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
    """Resolve org/project slugs to a Project, handling renamed slugs.

    A rename is surfaced to the client as ``ResourceMoved`` carrying the
    new slug; unknown or invisible projects raise ``ResourceDoesNotExist``.
    """
    try:
        project = Project.objects.filter(
            organization__slug=organization_slug,
            slug=project_slug,
        ).select_related('organization').prefetch_related('teams').get()
    except Project.DoesNotExist:
        try:
            # Project may have been renamed
            redirect = ProjectRedirect.objects.select_related('project')
            redirect = redirect.get(
                organization__slug=organization_slug,
                redirect_slug=project_slug
            )
            # Tell the client where the project lives now.
            raise ResourceMoved(detail={'slug': redirect.project.slug})
        except ProjectRedirect.DoesNotExist:
            raise ResourceDoesNotExist

    if project.status != ProjectStatus.VISIBLE:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, project)

    raven.tags_context({
        'project': project.id,
        'organization': project.organization_id,
    })

    request._request.organization = project.organization

    kwargs['project'] = project
    return (args, kwargs)
def convert_args(self, request, issue_id, *args, **kwargs):
    """Resolve ``issue_id`` to a Group (following merge redirects) and
    inject it into ``kwargs``.
    """
    # TODO(tkaemming): a redirected lookup would ideally answer with a 302
    # to the canonical issue URL instead of silently serving the merged
    # group's data; that requires reversing the dispatching URL pattern
    # with the new `issue_id`. (Not a 301: the target can change again
    # after another merge.)
    try:
        group, _ = get_group_with_redirect(
            issue_id,
            queryset=Group.objects.select_related('project'),
        )
    except Group.DoesNotExist:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, group)

    raven.tags_context({
        'project': group.project_id,
        'organization': group.project.organization_id,
    })

    kwargs['group'] = group
    return (args, kwargs)
def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
    """Dispatch a store API request with CORS validation and auth binding.

    Resolves the project (possibly only via the auth key for the legacy
    ``/api/store/`` route), validates ``origin`` when present, and sets
    the ``Access-Control-Allow-Origin`` header on the response.
    """
    request.user = AnonymousUser()

    project = self._get_project_from_id(project_id)
    if project:
        helper.context.bind_project(project)
        raven.tags_context(helper.context.get_tags_context())

    if origin is not None:
        # This check is specific for clients who need CORS support
        if not project:
            raise APIError('Client must be upgraded for CORS support')
        if not is_valid_origin(origin, project):
            tsdb.incr(tsdb.models.project_total_received_cors, project.id)
            raise APIForbidden('Invalid origin: %s' % (origin, ))

    # XXX: It seems that the OPTIONS call does not always include custom headers
    if request.method == 'OPTIONS':
        response = self.options(request, project)
    else:
        auth = self._parse_header(request, helper, project)

        key = helper.project_key_from_auth(auth)

        # Legacy API was /api/store/ and the project ID was only available elsewhere
        if not project:
            project = Project.objects.get_from_cache(id=key.project_id)
            helper.context.bind_project(project)
        elif key.project_id != project.id:
            raise APIError('Two different projects were specified')

        helper.context.bind_auth(auth)
        raven.tags_context(helper.context.get_tags_context())

        # Explicitly bind Organization so we don't implicitly query it later
        # this just allows us to comfortably assure that `project.organization` is safe.
        # This also allows us to pull the object from cache, instead of being
        # implicitly fetched from database.
        project.organization = Organization.objects.get_from_cache(
            id=project.organization_id)

        response = super(APIView, self).dispatch(
            request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
        )

    if origin:
        if origin == 'null':
            # If an Origin is `null`, but we got this far, that means
            # we've gotten past our CORS check for some reason. But the
            # problem is that we can't return "null" as a valid response
            # to `Access-Control-Allow-Origin` and we don't have another
            # value to work with, so just allow '*' since they've gotten
            # this far.
            response['Access-Control-Allow-Origin'] = '*'
        else:
            response['Access-Control-Allow-Origin'] = origin

    return response
def plugin_post_process_group(plugin_slug, event, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    raven.tags_context({
        'project': event.project_id,
    })

    # `safe_execute` shields the pipeline from a misbehaving plugin.
    plugin = plugins.get(plugin_slug)
    safe_execute(
        plugin.post_process,
        event=event,
        group=event.group,
        **kwargs
    )
def authenticate_credentials(self, token):
    """Resolve a raw token string to ``(user, ApiToken)``.

    Raises ``AuthenticationFailed`` for unknown tokens or inactive users.
    """
    try:
        # Rebinds `token` from the raw string to its ApiToken row.
        token = ApiToken.objects.get(token=token)
    except ApiToken.DoesNotExist:
        raise AuthenticationFailed('Invalid token')

    if not token.user.is_active:
        raise AuthenticationFailed('User inactive or deleted')

    # NOTE(review): unlike the sibling token authenticators in this file,
    # this variant checks neither token expiry nor application state --
    # confirm that is intentional.
    raven.tags_context({
        'api_token': token.id,
    })

    return (token.user, token)
def convert_args(self, request, uuid, *args, **kwargs):
    """Look up the SentryAppInstallation for ``uuid`` and bind it."""
    try:
        installation = SentryAppInstallation.objects.get_from_cache(uuid=uuid)
    except SentryAppInstallation.DoesNotExist:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, installation)

    raven.tags_context({'sentry_app_installation': installation.id})

    kwargs['install'] = installation
    return (args, kwargs)
def convert_args(self, request, slug, *args, **kwargs):
    """Resolve a SentryApp by ``slug`` and inject it into ``kwargs``."""
    try:
        app = SentryApp.objects.get_from_cache(slug=slug)
    except SentryApp.DoesNotExist:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, app)

    raven.tags_context({'sentry_app': app.id})

    kwargs['sentry_app'] = app
    return (args, kwargs)
def convert_args(self, request, sentry_app_slug, *args, **kwargs):
    """Resolve ``sentry_app_slug`` to a SentryApp and bind it."""
    try:
        app = SentryApp.objects.get_from_cache(slug=sentry_app_slug)
    except SentryApp.DoesNotExist:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, app)

    raven.tags_context({'sentry_app': app.id})

    kwargs['sentry_app'] = app
    return (args, kwargs)
def convert_args(self, request, organization_slug, *args, **kwargs):
    """Resolve ``organization_slug`` to an Organization and bind it."""
    try:
        org = Organization.objects.get_from_cache(
            slug=organization_slug,
        )
    except Organization.DoesNotExist:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, org)

    raven.tags_context({'organization': org.id})

    kwargs['organization'] = org
    return (args, kwargs)
def authenticate_credentials(self, userid, password):
    """Authenticate an ApiKey supplied via HTTP basic auth.

    ``userid`` carries the key string; a non-empty ``password`` (or an
    unknown key) opts out by returning ``None`` so other authenticators
    can run. Raises ``AuthenticationFailed`` for disabled keys.
    """
    if password:
        return None

    try:
        key = ApiKey.objects.get_from_cache(key=userid)
    except ApiKey.DoesNotExist:
        return None

    if not key.is_active:
        raise AuthenticationFailed("Key is disabled")

    # Fix: tag the key's database id instead of the raw `userid`, which is
    # the secret key material and must not be shipped to the error tracker
    # (the sibling authenticators in this file already tag `key.id`).
    raven.tags_context({"api_key": key.id})

    return (AnonymousUser(), key)
def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
    """Handle a minidump submission (POST-only, querystring auth).

    Binds project/auth context and defers to the base ``dispatch``.
    Raises ``APIError`` on bad content type or project mismatch.
    """
    # TODO(ja): Refactor shared code with CspReportView. Especially, look at
    # the sentry_key override and test it.
    # A minidump submission as implemented by Breakpad and Crashpad or any
    # other library following the Mozilla Soccorro protocol is a POST request
    # without Origin or Referer headers. Therefore, we cannot validate the
    # origin of the request, but we *can* validate the "prod" key in future.
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])

    content_type = request.META.get('CONTENT_TYPE')
    # In case of multipart/form-data, the Content-Type header also includes
    # a boundary. Therefore, we cannot check for an exact match.
    if content_type is None or not content_type.startswith(
            self.content_types):
        raise APIError('Invalid Content-Type')

    request.user = AnonymousUser()

    project = self._get_project_from_id(project_id)
    helper.context.bind_project(project)
    raven.tags_context(helper.context.get_tags_context())

    # This is yanking the auth from the querystring since it's not
    # in the POST body. This means we expect a `sentry_key` and
    # `sentry_version` to be set in querystring
    auth = helper.auth_from_request(request)

    key = helper.project_key_from_auth(auth)
    if key.project_id != project.id:
        raise APIError('Two different projects were specified')

    helper.context.bind_auth(auth)
    raven.tags_context(helper.context.get_tags_context())

    return super(APIView, self).dispatch(request=request, project=project,
                                         auth=auth, helper=helper, key=key,
                                         **kwargs)
def authenticate_credentials(self, userid, password):
    """Validate a basic-auth ApiKey; the password portion must be empty."""
    if password:
        return None

    try:
        key = ApiKey.objects.get_from_cache(key=userid)
    except ApiKey.DoesNotExist:
        raise AuthenticationFailed('API key is not valid')

    if not key.is_active:
        raise AuthenticationFailed('Key is disabled')

    raven.tags_context({'api_key': key.id})

    return (AnonymousUser(), key)
def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
    """Resolve org/project slugs to a Project, redirecting renamed slugs.

    When the slug was renamed, raises ``ProjectMoved`` carrying the
    rewritten request URL; otherwise raises ``ResourceDoesNotExist`` for
    unknown or invisible projects.
    """
    try:
        project = Project.objects.filter(
            organization__slug=organization_slug,
            slug=project_slug,
        ).select_related('organization').prefetch_related('teams').get()
    except Project.DoesNotExist:
        try:
            # Project may have been renamed
            redirect = ProjectRedirect.objects.select_related('project')
            redirect = redirect.get(
                organization__slug=organization_slug,
                redirect_slug=project_slug
            )

            # get full path so that we keep query strings
            requested_url = request.get_full_path()
            new_url = requested_url.replace(
                'projects/%s/%s/' % (organization_slug, project_slug),
                'projects/%s/%s/' % (organization_slug, redirect.project.slug))

            # Resource was moved/renamed if the requested url is different than the new url
            if requested_url != new_url:
                raise ProjectMoved(new_url, redirect.project.slug)

            # otherwise project doesn't exist
            raise ResourceDoesNotExist
        except ProjectRedirect.DoesNotExist:
            raise ResourceDoesNotExist

    if project.status != ProjectStatus.VISIBLE:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, project)

    raven.tags_context({
        'project': project.id,
        'organization': project.organization_id,
    })

    request._request.organization = project.organization

    kwargs['project'] = project
    return (args, kwargs)
def convert_args(self, request, organization_slug, team_slug, *args, **kwargs):
    """Resolve an organization/team slug pair to a visible Team."""
    queryset = Team.objects.filter(
        organization__slug=organization_slug,
        slug=team_slug,
    ).select_related('organization')

    try:
        team = queryset.get()
    except Team.DoesNotExist:
        raise ResourceDoesNotExist

    if team.status != TeamStatus.VISIBLE:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, team)

    raven.tags_context({'organization': team.organization_id})

    kwargs['team'] = team
    return (args, kwargs)
def authenticate_credentials(self, relay_id, relay_sig, request):
    """Authenticate a Relay request by verifying its payload signature.

    Side effects: binds ``request.relay`` and ``request.relay_request_data``.
    Returns ``(AnonymousUser(), None)`` on success.
    """
    raven.tags_context({
        'relay_id': relay_id,
    })
    try:
        relay = Relay.objects.get(relay_id=relay_id)
    except Relay.DoesNotExist:
        raise AuthenticationFailed('Unknown relay')

    try:
        # Signatures older than five minutes are rejected to bound replay.
        data = relay.public_key_object.unpack(request.body, relay_sig, max_age=60 * 5)
        request.relay = relay
        request.relay_request_data = data
    except semaphore.UnpackError:
        raise AuthenticationFailed('Invalid relay signature')

    # TODO(mitsuhiko): can we return the relay here? would be nice if we
    # could find some common interface for it
    return (AnonymousUser(), None)
def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
    """Resolve org/project slugs to a visible Project and bind it."""
    try:
        # NOTE(review): `get_from_cache` is called with a joined lookup
        # (`organization__slug`); cache managers are typically keyed on
        # unique local fields, so this may bypass the cache or be
        # unsupported -- confirm against the cache manager implementation.
        project = Project.objects.get_from_cache(
            organization__slug=organization_slug,
            slug=project_slug,
        )
    except Project.DoesNotExist:
        raise ResourceDoesNotExist

    if project.status != ProjectStatus.VISIBLE:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, project)

    raven.tags_context({
        'project': project.id,
        'organization': project.organization_id,
    })

    kwargs['project'] = project
    return (args, kwargs)
def convert_args(self, request, organization_slug, *args, **kwargs):
    """Resolve ``organization_slug`` and bind the Organization.

    Side effects: exposes the organization on the raw Django request and
    records the active organization in the session for cookie-based
    clients.
    """
    try:
        organization = Organization.objects.get_from_cache(
            slug=organization_slug,
        )
    except Organization.DoesNotExist:
        raise ResourceDoesNotExist

    self.check_object_permissions(request, organization)

    raven.tags_context({
        'organization': organization.id,
    })

    request._request.organization = organization

    # Track the 'active' organization when the request came from
    # a cookie based agent (react app)
    if request.auth is None and request.user:
        request.session['activeorg'] = organization.slug

    kwargs['organization'] = organization
    return (args, kwargs)
def authenticate_credentials(self, token):
    """Resolve a bearer token to ``(user, ApiToken)`` or fail.

    Rejects expired tokens, inactive users, and inactive applications.
    """
    try:
        token = ApiToken.objects.filter(
            token=token,
        ).select_related('user', 'application').get()
    except ApiToken.DoesNotExist:
        raise AuthenticationFailed('Invalid token')

    if token.is_expired():
        raise AuthenticationFailed('Token expired')

    if not token.user.is_active:
        raise AuthenticationFailed('User inactive or deleted')

    if token.application and not token.application.is_active:
        raise AuthenticationFailed('UserApplication inactive or deleted')

    raven.tags_context({'api_token': token.id})

    return (token.user, token)
def authenticate_credentials(self, token):
    """Validate a bearer token and return ``(user, ApiToken)``."""
    # Pre-join user and application to avoid per-check queries below.
    queryset = ApiToken.objects.select_related('user', 'application')
    try:
        token = queryset.filter(token=token).get()
    except ApiToken.DoesNotExist:
        raise AuthenticationFailed('Invalid token')

    if token.is_expired():
        raise AuthenticationFailed('Token expired')
    if not token.user.is_active:
        raise AuthenticationFailed('User inactive or deleted')
    if token.application and not token.application.is_active:
        raise AuthenticationFailed('UserApplication inactive or deleted')

    raven.tags_context({'api_token': token.id})

    return (token.user, token)
def authenticate_credentials(self, userid, password):
    """Authenticate an ApiKey (or the configured root key) via basic auth."""
    if password:
        return None

    # The root API key, when configured, short-circuits the normal lookup.
    root_api_key = options.get('system.root-api-key')
    if root_api_key and constant_time_compare(root_api_key, userid):
        return (None, ROOT_KEY)

    try:
        key = ApiKey.objects.get_from_cache(key=userid)
    except ApiKey.DoesNotExist:
        raise AuthenticationFailed('API key is not valid')

    if not key.is_active:
        raise AuthenticationFailed('Key is disabled')

    raven.tags_context({'api_key': key.id})

    return (AnonymousUser(), key)
def authenticate_credentials(self, userid, password):
    """Authenticate an ApiKey (or the configured root key) via basic auth.

    ``userid`` carries the key string; a non-empty ``password`` opts out
    of this authenticator by returning ``None``.
    """
    if password:
        return None

    # The root API key, when configured, short-circuits the normal lookup.
    root_api_key = options.get('system.root-api-key')
    if root_api_key:
        if constant_time_compare(root_api_key, userid):
            return (None, ROOT_KEY)

    try:
        key = ApiKey.objects.get_from_cache(key=userid)
    except ApiKey.DoesNotExist:
        raise AuthenticationFailed('API key is not valid')

    if not key.is_active:
        raise AuthenticationFailed('Key is disabled')

    # Fix: tag the key's database id instead of the raw `userid`, which is
    # the secret key material and must not be shipped to the error tracker
    # (the sibling implementation in this file already tags `key.id`).
    raven.tags_context({
        'api_key': key.id,
    })

    return (AnonymousUser(), key)
def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
    """Handle a minidump submission (POST-only, querystring auth)."""
    # TODO(ja): Refactor shared code with CspReportView. Especially, look at
    # the sentry_key override and test it.
    # A minidump submission as implemented by Breakpad and Crashpad or any
    # other library following the Mozilla Soccorro protocol is a POST request
    # without Origin or Referer headers. Therefore, we cannot validate the
    # origin of the request, but we *can* validate the "prod" key in future.
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])

    content_type = request.META.get('CONTENT_TYPE')
    # In case of multipart/form-data, the Content-Type header also includes
    # a boundary. Therefore, we cannot check for an exact match.
    if content_type is None or not content_type.startswith(self.content_types):
        raise APIError('Invalid Content-Type')

    request.user = AnonymousUser()

    project = self._get_project_from_id(project_id)
    helper.context.bind_project(project)
    raven.tags_context(helper.context.get_tags_context())

    # This is yanking the auth from the querystring since it's not
    # in the POST body. This means we expect a `sentry_key` and
    # `sentry_version` to be set in querystring
    auth = helper.auth_from_request(request)

    key = helper.project_key_from_auth(auth)
    if key.project_id != project.id:
        raise APIError('Two different projects were specified')

    helper.context.bind_auth(auth)
    raven.tags_context(helper.context.get_tags_context())

    return super(APIView, self).dispatch(
        request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
    )
def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
    """Resolve org/project slugs to a visible Project and bind it."""
    queryset = Project.objects.filter(
        organization__slug=organization_slug,
        slug=project_slug,
    ).select_related('organization', 'team')

    try:
        project = queryset.get()
    except Project.DoesNotExist:
        raise ResourceDoesNotExist

    if project.status != ProjectStatus.VISIBLE:
        raise ResourceDoesNotExist

    # Share the already-loaded organization with the team so accessing
    # `project.team.organization` later does not trigger another query.
    project.team.organization = project.organization

    self.check_object_permissions(request, project)

    raven.tags_context({
        'project': project.id,
        'organization': project.organization_id,
    })

    kwargs['project'] = project
    return (args, kwargs)
def _do_process_event(cache_key, start_time, event_id, process_task):
    """Second pipeline stage: run stacktrace and plugin preprocessors.

    Pulls the payload from the default cache, applies stacktrace
    processing and plugin preprocessors, records processing issues
    (retrying via ``process_task`` when the reprocessing revision moved),
    and finally schedules ``save_event``.
    """
    from sentry.plugins import plugins

    data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'process'})
        error_logger.error('process.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project = data['project']
    raven.tags_context({
        'project': project,
    })
    has_changed = False

    # Fetch the reprocessing revision
    reprocessing_rev = reprocessing.get_reprocessing_revision(project)

    # Stacktrace based event processors. These run before anything else.
    new_data = process_stacktraces(data)
    if new_data is not None:
        has_changed = True
        data = new_data

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        processors = safe_execute(
            plugin.get_event_preprocessors, data=data, _with_transaction=False
        )
        for processor in (processors or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    if has_changed:
        issues = data.get('processing_issues')
        try:
            if issues and create_failed_event(
                cache_key, project, list(issues.values()),
                event_id=event_id, start_time=start_time,
                reprocessing_rev=reprocessing_rev
            ):
                return
        except RetryProcessing:
            # If `create_failed_event` indicates that we need to retry we
            # invoke ourselves again. This happens when the reprocessing
            # revision changed while we were processing.
            process_task.delay(cache_key, start_time=start_time, event_id=event_id)
            return

        # We cannot persist canonical types in the cache, so we need to
        # downgrade this.
        if isinstance(data, CANONICAL_TYPES):
            data = dict(data.items())
        default_cache.set(cache_key, data, 3600)

    save_event.delay(
        cache_key=cache_key, data=None, start_time=start_time, event_id=event_id,
        project_id=project
    )
def post_process_group(event, is_new, is_regression, is_sample, is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    if check_event_already_post_processed(event):
        logger.info('post_process.skipped', extra={
            'project_id': event.project_id,
            'event_id': event.event_id,
            'reason': 'duplicate',
        })
        return

    # NOTE: we must pass through the full Event object, and not an
    # event_id since the Event object may not actually have been stored
    # in the database due to sampling.
    from sentry.models import Project
    from sentry.models.group import get_group_with_redirect
    from sentry.rules.processor import RuleProcessor
    from sentry.tasks.servicehooks import process_service_hook

    # Re-bind Group since we're pickling the whole Event object
    # which may contain a stale Group.
    event.group, _ = get_group_with_redirect(event.group_id)
    event.group_id = event.group.id

    project_id = event.group.project_id
    raven.tags_context({
        'project': project_id,
    })

    # Re-bind Project since we're pickling the whole Event object
    # which may contain a stale Project.
    event.project = Project.objects.get_from_cache(id=project_id)

    _capture_stats(event, is_new)

    # we process snoozes before rules as it might create a regression
    process_snoozes(event.group)

    rp = RuleProcessor(event, is_new, is_regression, is_new_group_environment)
    has_alert = False
    # TODO(dcramer): ideally this would fanout, but serializing giant
    # objects back and forth isn't super efficient
    for callback, futures in rp.apply():
        has_alert = True
        safe_execute(callback, event, futures)

    if features.has(
        'projects:servicehooks',
        project=event.project,
    ):
        allowed_events = set(['event.created'])
        if has_alert:
            allowed_events.add('event.alert')

        if allowed_events:
            for servicehook_id, events in _get_service_hooks(
                    project_id=event.project_id):
                if any(e in allowed_events for e in events):
                    process_service_hook.delay(
                        servicehook_id=servicehook_id,
                        event=event,
                    )

    for plugin in plugins.for_project(event.project):
        plugin_post_process_group(
            plugin_slug=plugin.slug,
            event=event,
            is_new=is_new,
            # Fix: this kwarg was previously misspelled `is_regresion`, so
            # plugin post_process hooks never received the correctly named
            # regression flag.
            is_regression=is_regression,
            is_sample=is_sample,
        )

    event_processed.send_robust(
        sender=post_process_group,
        project=event.project,
        group=event.group,
        event=event,
        primary_hash=kwargs.get('primary_hash'),
    )
def save_event(cache_key=None, data=None, start_time=None, event_id=None,
               project_id=None, **kwargs):
    """
    Saves an event to the database.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas, tsdb
    from sentry.models import ProjectKey

    if cache_key:
        data = default_cache.get(cache_key)

    if data is not None:
        data = CanonicalKeyDict(data)

    if event_id is None and data is not None:
        event_id = data['event_id']

    # only when we come from reprocessing we get a project_id sent into
    # the task.
    if project_id is None:
        project_id = data.pop('project')

    delete_raw_event(project_id, event_id, allow_hint_clear=True)

    # This covers two cases: where data is None because we did not manage
    # to fetch it from the default cache or the empty dictionary was
    # stored in the default cache. The former happens if the event
    # expired while being on the queue, the second happens on reprocessing
    # if the raw event was deleted concurrently while we held on to
    # it. This causes the node store to delete the data and we end up
    # fetching an empty dict. We could in theory not invoke `save_event`
    # in those cases but it's important that we always clean up the
    # reprocessing reports correctly or they will screw up the UI. So
    # to future proof this correctly we just handle this case here.
    if not data:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'post'})
        return

    raven.tags_context({
        'project': project_id,
    })

    try:
        manager = EventManager(data)
        event = manager.save(project_id)

        # Always load attachments from the cache so we can later prune them.
        # Only save them if the event-attachments feature is active, though.
        if features.has('organizations:event-attachments', event.project.organization, actor=None):
            attachments = attachment_cache.get(cache_key) or []
            for attachment in attachments:
                save_attachment(event, attachment)

    except HashDiscarded:
        # The event was dropped by a discard filter; refund quota and
        # record the discard in tsdb instead of persisting it.
        increment_list = [
            (tsdb.models.project_total_received_discarded, project_id),
        ]

        try:
            project = Project.objects.get_from_cache(id=project_id)
        except Project.DoesNotExist:
            pass
        else:
            increment_list.extend([
                (tsdb.models.project_total_blacklisted, project.id),
                (tsdb.models.organization_total_blacklisted, project.organization_id),
            ])

            project_key = None
            if data.get('key_id') is not None:
                try:
                    project_key = ProjectKey.objects.get_from_cache(id=data['key_id'])
                except ProjectKey.DoesNotExist:
                    pass
                else:
                    increment_list.append((tsdb.models.key_total_blacklisted, project_key.id))

            quotas.refund(
                project,
                key=project_key,
                timestamp=start_time,
            )

        tsdb.incr_multi(
            increment_list,
            timestamp=to_datetime(start_time) if start_time is not None else None,
        )

    finally:
        # Regardless of outcome, clear cached payload/attachments and
        # record end-to-end processing latency.
        if cache_key:
            default_cache.delete(cache_key)
            attachment_cache.delete(cache_key)

        if start_time:
            metrics.timing(
                'events.time-to-process',
                time() - start_time,
                instance=data['platform'])