Esempio n. 1
0
    def convert_args(self,
                     request,
                     issue_id,
                     organization_slug=None,
                     *args,
                     **kwargs):
        """Resolve ``issue_id`` into a Group (following merge redirects) and
        inject it into ``kwargs`` for the endpoint handler.

        Raises ResourceDoesNotExist when the organization or group cannot be
        found, or when the group's status is excluded.
        """
        # TODO(tkaemming): Ideally this would 302-redirect to the merged
        # group's URL instead of serving the new group's data directly. (Not
        # a 301: a later merge could change the destination again, though a
        # chain of "permanent" redirects would still resolve correctly.)
        # That requires rebuilding the URL, either by string surgery or by
        # making the endpoint aware of the URL pattern that dispatched it so
        # it can be reversed with the correct `issue_id` keyword argument.
        organization = None
        if organization_slug:
            try:
                organization = Organization.objects.get_from_cache(
                    slug=organization_slug, )
            except Organization.DoesNotExist:
                raise ResourceDoesNotExist

            with configure_scope() as scope:
                scope.set_tag("organization", organization.id)

            request._request.organization = organization

        try:
            group, _ = get_group_with_redirect(
                issue_id,
                queryset=Group.objects.select_related('project',
                                                      'project__organization'),
                organization=organization,
            )
        except Group.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, group)

        with configure_scope() as scope:
            scope.set_tag("project", group.project_id)
            scope.set_tag("organization", group.project.organization_id)

        if group.status in EXCLUDED_STATUSES:
            raise ResourceDoesNotExist

        request._request.organization = group.project.organization

        kwargs['group'] = group

        return (args, kwargs)
Esempio n. 2
0
def index_event_tags(organization_id,
                     project_id,
                     event_id,
                     tags,
                     group_id,
                     environment_id,
                     date_added=None,
                     **kwargs):
    """Persist the tags attached to an event through the tagstore backend,
    recording a per-event tag-count metric along the way."""
    from sentry import tagstore

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    # Only forward date_added when the caller supplied one.
    extra_kwargs = {} if date_added is None else {'date_added': date_added}

    metrics.timing('tagstore.tags_per_event',
                   len(tags),
                   tags={'organization_id': organization_id})

    tagstore.create_event_tags(project_id=project_id,
                               group_id=group_id,
                               environment_id=environment_id,
                               event_id=event_id,
                               tags=tags,
                               **extra_kwargs)
Esempio n. 3
0
def get_project_options(project):
    """Returns a dict containing the config for a project for the sentry relay"""

    with configure_scope() as scope:
        scope.set_tag("project", project.id)

    # Map each public key to whether it is enabled (status == 0).
    public_keys = {
        project_key.public_key: project_key.status == 0
        for project_key in ProjectKey.objects.filter(project=project, ).all()
    }

    now = datetime.utcnow().replace(tzinfo=utc)

    org_options = OrganizationOption.objects.get_all_values(
        project.organization_id)

    return {
        'disabled': project.status > 0,
        'slug': project.slug,
        'lastFetch': now,
        'lastChange': project.get_option('sentry:relay-rev-lastchange', now),
        'rev': project.get_option('sentry:relay-rev',
                                  uuid.uuid4().hex),
        'publicKeys': public_keys,
        'config': {
            'allowedDomains': project.get_option('sentry:origins', ['*']),
            'trustedRelays': org_options.get('sentry:trusted-relays', []),
            'piiConfig': get_pii_config(project, org_options),
        },
    }
Esempio n. 4
0
def _do_preprocess_event(cache_key, data, start_time, event_id, process_task):
    """Load the event payload (from cache when not passed inline) and route
    it either into the processing pipeline or straight to save."""
    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if data is None:
        # The payload expired or was never written; count it and bail.
        metrics.incr('events.failed',
                     tags={
                         'reason': 'cache',
                         'stage': 'pre'
                     },
                     skip_internal=False)
        error_logger.error('preprocess.failed.empty',
                           extra={'cache_key': cache_key})
        return

    original_data = data
    data = CanonicalKeyDict(data)
    project_id = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    project = Project.objects.get_from_cache(id=project_id)

    if not should_process(data):
        # Nothing to preprocess -- save directly.
        submit_save_event(project, cache_key, event_id, start_time,
                          original_data)
        return

    from_reprocessing = process_task is process_event_from_reprocessing
    submit_process(project, from_reprocessing, cache_key, event_id,
                   start_time, original_data)
Esempio n. 5
0
    def convert_args(self, request, organization_slug, *args, **kwargs):
        """Resolve ``organization_slug`` into an Organization, bind it to the
        request, scope and session, and pass it via ``kwargs``."""
        from sentry.models import Organization  # Django 1.9 setup issue
        try:
            organization = Organization.objects.get_from_cache(slug=organization_slug)
        except Organization.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, organization)

        with configure_scope() as scope:
            scope.set_tag("organization", organization.id)

        request._request.organization = organization

        # Cookie-based agents (the react app) track the 'active' organization
        # in the session.
        if request.auth is None and request.user:
            request.session['activeorg'] = organization.slug

        kwargs['organization'] = organization

        # For now we're only setting the context if in an OrganizationEndpoint, not Endpoint
        from clims.handlers import context_store
        context_store.set(app=self.app, organization=organization, user=request.user)

        return (args, kwargs)
Esempio n. 6
0
    def bind_auth(self, auth):
        """Record the client agent and protocol version from ``auth`` on this
        instance and tag both on the active Sentry scope."""
        self.agent, self.version = auth.client, auth.version

        with configure_scope() as scope:
            scope.set_tag("agent", self.agent)
            scope.set_tag("protocol", self.version)
Esempio n. 7
0
def download_dsyms(session: Session, credentials: AppConnectCredentials,
                   url: str, path: pathlib.Path) -> None:
    """Downloads dSYMs at `url` into `path` which must be a filename.

    Raises UnauthorizedError/ForbiddenError/RequestError on bad HTTP status
    and Timeout when the transfer exceeds its time budget.
    """
    headers = _get_authorization_header(credentials)

    with session.get(url, headers=headers, stream=True, timeout=15) as res:
        status = res.status_code
        if status == HTTPStatus.UNAUTHORIZED:
            raise UnauthorizedError
        if status == HTTPStatus.FORBIDDEN:
            raise ForbiddenError
        if status != HTTPStatus.OK:
            raise RequestError(f"Bad status code downloading dSYM: {status}")

        # The 315s budget is just above how long a 4MB/s connection would
        # need to download 2GB.
        deadline = time.time() + 315
        bytes_fetched = 0
        with open(path, "wb") as fp:
            for chunk in res.iter_content(chunk_size=io.DEFAULT_BUFFER_SIZE):
                if time.time() > deadline:
                    with sdk.configure_scope() as scope:
                        scope.set_extra("dSYM.bytes_fetched", bytes_fetched)
                    raise Timeout("Timeout during dSYM download")
                bytes_fetched += len(chunk)
                fp.write(chunk)
Esempio n. 8
0
def assemble_dif(project_id, name, checksum, chunks, **kwargs):
    """Assemble uploaded chunks into a debug information file (dif).

    Publishes progress through ``set_assemble_status`` for ``checksum`` on
    the project: ASSEMBLING while working, ERROR (with detail) when the
    input is a bad or multi-architecture dif, and OK on success.
    """
    from sentry.models import ChunkFileState, debugfile, Project, \
        set_assemble_status, BadDif
    from sentry.reprocessing import bump_reprocessing_revision

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    project = Project.objects.filter(id=project_id).get()
    set_assemble_status(project, checksum, ChunkFileState.ASSEMBLING)

    # Assemble the chunks into files
    rv = assemble_file(project, name, checksum, chunks,
                       file_type='project.dif')

    # If not file has been created this means that the file failed to
    # assemble because of bad input data.  Return.
    if rv is None:
        return

    file, temp_file = rv
    # Delete the assembled file unless it is successfully handed off below.
    delete_file = True
    try:
        with temp_file:
            # We only permit split difs to hit this endpoint.  The
            # client is required to split them up first or we error.
            try:
                result = debugfile.detect_dif_from_path(temp_file.name)
            except BadDif as e:
                set_assemble_status(project, checksum, ChunkFileState.ERROR,
                                    detail=e.args[0])
                return

            if len(result) != 1:
                set_assemble_status(project, checksum, ChunkFileState.ERROR,
                                    detail='Contained wrong number of '
                                    'architectures (expected one, got %s)'
                                    % len(result))
                return

            dif_type, cpu, file_id, filename, data = result[0]
            dif, created = debugfile.create_dif_from_id(
                project, dif_type, cpu, file_id, data,
                os.path.basename(name),
                file=file)
            # NOTE(review): indicate_success is always True here, so the
            # conditional below never skips -- looks vestigial.
            indicate_success = True
            delete_file = False

            if created:
                # Bump the reprocessing revision since the symbol has changed
                # and might resolve processing issues. If the file was not
                # created, someone else has created it and will bump the
                # revision instead.
                bump_reprocessing_revision(project)

            if indicate_success:
                set_assemble_status(project, checksum, ChunkFileState.OK)
    finally:
        if delete_file:
            file.delete()
Esempio n. 9
0
 def _update_settings(
     self,
     provider: ExternalProviders,
     type: NotificationSettingTypes,
     value: NotificationSettingOptionValues,
     scope_type: NotificationScopeType,
     scope_identifier: int,
     target_id: int,
 ) -> None:
     """Save a NotificationSettings row.

     Creates the row when missing. When it already exists with a different
     value, the previous setting is tagged on the Sentry scope (for
     debugging overwrites) before the row is updated.
     """
     with configure_scope() as scope:
         with transaction.atomic():
             # Keyed on everything except the value itself; the value only
             # applies when a new row is created.
             setting, created = self.get_or_create(
                 provider=provider.value,
                 type=type.value,
                 scope_type=scope_type.value,
                 scope_identifier=scope_identifier,
                 target_id=target_id,
                 defaults={"value": value.value},
             )
             if not created and setting.value != value.value:
                 # Record what is being overwritten.
                 scope.set_tag("notif_setting_type", setting.type_str)
                 scope.set_tag("notif_setting_value", setting.value_str)
                 scope.set_tag("notif_setting_provider",
                               setting.provider_str)
                 scope.set_tag("notif_setting_scope", setting.scope_str)
                 setting.update(value=value.value)
Esempio n. 10
0
    def convert_args(self, request, monitor_id, checkin_id, *args, **kwargs):
        """Resolve a monitor check-in from its guid pair and bind the
        check-in, monitor and project into ``kwargs``."""
        try:
            monitor = Monitor.objects.get(guid=monitor_id)
        except Monitor.DoesNotExist:
            raise ResourceDoesNotExist

        project = Project.objects.get_from_cache(id=monitor.project_id)
        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        # Project-scoped auth tokens may only touch their own project.
        if hasattr(request.auth, "project_id") and project.id != request.auth.project_id:
            return self.respond(status=400)

        if not features.has("organizations:monitors", project.organization, actor=request.user):
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        with configure_scope() as scope:
            scope.set_tag("project", project.id)

        bind_organization_context(project.organization)

        try:
            checkin = MonitorCheckIn.objects.get(monitor=monitor, guid=checkin_id)
        except MonitorCheckIn.DoesNotExist:
            raise ResourceDoesNotExist

        request._request.organization = project.organization

        kwargs["checkin"] = checkin
        kwargs["monitor"] = monitor
        kwargs["project"] = project
        return (args, kwargs)
Esempio n. 11
0
    def convert_args(self, request, monitor_id, *args, **kwargs):
        """Resolve a monitor by guid and bind it plus its project into
        ``kwargs``."""
        try:
            monitor = Monitor.objects.get(guid=monitor_id)
        except Monitor.DoesNotExist:
            raise ResourceDoesNotExist

        project = Project.objects.get_from_cache(id=monitor.project_id)
        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        # HACK: This doesn't work since we can't return a 400 from here,
        # and actually just results in a 500.
        if hasattr(request.auth, "project_id") and project.id != request.auth.project_id:
            return self.respond(status=400)

        if not features.has("organizations:monitors", project.organization, actor=request.user):
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        with configure_scope() as scope:
            scope.set_tag("project", project.id)

        bind_organization_context(project.organization)

        request._request.organization = project.organization

        kwargs["monitor"] = monitor
        kwargs["project"] = project
        return (args, kwargs)
Esempio n. 12
0
    def convert_args(self, request, issue_id, *args, **kwargs):
        """Resolve ``issue_id`` (following merge redirects) into a Group and
        inject it into ``kwargs``."""
        # TODO(tkaemming): Ideally this would 302-redirect to the merged
        # group instead of serving the new group's data in place. (Not a
        # 301: the destination can change again after another merge, even
        # though chained permanent redirects would still resolve.) That
        # needs the URL rebuilt -- via string surgery, or by making the
        # endpoint aware of the URL pattern that dispatched it so it can be
        # reversed with the correct `issue_id` keyword argument.
        queryset = Group.objects.select_related('project', 'project__organization')
        try:
            group, _ = get_group_with_redirect(issue_id, queryset=queryset)
        except Group.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, group)

        with configure_scope() as scope:
            scope.set_tag("project", group.project_id)
            scope.set_tag("organization", group.project.organization_id)

        if group.status in EXCLUDED_STATUSES:
            raise ResourceDoesNotExist

        request._request.organization = group.project.organization

        kwargs['group'] = group

        return (args, kwargs)
Esempio n. 13
0
    def bind_auth(self, auth):
        """Capture agent/protocol details from ``auth`` and tag them on the
        current scope."""
        agent = auth.client
        version = auth.version
        self.agent = agent
        self.version = version

        with configure_scope() as scope:
            scope.set_tag("agent", agent)
            scope.set_tag("protocol", version)
Esempio n. 14
0
    def get(self, request, *args, **kwargs):
        """Render the Jira add-on widget, falling back to the error or
        sign-in pages when the tenant or user cannot be resolved. Tags the
        scope with a "result" describing the outcome."""
        with configure_scope() as scope:
            try:
                # make sure this exists and is valid
                jira_auth = self.get_jira_auth()
            except (ApiError, JiraTenant.DoesNotExist, ExpiredSignatureError) as e:
                scope.set_tag("result", f"error.{e.__class__.__name__}")
                return self.get_response("error.html")

            if request.user.is_anonymous:
                scope.set_tag("result", "signin")
                return self.get_response("signin.html")

            org = jira_auth.organization
            context = self.get_context()
            if org is None:
                context.update({
                    "error_message": (
                        "You still need to configure this plugin, which "
                        "can be done from the Manage Add-ons page."
                    )
                })
                scope.set_tag("result", "error.no_org")
                return self.get_response("error.html", context)

            bind_organization_context(org)
            context.update({"organization_slug": org.slug})

            scope.set_tag("result", "success")
            return self.get_response("widget.html", context)
Esempio n. 15
0
def _do_preprocess_event(cache_key, data, start_time, event_id, process_event):
    """Fetch the event payload and either enqueue preprocessing or go
    straight to save_event."""
    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'})
        error_logger.error('preprocess.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project)

    if should_process(data):
        process_event.delay(cache_key=cache_key, start_time=start_time, event_id=event_id)
        return

    # No preprocessing needed -- jump directly to save_event. When the
    # payload lives in the cache we drop the inline copy so it is not
    # serialized twice.
    save_event.delay(
        cache_key=cache_key,
        data=None if cache_key else data,
        start_time=start_time,
        event_id=event_id,
        project_id=project,
    )
Esempio n. 16
0
def _do_preprocess_event(cache_key, data, start_time, event_id, process_event):
    """Load the event payload and dispatch it to preprocessing or saving."""
    if cache_key:
        data = default_cache.get(cache_key)

    if data is None:
        # Cache miss -- the payload is gone, nothing more we can do.
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'}, skip_internal=False)
        error_logger.error('preprocess.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project)

    if should_process(data):
        process_event.delay(cache_key=cache_key, start_time=start_time, event_id=event_id)
    else:
        # No preprocessing required, so go straight to save_event. Drop the
        # inline payload when it is available from the cache.
        if cache_key:
            data = None
        save_event.delay(
            cache_key=cache_key, data=data, start_time=start_time, event_id=event_id,
            project_id=project
        )
Esempio n. 17
0
    def convert_args(self, request, monitor_id, *args, **kwargs):
        """Resolve the monitor and project for ``monitor_id`` and bind both
        into ``kwargs``."""
        try:
            monitor = Monitor.objects.get(guid=monitor_id)
        except Monitor.DoesNotExist:
            raise ResourceDoesNotExist

        project = Project.objects.get_from_cache(id=monitor.project_id)
        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        # Project-scoped tokens may only access their own project.
        if hasattr(request.auth, 'project_id') and project.id != request.auth.project_id:
            return self.respond(status=400)

        if not features.has('organizations:monitors',
                            project.organization, actor=request.user):
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        with configure_scope() as scope:
            scope.set_tag("organization", project.organization_id)
            scope.set_tag("project", project.id)

        request._request.organization = project.organization

        kwargs['monitor'] = monitor
        kwargs['project'] = project
        return (args, kwargs)
Esempio n. 18
0
def _do_preprocess_event(cache_key, data, start_time, event_id, process_task,
                         project):
    """Load event data and route it to processing or straight to save.

    ``project`` may be supplied to skip the cache lookup; when given it
    must match the project id embedded in the event payload.
    """
    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if data is None:
        metrics.incr("events.failed",
                     tags={
                         "reason": "cache",
                         "stage": "pre"
                     },
                     skip_internal=False)
        error_logger.error("preprocess.failed.empty",
                           extra={"cache_key": cache_key})
        return

    original_data = data
    data = CanonicalKeyDict(data)
    project_id = data["project"]

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    if project is None:
        project = Project.objects.get_from_cache(id=project_id)
    else:
        assert project.id == project_id, (project.id, project_id)

    if not should_process(data):
        submit_save_event(project, cache_key, event_id, start_time,
                          original_data)
        return

    from_reprocessing = process_task is process_event_from_reprocessing
    submit_process(project, from_reprocessing, cache_key, event_id,
                   start_time, original_data)
Esempio n. 19
0
    def convert_args(self, request, monitor_id, *args, **kwargs):
        """Look up a monitor by guid, validate access, and hand the monitor
        and its project to the endpoint via ``kwargs``."""
        try:
            monitor = Monitor.objects.get(guid=monitor_id)
        except Monitor.DoesNotExist:
            raise ResourceDoesNotExist

        project = Project.objects.get_from_cache(id=monitor.project_id)
        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        # Reject tokens scoped to a different project.
        if hasattr(request.auth, 'project_id') and project.id != request.auth.project_id:
            return self.respond(status=400)

        has_monitors = features.has('organizations:monitors',
                                    project.organization, actor=request.user)
        if not has_monitors:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        with configure_scope() as scope:
            scope.set_tag("organization", project.organization_id)
            scope.set_tag("project", project.id)

        request._request.organization = project.organization

        kwargs.update({'monitor': monitor, 'project': project})
        return (args, kwargs)
Esempio n. 20
0
    def authenticate_credentials(self, request, token_str):
        """Resolve ``token_str`` into an active (user, token) pair.

        Raises AuthenticationFailed for unknown, expired, or deactivated
        tokens, users, or applications.
        """
        # System tokens take priority over database-backed API tokens.
        token = SystemToken.from_request(request, token_str)
        if not token:
            try:
                token = (
                    ApiToken.objects.filter(token=token_str)
                    .select_related("user", "application")
                    .get()
                )
            except ApiToken.DoesNotExist:
                raise AuthenticationFailed("Invalid token")

        if token.is_expired():
            raise AuthenticationFailed("Token expired")

        if not token.user.is_active:
            raise AuthenticationFailed("User inactive or deleted")

        if token.application and not token.application.is_active:
            raise AuthenticationFailed("UserApplication inactive or deleted")

        with configure_scope() as scope:
            scope.set_tag("api_token_type", self.token_name)
            scope.set_tag("api_token", token.id)
            scope.set_tag("api_token_is_sentry_app", getattr(token.user, "is_sentry_app", False))

        return (token.user, token)
Esempio n. 21
0
def index_event_tags(organization_id, project_id, event_id, tags,
                     group_id, environment_id, date_added=None, **kwargs):
    """Record the tags attached to an event in the tagstore."""
    from sentry import tagstore

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    optional_kwargs = {}
    if date_added is not None:
        optional_kwargs['date_added'] = date_added

    metrics.timing('tagstore.tags_per_event', len(tags),
                   tags={'organization_id': organization_id})

    tagstore.create_event_tags(project_id=project_id,
                               group_id=group_id,
                               environment_id=environment_id,
                               event_id=event_id,
                               tags=tags,
                               **optional_kwargs)
Esempio n. 22
0
        def new_disable_transaction_events():
            # Assert the current span is sampled before the call and
            # unsampled after disable_transaction_events() runs.
            with configure_scope() as scope:
                assert scope.span.sampled
                assert scope.span.transaction
                disable_transaction_events()
                assert not scope.span.sampled

            # Record that this wrapper actually executed.
            calls.append(1)
Esempio n. 23
0
    def get_filter_params(self,
                          request,
                          organization,
                          date_filter_optional=False,
                          project_ids=None):
        """
        Extracts common filter parameters from the request and returns them
        in a standard format.

        :param request: the incoming request
        :param organization: Organization to get params for
        :param date_filter_optional: controls what happens when no date
            filter parameters are passed -- if False, no date filtering
            occurs; if True, default values are provided
        :param project_ids: project ids if they were already grabbed but
            not validated yet
        :return: A dict with keys:
         - start: start date of the filter
         - end: end date of the filter
         - project_id: A list of project ids to filter on
         - environment(optional): If environments were passed in, a list of
         environment names
        """
        # Time range first; tag the window size to help triage slow queries.
        try:
            start, end = get_date_range_from_params(
                request.GET, optional=date_filter_optional)
            if start and end:
                with configure_scope() as scope:
                    scope.set_tag("query.period",
                                  (end - start).total_seconds())
        except InvalidParams as e:
            raise ParseError(detail=u"Invalid date range: {}".format(e))

        with sentry_sdk.start_span(
                op="PERF: org.get_filter_params - projects"):
            try:
                projects = self.get_projects(request, organization,
                                             project_ids)
            except ValueError:
                raise ParseError(detail="Invalid project ids")

        if not projects:
            raise NoProjects

        environments = self.get_environments(request, organization)
        params = {
            "start": start,
            "end": end,
            "project_id": [project.id for project in projects],
            "organization_id": organization.id,
        }
        if environments:
            params["environment"] = [env.name for env in environments]
            params["environment_objects"] = environments

        return params
Esempio n. 24
0
def disable_transaction_events():
    """
    Do not send a transaction event for the current transaction.

    This is used in StoreView to prevent infinite recursion.
    """
    with configure_scope() as scope:
        span = scope.span
        if span:
            span.sampled = False
Esempio n. 25
0
def plugin_post_process_group(plugin_slug, event, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    with configure_scope() as scope:
        scope.set_tag("project", event.project_id)

    safe_execute(ioc.app.plugins.get(plugin_slug).post_process,
                 event=event, group=event.group, **kwargs)
Esempio n. 26
0
    def __init__(self, data, **kwargs):
        """Run ``data`` through the Rust StoreNormalizer when applicable,
        then initialize as a CanonicalKeyDict."""
        rust_renormalized = _should_skip_to_python(data.get('event_id'))
        if rust_renormalized:
            data = StoreNormalizer(
                is_renormalize=True).normalize_event(dict(data))

        metrics.incr('rust.renormalized', tags={'value': rust_renormalized})

        with configure_scope() as scope:
            scope.set_tag("rust.renormalized", rust_renormalized)

        CanonicalKeyDict.__init__(self, data, **kwargs)
Esempio n. 27
0
    def convert_args(self, request, sentry_app_slug, *args, **kwargs):
        """Resolve ``sentry_app_slug`` into a SentryApp for the handler."""
        try:
            app = SentryApp.objects.get_from_cache(slug=sentry_app_slug)
        except SentryApp.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, app)

        with configure_scope() as scope:
            scope.set_tag("sentry_app", app.id)

        kwargs['sentry_app'] = app
        return (args, kwargs)
Esempio n. 28
0
    def convert_args(self, request, uuid, *args, **kwargs):
        """Resolve the SentryAppInstallation for ``uuid`` and bind it into
        ``kwargs`` as ``install``."""
        try:
            installation = SentryAppInstallation.objects.get_from_cache(uuid=uuid)
        except SentryAppInstallation.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, installation)

        with configure_scope() as scope:
            scope.set_tag("sentry_app_installation", installation.id)

        kwargs['install'] = installation
        return (args, kwargs)
Esempio n. 29
0
    def convert_args(self, request, sentry_app_slug, *args, **kwargs):
        """Fetch the SentryApp matching ``sentry_app_slug`` (404 when
        missing) and expose it to the view."""
        try:
            app = SentryApp.objects.get(slug=sentry_app_slug)
        except SentryApp.DoesNotExist:
            raise Http404

        self.check_object_permissions(request, app)

        with configure_scope() as scope:
            scope.set_tag("sentry_app", app.slug)

        kwargs["sentry_app"] = app
        return (args, kwargs)
Esempio n. 30
0
    def convert_args(self, request, uuid, *args, **kwargs):
        """Fetch the SentryAppInstallation for ``uuid`` (404 when missing)
        and expose it to the view."""
        try:
            install = SentryAppInstallation.objects.get(uuid=uuid)
        except SentryAppInstallation.DoesNotExist:
            raise Http404

        self.check_object_permissions(request, install)

        with configure_scope() as scope:
            scope.set_tag("sentry_app_installation", install.uuid)

        kwargs["installation"] = install
        return (args, kwargs)
Esempio n. 31
0
    def __init__(self, data, skip_renormalization=False, **kwargs):
        """Optionally renormalize ``data`` through the Rust StoreNormalizer
        before initializing as a CanonicalKeyDict.

        Renormalization is skipped when explicitly requested or when the
        payload is already a (possibly node-wrapped) EventDict.
        """
        is_renormalized = isinstance(data, EventDict) or (
            isinstance(data, NodeData) and isinstance(data.data, EventDict))

        with configure_scope() as scope:
            scope.set_tag("rust.is_renormalized", is_renormalized)
            scope.set_tag("rust.skip_renormalization", skip_renormalization)
            scope.set_tag("rust.renormalized", "null")

        if not (skip_renormalization or is_renormalized):
            rust_renormalized = _should_skip_to_python(data.get('event_id'))
            if rust_renormalized:
                data = StoreNormalizer(
                    is_renormalize=True).normalize_event(dict(data))

            metrics.incr('rust.renormalized',
                         tags={'value': rust_renormalized})

            with configure_scope() as scope:
                scope.set_tag("rust.renormalized", rust_renormalized)

        CanonicalKeyDict.__init__(self, data, **kwargs)
Esempio n. 32
0
    def convert_args(self, request, sentry_app_slug, *args, **kwargs):
        """Look up the SentryApp by slug (404 when missing) and hand it to
        the view."""
        from sentry.models import SentryApp  # Django 1.9 setup issue
        try:
            sentry_app = SentryApp.objects.get(slug=sentry_app_slug)
        except SentryApp.DoesNotExist:
            raise Http404

        self.check_object_permissions(request, sentry_app)

        with configure_scope() as scope:
            scope.set_tag("sentry_app", sentry_app.slug)

        kwargs['sentry_app'] = sentry_app
        return (args, kwargs)
Esempio n. 33
0
    def convert_args(self, request, uuid, *args, **kwargs):
        """Look up the SentryAppInstallation by uuid (404 when missing) and
        hand it to the view."""
        from sentry.models import SentryAppInstallation  # Django 1.9 setup issue
        try:
            installation = SentryAppInstallation.objects.get(uuid=uuid)
        except SentryAppInstallation.DoesNotExist:
            raise Http404

        self.check_object_permissions(request, installation)

        with configure_scope() as scope:
            scope.set_tag("sentry_app_installation", installation.uuid)

        kwargs['installation'] = installation
        return (args, kwargs)
Esempio n. 34
0
    def authenticate_credentials(self, token):
        """Authenticate a DSN token, returning (AnonymousUser, ProjectKey).

        Raises AuthenticationFailed when the DSN is unknown or inactive.
        """
        try:
            key = ProjectKey.from_dsn(token)
        except ProjectKey.DoesNotExist:
            raise AuthenticationFailed('Invalid token')

        if not key.is_active:
            raise AuthenticationFailed('Invalid token')

        with configure_scope() as scope:
            scope.set_tag("api_token_type", self.token_name)
            scope.set_tag("api_project_key", key.id)

        return (AnonymousUser(), key)
Esempio n. 35
0
def plugin_post_process_group(plugin_slug, event, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    # Attribute any errors raised below to the event's project.
    with configure_scope() as scope:
        scope.set_tag("project", event.project_id)

    safe_execute(
        plugins.get(plugin_slug).post_process,
        event=event,
        group=event.group,
        expected_errors=(PluginError,),
        **kwargs
    )
Esempio n. 36
0
    def authenticate_credentials(self, token):
        """Validate a DSN token and return an anonymous user bound to its key."""
        try:
            project_key = ProjectKey.from_dsn(token)
        except ProjectKey.DoesNotExist:
            raise AuthenticationFailed('Invalid token')

        # Reject disabled keys with the same message as unknown ones.
        if not project_key.is_active:
            raise AuthenticationFailed('Invalid token')

        with configure_scope() as scope:
            scope.set_tag("api_token_type", self.token_name)
            scope.set_tag("api_project_key", project_key.id)

        return (AnonymousUser(), project_key)
Esempio n. 37
0
def inner_dsym_download(project_id: int, config_id: str) -> None:
    """Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.

    Lists the app's builds, persists any not-yet-fetched ones as pending
    build states, then downloads and ingests dSYMs build by build.
    """
    # TODO(flub): we should only run one task ever for a project.  Is
    # sentry.cache.default_cache the right thing to put a "mutex" into?  See how
    # sentry.tasks.assemble uses this.
    with sdk.configure_scope() as scope:
        scope.set_tag("project", project_id)

    project = Project.objects.get(pk=project_id)
    config = appconnect.AppStoreConnectConfig.from_project_config(
        project, config_id)
    client = appconnect.AppConnectClient.from_config(config)

    # persist all fetched builds into the database as "pending"
    builds = []
    listed_builds = client.list_builds()
    with sentry_sdk.start_span(
            op="appconnect-update-builds",
            description="Update AppStoreConnect builds in database"):
        for build in listed_builds:
            build_state = get_or_create_persisted_build(project, config, build)
            if not build_state.fetched:
                builds.append((build, build_state))

    update_build_refresh_date(project, config_id)

    itunes_client = client.itunes_client()
    for (build, build_state) in builds:
        with tempfile.NamedTemporaryFile() as dsyms_zip:
            try:
                itunes_client.download_dsyms(build,
                                             pathlib.Path(dsyms_zip.name))
            except appconnect.NoDsymsError:
                logger.debug("No dSYMs for build %s", build)
            except ITunesSessionExpiredException:
                logger.debug("Error fetching dSYMs: expired iTunes session")
                # we early-return here to avoid trying all the other builds
                # as well, since an expired token will error for all of them.
                # we also swallow the error and not report it because this is
                # a totally expected error and not actionable.
                return
            else:
                # Only ingest the zip when the download actually succeeded.
                create_difs_from_dsyms_zip(dsyms_zip.name, project)
                logger.debug("Uploaded dSYMs for build %s", build)

        # If we either downloaded, or didn't need to download the dSYMs
        # (there was no dSYM url), we check off this build.
        build_state.fetched = True
        build_state.save()
Esempio n. 38
0
    def convert_args(self, request, uuid, *args, **kwargs):
        """Look up the SentryAppInstallation for ``uuid`` and pass it along.

        Raises Http404 when no installation matches.
        """
        try:
            installation = SentryAppInstallation.objects.get(uuid=uuid)
        except SentryAppInstallation.DoesNotExist:
            raise Http404

        self.check_object_permissions(request, installation)

        # Tag the scope so downstream errors carry the installation id.
        with configure_scope() as scope:
            scope.set_tag("sentry_app_installation", installation.uuid)

        kwargs['installation'] = installation
        return (args, kwargs)
Esempio n. 39
0
    def __init__(self, data, skip_renormalization=False, **kwargs):
        """Wrap event ``data``, renormalizing via the Rust normalizer unless
        the payload is already an (or wraps an) EventDict."""
        already_normalized = isinstance(data, EventDict) or (
            isinstance(data, NodeData) and isinstance(data.data, EventDict)
        )

        with configure_scope() as scope:
            scope.set_tag("rust.is_renormalized", already_normalized)
            scope.set_tag("rust.skip_renormalization", skip_renormalization)
            scope.set_tag("rust.renormalized", "null")

        if not (skip_renormalization or already_normalized):
            rust_renormalized = _should_skip_to_python(data.get('event_id'))
            if rust_renormalized:
                data = StoreNormalizer(is_renormalize=True).normalize_event(dict(data))

            metrics.incr('rust.renormalized',
                         tags={'value': rust_renormalized})

            with configure_scope() as scope:
                scope.set_tag("rust.renormalized", rust_renormalized)

        CanonicalKeyDict.__init__(self, data, **kwargs)
Esempio n. 40
0
    def convert_args(self, request, sentry_app_slug, *args, **kwargs):
        """Inject the SentryApp matching ``sentry_app_slug`` into kwargs."""
        try:
            app = SentryApp.objects.get(slug=sentry_app_slug)
        except SentryApp.DoesNotExist:
            raise Http404

        self.check_object_permissions(request, app)

        # Tag the scope so downstream errors carry the app slug.
        with configure_scope() as scope:
            scope.set_tag("sentry_app", app.slug)

        kwargs['sentry_app'] = app
        return (args, kwargs)
Esempio n. 41
0
    def authenticate_credentials(self, userid, password):
        """Authenticate an API key supplied as the basic-auth username.

        API keys carry no password; a non-empty password means this request
        is not key-based auth, so defer to other authenticators.
        """
        if password:
            return None

        try:
            api_key = ApiKey.objects.get_from_cache(key=userid)
        except ApiKey.DoesNotExist:
            raise AuthenticationFailed('API key is not valid')

        if not api_key.is_active:
            raise AuthenticationFailed('Key is disabled')

        with configure_scope() as scope:
            scope.set_tag("api_key", api_key.id)

        return (AnonymousUser(), api_key)
Esempio n. 42
0
    def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
        """Resolve (organization_slug, project_slug) to a visible Project.

        Follows ProjectRedirect entries for renamed projects, raising
        ProjectMoved with the corrected URL so the client can retry.

        Raises:
            ProjectMoved: the project was renamed and lives at a new URL.
            ResourceDoesNotExist: no such project, or it is not visible.
        """
        try:
            project = Project.objects.filter(
                organization__slug=organization_slug,
                slug=project_slug,
            ).select_related('organization').prefetch_related('teams').get()
        except Project.DoesNotExist:
            try:
                # Project may have been renamed
                redirect = ProjectRedirect.objects.select_related('project')
                redirect = redirect.get(
                    organization__slug=organization_slug,
                    redirect_slug=project_slug
                )

                # get full path so that we keep query strings
                requested_url = request.get_full_path()
                new_url = requested_url.replace(
                    'projects/%s/%s/' %
                    (organization_slug, project_slug), 'projects/%s/%s/' %
                    (organization_slug, redirect.project.slug))

                # Resource was moved/renamed if the requested url is different than the new url
                if requested_url != new_url:
                    raise ProjectMoved(new_url, redirect.project.slug)

                # otherwise project doesn't exist
                raise ResourceDoesNotExist
            except ProjectRedirect.DoesNotExist:
                raise ResourceDoesNotExist

        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        # Tag Sentry's own error scope so failures are attributable.
        with configure_scope() as scope:
            scope.set_tag("project", project.id)
            scope.set_tag("organization", project.organization_id)

        request._request.organization = project.organization

        kwargs['project'] = project
        return (args, kwargs)
Esempio n. 43
0
def report_monitor_begin(task, **kwargs):
    """Tell the Sentry monitors API that a monitored task has started.

    No-op unless both SENTRY_DSN and API_ROOT are configured and the task
    carries an X-Sentry-Monitor header.
    """
    if not SENTRY_DSN or not API_ROOT:
        return

    monitor_id = task.request.headers.get('X-Sentry-Monitor')
    if not monitor_id:
        return

    with configure_scope() as scope:
        scope.set_context('monitor', {'id': monitor_id})

    session = SafeSession()
    response = session.post(
        u'{}/api/0/monitors/{}/checkins/'.format(API_ROOT, monitor_id),
        headers={'Authorization': u'DSN {}'.format(SENTRY_DSN)},
        json={'status': 'in_progress'},
    )
    response.raise_for_status()
    # HACK: stash the check-in id (plus a timestamp) on the task headers so
    # the matching "finish" hook can close out this check-in.
    task.request.headers['X-Sentry-Monitor-CheckIn'] = (response.json()['id'], time())
Esempio n. 44
0
    def authenticate_credentials(self, relay_id, relay_sig, request):
        """Verify a relay request signature against the relay's public key."""
        # Tag the scope up-front so even auth failures are attributable.
        with configure_scope() as scope:
            scope.set_tag('relay_id', relay_id)

        try:
            relay = Relay.objects.get(relay_id=relay_id)
        except Relay.DoesNotExist:
            raise AuthenticationFailed('Unknown relay')

        try:
            unpacked = relay.public_key_object.unpack(request.body, relay_sig,
                                                      max_age=60 * 5)
        except semaphore.UnpackError:
            raise AuthenticationFailed('Invalid relay signature')

        request.relay = relay
        request.relay_request_data = unpacked

        # TODO(mitsuhiko): can we return the relay here?  would be nice if we
        # could find some common interface for it
        return (AnonymousUser(), None)
Esempio n. 45
0
        def _wrapped(*args, **kwargs):
            # TODO(dcramer): we want to tag a transaction ID, but overriding
            # the base on app.task seems to cause problems w/ Celery internals
            transaction_id = kwargs.pop('__transaction_id', None)

            metric_key = 'jobs.duration'
            # Per-call metric instance: optionally suffixed by the task args.
            instance = (
                u'{}.{}'.format(name, stat_suffix(*args, **kwargs))
                if stat_suffix
                else name
            )

            with configure_scope() as scope:
                scope.set_tag('task_name', name)
                scope.set_tag('transaction_id', transaction_id)

            # Time the task and record its memory delta under the same instance.
            with metrics.timer(metric_key, instance=instance), \
                    track_memory_usage('jobs.memory_change', instance=instance):
                result = func(*args, **kwargs)

            return result
Esempio n. 46
0
    def convert_args(self, request, organization_slug, team_slug, *args, **kwargs):
        """Resolve an (org slug, team slug) pair to a visible Team."""
        try:
            team = Team.objects.select_related('organization').get(
                organization__slug=organization_slug,
                slug=team_slug,
            )
        except Team.DoesNotExist:
            raise ResourceDoesNotExist

        if team.status != TeamStatus.VISIBLE:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, team)

        # Tag the scope so errors are attributable to this organization.
        with configure_scope() as scope:
            scope.set_tag("organization", team.organization_id)

        request._request.organization = team.organization

        kwargs['team'] = team
        return (args, kwargs)
Esempio n. 47
0
    def authenticate_credentials(self, token):
        """Resolve a bearer token string to a (user, ApiToken) pair.

        Rejects expired tokens, inactive users, and tokens whose owning
        application has been deactivated.
        """
        try:
            api_token = ApiToken.objects.filter(
                token=token,
            ).select_related('user', 'application').get()
        except ApiToken.DoesNotExist:
            raise AuthenticationFailed('Invalid token')

        if api_token.is_expired():
            raise AuthenticationFailed('Token expired')

        if not api_token.user.is_active:
            raise AuthenticationFailed('User inactive or deleted')

        if api_token.application and not api_token.application.is_active:
            raise AuthenticationFailed('UserApplication inactive or deleted')

        with configure_scope() as scope:
            scope.set_tag("api_token_type", self.token_name)
            scope.set_tag("api_token", api_token.id)

        return (api_token.user, api_token)
Esempio n. 48
0
    def convert_args(self, request, organization_slug, *args, **kwargs):
        """Resolve ``organization_slug`` and stash the Organization in kwargs."""
        try:
            organization = Organization.objects.get_from_cache(slug=organization_slug)
        except Organization.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, organization)

        # Tag the scope so errors are attributable to this organization.
        with configure_scope() as scope:
            scope.set_tag("organization", organization.id)

        request._request.organization = organization

        # Track the 'active' organization when the request came from
        # a cookie based agent (react app)
        if request.auth is None and request.user:
            request.session['activeorg'] = organization.slug

        kwargs['organization'] = organization
        return (args, kwargs)
Esempio n. 49
0
def _do_preprocess_event(cache_key, data, start_time, event_id, process_task):
    """Load event data (from cache if needed) and route it onward.

    Events that require processing go to ``submit_process``; everything
    else goes straight to ``submit_save_event``.
    """
    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if data is None:
        # The payload expired out of the cache before we got to it.
        metrics.incr('events.failed', tags={'reason': 'cache', 'stage': 'pre'}, skip_internal=False)
        error_logger.error('preprocess.failed.empty', extra={'cache_key': cache_key})
        return

    original_data = data
    data = CanonicalKeyDict(data)
    project_id = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    project = Project.objects.get_from_cache(id=project_id)

    if should_process(data):
        submit_process(project, process_task is process_event_from_reprocessing,
                       cache_key, event_id, start_time, original_data)
        return

    submit_save_event(project, cache_key, event_id, start_time, original_data)
Esempio n. 50
0
    def convert_args(self, request, organization_slug, *args, **kwargs):
        """Resolve the organization slug, enforce permissions, and tag the scope."""
        try:
            org = Organization.objects.get_from_cache(slug=organization_slug)
        except Organization.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, org)

        with configure_scope() as scope:
            scope.set_tag("organization", org.id)

        request._request.organization = org

        # Track the 'active' organization when the request came from
        # a cookie based agent (react app)
        # Never track any org (regardless of whether the user does or doesn't have
        # membership in that org) when the user is in active superuser mode
        if request.auth is None and request.user and not is_active_superuser(request):
            request.session['activeorg'] = org.slug

        kwargs['organization'] = org
        return (args, kwargs)
Esempio n. 51
0
def _do_process_event(cache_key, start_time, event_id, process_task,
                      data=None):
    """Run enhancers and preprocessors over a cached event, then submit a save.

    Pulls the payload from ``default_cache`` when ``data`` is not supplied,
    lets plugin enhancers / stacktrace processing / plugin preprocessors
    mutate it, and finally hands it to ``submit_save_event``.  Symbolication
    that is not ready yet reschedules this task via ``retry_process_event``.
    """
    from sentry.plugins import plugins

    if data is None:
        data = default_cache.get(cache_key)

    if data is None:
        # Payload expired out of the cache before we could process it.
        metrics.incr(
            'events.failed',
            tags={
                'reason': 'cache',
                'stage': 'process'},
            skip_internal=False)
        error_logger.error('process.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project_id = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    # Tracks whether any processor changed the payload; only then do we pay
    # the cost of re-writing the cache entry below.
    has_changed = False

    # Fetch the reprocessing revision
    reprocessing_rev = reprocessing.get_reprocessing_revision(project_id)

    try:
        # Event enhancers.  These run before anything else.
        for plugin in plugins.all(version=2):
            enhancers = safe_execute(plugin.get_event_enhancers, data=data)
            for enhancer in (enhancers or ()):
                enhanced = safe_execute(enhancer, data, _passthrough_errors=(RetrySymbolication,))
                if enhanced:
                    data = enhanced
                    has_changed = True

        # Stacktrace based event processors.
        new_data = process_stacktraces(data)
        if new_data is not None:
            has_changed = True
            data = new_data
    except RetrySymbolication as e:
        # Symbols are not available yet; retry later unless the event has
        # already spent over an hour in processing.
        if start_time and (time() - start_time) > 3600:
            raise RuntimeError('Event spent one hour in processing')

        retry_process_event.apply_async(
            args=(),
            kwargs={
                'process_task_name': process_task.__name__,
                'task_kwargs': {
                    'cache_key': cache_key,
                    'event_id': event_id,
                    'start_time': start_time,
                }
            },
            countdown=e.retry_after
        )
        return

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        processors = safe_execute(
            plugin.get_event_preprocessors, data=data, _with_transaction=False
        )
        for processor in (processors or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project_id, 'Project cannot be mutated by preprocessor'
    project = Project.objects.get_from_cache(id=project_id)

    # We cannot persist canonical types in the cache, so we need to
    # downgrade this.
    if isinstance(data, CANONICAL_TYPES):
        data = dict(data.items())

    if has_changed:
        issues = data.get('processing_issues')
        try:
            if issues and create_failed_event(
                cache_key, project_id, list(issues.values()),
                event_id=event_id, start_time=start_time,
                reprocessing_rev=reprocessing_rev
            ):
                return
        except RetryProcessing:
            # If `create_failed_event` indicates that we need to retry we
            # invoke ourselves again.  This happens when the reprocessing
            # revision changed while we were processing.
            from_reprocessing = process_task is process_event_from_reprocessing
            submit_process(project, from_reprocessing, cache_key, event_id, start_time, data)
            process_task.delay(cache_key, start_time=start_time,
                               event_id=event_id)
            return

        default_cache.set(cache_key, data, 3600)

    submit_save_event(project, cache_key, event_id, start_time, data)
Esempio n. 52
0
def assemble_dif(project_id, name, checksum, chunks, **kwargs):
    """Assemble uploaded chunks into a debug information file (DIF).

    Reassembles the chunked upload, validates it contains exactly one
    architecture, registers it as a ProjectDebugFile, and reports progress
    through ``set_assemble_status`` so the uploader can poll the result.
    """
    from sentry.models import ChunkFileState, debugfile, Project, \
        ProjectDebugFile, set_assemble_status, BadDif
    from sentry.reprocessing import bump_reprocessing_revision

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    project = Project.objects.filter(id=project_id).get()
    set_assemble_status(project, checksum, ChunkFileState.ASSEMBLING)

    # Assemble the chunks into files
    rv = assemble_file(project, name, checksum, chunks,
                       file_type='project.dif')

    # If no file has been created this means that the file failed to
    # assemble because of bad input data.  Return.
    if rv is None:
        return

    file, temp_file = rv
    # The assembled file is deleted on every failure path; only a
    # successfully registered DIF keeps it (delete_file flips to False).
    delete_file = True
    try:
        with temp_file:
            # We only permit split difs to hit this endpoint.  The
            # client is required to split them up first or we error.
            try:
                result = debugfile.detect_dif_from_path(temp_file.name, name=name)
            except BadDif as e:
                set_assemble_status(project, checksum, ChunkFileState.ERROR,
                                    detail=e.args[0])
                return

            if len(result) != 1:
                set_assemble_status(project, checksum, ChunkFileState.ERROR,
                                    detail='Contained wrong number of '
                                    'architectures (expected one, got %s)'
                                    % len(result))
                return

            dif, created = debugfile.create_dif_from_id(project, result[0], file=file)
            indicate_success = True
            delete_file = False

            if created:
                # Bump the reprocessing revision since the symbol has changed
                # and might resolve processing issues. If the file was not
                # created, someone else has created it and will bump the
                # revision instead.
                bump_reprocessing_revision(project)

                # Try to generate caches from this DIF immediately. If this
                # fails, we can capture the error and report it to the uploader.
                # Also, we remove the file to prevent it from erroring again.
                error = ProjectDebugFile.difcache.generate_caches(project, dif, temp_file.name)
                if error is not None:
                    set_assemble_status(project, checksum, ChunkFileState.ERROR,
                                        detail=error)
                    indicate_success = False
                    dif.delete()

            if indicate_success:
                set_assemble_status(project, checksum, ChunkFileState.OK,
                                    detail=serialize(dif))
    finally:
        if delete_file:
            file.delete()
Esempio n. 53
0
 def bind_project(self, project):
     """Bind *project* to this object and tag the Sentry scope with its id."""
     self.project = project
     self.project_id = project.id
     with configure_scope() as scope:
         scope.set_tag("project", self.project_id)
Esempio n. 54
0
def post_process_group(event, is_new, is_regression, is_sample, is_new_group_environment, **kwargs):
    """
    Fires post processing hooks for a group.
    """
    # Consistent reads so the dedupe check below sees the freshest state.
    with snuba.options_override({'consistent': True}):
        if check_event_already_post_processed(event):
            logger.info('post_process.skipped', extra={
                'project_id': event.project_id,
                'event_id': event.event_id,
                'reason': 'duplicate',
            })
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Group since we're pickling the whole Event object
        # which may contain a stale Group.
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        project_id = event.group.project_id
        with configure_scope() as scope:
            scope.set_tag("project", project_id)

        # Re-bind Project since we're pickling the whole Event object
        # which may contain a stale Project.
        event.project = Project.objects.get_from_cache(id=project_id)

        _capture_stats(event, is_new)

        # we process snoozes before rules as it might create a regression
        has_reappeared = process_snoozes(event.group)

        handle_owner_assignment(event.project, event.group, event)

        rp = RuleProcessor(event, is_new, is_regression, is_new_group_environment, has_reappeared)
        has_alert = False
        # TODO(dcramer): ideally this would fanout, but serializing giant
        # objects back and forth isn't super efficient
        for callback, futures in rp.apply():
            has_alert = True
            safe_execute(callback, event, futures)

        if features.has(
            'projects:servicehooks',
            project=event.project,
        ):
            allowed_events = set(['event.created'])
            if has_alert:
                allowed_events.add('event.alert')

            if allowed_events:
                # Fan out one task per service hook subscribed to a matching event.
                for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                    if any(e in allowed_events for e in events):
                        process_service_hook.delay(
                            servicehook_id=servicehook_id,
                            event=event,
                        )

        if is_new:
            process_resource_change_bound.delay(
                action='created',
                sender='Group',
                instance_id=event.group_id,
            )

        for plugin in plugins.for_project(event.project):
            # NOTE(review): 'is_regresion' looks like a typo for 'is_regression';
            # plugins receive it via **kwargs under the misspelled name, so
            # renaming it would change the kwarg plugins see — confirm before fixing.
            plugin_post_process_group(
                plugin_slug=plugin.slug,
                event=event,
                is_new=is_new,
                is_regresion=is_regression,
                is_sample=is_sample,
            )

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            group=event.group,
            event=event,
            primary_hash=kwargs.get('primary_hash'),
        )
Esempio n. 55
0
def _do_save_event(cache_key=None, data=None, start_time=None, event_id=None,
                   project_id=None, **kwargs):
    """
    Saves an event to the database.

    Tracks an ACCEPTED outcome on success, refunds quota and tracks a
    FILTERED outcome when the event's hash was discarded, and always cleans
    up the cache entries (and attachments) afterwards.
    """
    from sentry.event_manager import HashDiscarded, EventManager
    from sentry import quotas
    from sentry.models import ProjectKey
    from sentry.utils.outcomes import Outcome, track_outcome

    if cache_key and data is None:
        data = default_cache.get(cache_key)

    if data is not None:
        data = CanonicalKeyDict(data)

    if event_id is None and data is not None:
        event_id = data['event_id']

    # only when we come from reprocessing we get a project_id sent into
    # the task.
    if project_id is None:
        project_id = data.pop('project')

    key_id = None if data is None else data.get('key_id')
    if key_id is not None:
        key_id = int(key_id)
    timestamp = to_datetime(start_time) if start_time is not None else None

    delete_raw_event(project_id, event_id, allow_hint_clear=True)

    # This covers two cases: where data is None because we did not manage
    # to fetch it from the default cache or the empty dictionary was
    # stored in the default cache.  The former happens if the event
    # expired while being on the queue, the second happens on reprocessing
    # if the raw event was deleted concurrently while we held on to
    # it.  This causes the node store to delete the data and we end up
    # fetching an empty dict.  We could in theory not invoke `save_event`
    # in those cases but it's important that we always clean up the
    # reprocessing reports correctly or they will screw up the UI.  So
    # to future proof this correctly we just handle this case here.
    if not data:
        metrics.incr(
            'events.failed',
            tags={
                'reason': 'cache',
                'stage': 'post'},
            skip_internal=False)
        return

    with configure_scope() as scope:
        scope.set_tag("project", project_id)

    event = None
    try:
        manager = EventManager(data)
        event = manager.save(project_id, assume_normalized=True)

        # Always load attachments from the cache so we can later prune them.
        # Only save them if the event-attachments feature is active, though.
        if features.has('organizations:event-attachments', event.project.organization, actor=None):
            attachments = attachment_cache.get(cache_key) or []
            for attachment in attachments:
                save_attachment(event, attachment)

        # This is where we can finally say that we have accepted the event.
        track_outcome(
            event.project.organization_id,
            event.project.id,
            key_id,
            Outcome.ACCEPTED,
            None,
            timestamp,
            event_id
        )

    except HashDiscarded:
        # The event matched a discarded hash: refund the quota it consumed
        # and record the filtered outcome instead of accepting it.
        # NOTE(review): `Project` is assumed to be imported at module level —
        # it is not in this function's local import list; confirm.
        project = Project.objects.get_from_cache(id=project_id)
        reason = FilterStatKeys.DISCARDED_HASH
        project_key = None
        try:
            if key_id is not None:
                project_key = ProjectKey.objects.get_from_cache(id=key_id)
        except ProjectKey.DoesNotExist:
            pass

        quotas.refund(project, key=project_key, timestamp=start_time)
        track_outcome(
            project.organization_id,
            project_id,
            key_id,
            Outcome.FILTERED,
            reason,
            timestamp,
            event_id
        )

    finally:
        if cache_key:
            default_cache.delete(cache_key)

            # For the unlikely case that we did not manage to persist the
            # event we also delete the key always.
            if event is None or \
               features.has('organizations:event-attachments', event.project.organization, actor=None):
                attachment_cache.delete(cache_key)

        if start_time:
            metrics.timing(
                'events.time-to-process',
                time() - start_time,
                instance=data['platform'])
Esempio n. 56
0
def _do_process_event(cache_key, start_time, event_id, process_task):
    """Run enhancers and preprocessors over a cached event, then queue a save.

    Loads the payload from ``default_cache``, lets plugin enhancers,
    stacktrace processing, and plugin preprocessors mutate it, then enqueues
    ``save_event``.  Processing issues route the event to
    ``create_failed_event`` instead.
    """
    from sentry.plugins import plugins

    data = default_cache.get(cache_key)

    if data is None:
        # Payload expired out of the cache before we could process it.
        metrics.incr(
            'events.failed',
            tags={
                'reason': 'cache',
                'stage': 'process'},
            skip_internal=False)
        error_logger.error('process.failed.empty', extra={'cache_key': cache_key})
        return

    data = CanonicalKeyDict(data)
    project = data['project']

    with configure_scope() as scope:
        scope.set_tag("project", project)

    # Tracks whether any processor changed the payload; only then is the
    # cache entry re-written below.
    has_changed = False

    # Fetch the reprocessing revision
    reprocessing_rev = reprocessing.get_reprocessing_revision(project)

    # Event enhancers.  These run before anything else.
    for plugin in plugins.all(version=2):
        enhancers = safe_execute(plugin.get_event_enhancers, data=data)
        for enhancer in (enhancers or ()):
            enhanced = safe_execute(enhancer, data)
            if enhanced:
                data = enhanced
                has_changed = True

    # Stacktrace based event processors.
    new_data = process_stacktraces(data)
    if new_data is not None:
        has_changed = True
        data = new_data

    # TODO(dcramer): ideally we would know if data changed by default
    # Default event processors.
    for plugin in plugins.all(version=2):
        processors = safe_execute(
            plugin.get_event_preprocessors, data=data, _with_transaction=False
        )
        for processor in (processors or ()):
            result = safe_execute(processor, data)
            if result:
                data = result
                has_changed = True

    assert data['project'] == project, 'Project cannot be mutated by preprocessor'

    if has_changed:
        issues = data.get('processing_issues')
        try:
            if issues and create_failed_event(
                cache_key, project, list(issues.values()),
                event_id=event_id, start_time=start_time,
                reprocessing_rev=reprocessing_rev
            ):
                return
        except RetryProcessing:
            # If `create_failed_event` indicates that we need to retry we
            # invoke ourselves again.  This happens when the reprocessing
            # revision changed while we were processing.
            process_task.delay(cache_key, start_time=start_time,
                               event_id=event_id)
            return

        # We cannot persist canonical types in the cache, so we need to
        # downgrade this.
        if isinstance(data, CANONICAL_TYPES):
            data = dict(data.items())
        default_cache.set(cache_key, data, 3600)

    save_event.delay(
        cache_key=cache_key, data=None, start_time=start_time, event_id=event_id,
        project_id=project
    )