Example #1
    def post(self, request, *args, **kwargs):
        try:
            jira_auth = get_jira_auth_from_request(request)
        except (ApiError, JiraTenant.DoesNotExist):
            return self.get_response("error.html")

        if request.user.is_anonymous:
            return self.get_response("signin.html")

        orgs = self.request.user.get_orgs()

        form = JiraConfigForm([(o.id, o.name) for o in orgs], self.request.POST)

        if form.is_valid():
            try:
                org = orgs.get(id=form.cleaned_data["organization"])
            except Organization.DoesNotExist:
                errors = form._errors.setdefault("organization", ErrorList())
                errors.append("Invalid organization")
            else:
                jira_auth.update(organization=org)
                bind_organization_context(org)

        context = self.get_context()
        context.update({"is_configured": jira_auth.is_configured(), "form": form})

        return self.get_response("config.html", context)
Example #2
    def get(self, request, *args, **kwargs):
        with configure_scope() as scope:
            try:
                # make sure this exists and is valid
                jira_auth = self.get_jira_auth()
            except (ApiError, JiraTenant.DoesNotExist, ExpiredSignatureError) as e:
                scope.set_tag("result", f"error.{e.__class__.__name__}")
                return self.get_response("error.html")

            if request.user.is_anonymous:
                scope.set_tag("result", "signin")
                return self.get_response("signin.html")

            org = jira_auth.organization
            context = self.get_context()
            if org is None:
                context.update(
                    {
                        "error_message": (
                            "You still need to configure this plugin, which "
                            "can be done from the Manage Add-ons page."
                        )
                    }
                )
                scope.set_tag("result", "error.no_org")
                return self.get_response("error.html", context)

            bind_organization_context(org)
            context.update({"organization_slug": org.slug})

            scope.set_tag("result", "success")
            return self.get_response("widget.html", context)
Example #3
    def get(self, request, *args, **kwargs):
        try:
            # make sure this exists and is valid
            jira_auth = self.get_jira_auth()
        except (ApiError, JiraTenant.DoesNotExist, ExpiredSignatureError):
            return self.get_response("error.html")

        if request.user.is_anonymous():
            return self.get_response("signin.html")

        org = jira_auth.organization
        context = self.get_context()
        if org is None:
            context.update({
                "error_message":
                ("You still need to configure this plugin, which "
                 "can be done from the Manage Add-ons page.")
            })
            return self.get_response("error.html", context)
        bind_organization_context(org)
        context.update({
            "sentry_api_url":
            absolute_uri("/api/0/organizations/%s/users/issues/" %
                         (org.slug, )),
            "issue_key":
            self.request.GET.get("issueKey"),
        })

        return self.get_response("widget.html", context)
Example #4
    def convert_args(self, request, monitor_id, checkin_id, *args, **kwargs):
        try:
            monitor = Monitor.objects.get(guid=monitor_id)
        except Monitor.DoesNotExist:
            raise ResourceDoesNotExist

        project = Project.objects.get_from_cache(id=monitor.project_id)
        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        if hasattr(request.auth, "project_id") and project.id != request.auth.project_id:
            return self.respond(status=400)

        if not features.has("organizations:monitors", project.organization, actor=request.user):
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        with configure_scope() as scope:
            scope.set_tag("project", project.id)

        bind_organization_context(project.organization)

        try:
            checkin = MonitorCheckIn.objects.get(monitor=monitor, guid=checkin_id)
        except MonitorCheckIn.DoesNotExist:
            raise ResourceDoesNotExist

        request._request.organization = project.organization

        kwargs.update({"checkin": checkin, "monitor": monitor, "project": project})
        return (args, kwargs)
Example #5
    def convert_args(self, request, monitor_id, *args, **kwargs):
        try:
            monitor = Monitor.objects.get(guid=monitor_id)
        except Monitor.DoesNotExist:
            raise ResourceDoesNotExist

        project = Project.objects.get_from_cache(id=monitor.project_id)
        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        # HACK: This doesn't work since we can't return a 400 from here,
        # and actually just results in a 500.
        if hasattr(request.auth, "project_id") and project.id != request.auth.project_id:
            return self.respond(status=400)

        if not features.has("organizations:monitors", project.organization, actor=request.user):
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        with configure_scope() as scope:
            scope.set_tag("project", project.id)

        bind_organization_context(project.organization)

        request._request.organization = project.organization

        kwargs.update({"monitor": monitor, "project": project})
        return (args, kwargs)
Example #6
    def convert_args(self, request, organization_slug, *args, **kwargs):
        try:
            organization = Organization.objects.get_from_cache(
                slug=organization_slug)
        except Organization.DoesNotExist:
            raise ResourceDoesNotExist

        with sentry_sdk.start_span(
                op="check_object_permissions_on_organization",
                description=organization_slug):
            self.check_object_permissions(request, organization)

        bind_organization_context(organization)

        request._request.organization = organization

        # Track the 'active' organization when the request comes from
        # a cookie-based agent (the React app).
        # Never track any org (regardless of whether the user does or doesn't
        # have membership in that org) when the user is in active superuser mode.
        if request.auth is None and request.user and not is_active_superuser(
                request):
            request.session["activeorg"] = organization.slug

        kwargs["organization"] = organization
        return (args, kwargs)
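
The convert_args pattern above is shared by the endpoint examples in this listing: resolve the URL parameters into model instances, check permissions, call bind_organization_context so every Sentry event emitted during the request is tagged with the organization, and inject the resolved objects into kwargs. A minimal, hypothetical subclass (the OrganizationEndpoint base class name is assumed here for illustration, not taken from the snippet) would then receive the organization directly:

from rest_framework.response import Response


class OrganizationNameEndpoint(OrganizationEndpoint):  # hypothetical subclass
    def get(self, request, organization):
        # `organization` arrives already resolved, permission-checked, and
        # bound to the error-reporting scope by convert_args above.
        return Response({"slug": organization.slug, "name": organization.name})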
Example #7
    def convert_args(self,
                     request,
                     issue_id,
                     organization_slug=None,
                     *args,
                     **kwargs):
        # TODO(tkaemming): Ideally, this would return a 302 response, rather
        # than just returning the data that is bound to the new group. (It
        # technically shouldn't be a 301, since the response could change again
        # as the result of another merge operation that occurs later. This
        # wouldn't break anything though -- it will just be a "permanent"
        # redirect to *another* permanent redirect.) This would require
        # rebuilding the URL in one of two ways: either by hacking it in with
        # string replacement, or making the endpoint aware of the URL pattern
        # that caused it to be dispatched, and reversing it with the correct
        # `issue_id` keyword argument.
        if organization_slug:
            try:
                organization = Organization.objects.get_from_cache(
                    slug=organization_slug)
            except Organization.DoesNotExist:
                raise ResourceDoesNotExist

            bind_organization_context(organization)

            request._request.organization = organization
        else:
            organization = None

        try:
            group, _ = get_group_with_redirect(
                issue_id,
                queryset=Group.objects.select_related("project",
                                                      "project__organization"),
                organization=organization,
            )
        except Group.DoesNotExist:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, group)

        with configure_scope() as scope:
            scope.set_tag("project", group.project_id)

        # we didn't bind the context above, so do it now
        if not organization:
            bind_organization_context(group.project.organization)

        if group.status in EXCLUDED_STATUSES:
            raise ResourceDoesNotExist

        request._request.organization = group.project.organization

        kwargs["group"] = group

        return (args, kwargs)
Example #8
    def test_bind_organization_context(self):
        configure_sdk()
        Hub.current.bind_client(Hub.main.client)

        org = self.create_organization()
        bind_organization_context(org)

        assert Hub.current.scope._tags["organization"] == org.id
        assert Hub.current.scope._tags["organization.slug"] == org.slug
        assert Hub.current.scope._contexts["organization"] == {"id": org.id, "slug": org.slug}
Example #9
def test_bind_organization_context_with_callback(settings, default_organization):
    configure_sdk()

    def add_context(scope, organization, **kwargs):
        scope.set_tag("organization.test", "1")

    settings.SENTRY_ORGANIZATION_CONTEXT_HELPER = add_context
    bind_organization_context(default_organization)

    assert Hub.current.scope._tags["organization.test"] == "1"
Example #10
def test_bind_organization_context(default_organization):
    configure_sdk()

    bind_organization_context(default_organization)

    assert Hub.current.scope._tags["organization"] == default_organization.id
    assert Hub.current.scope._tags["organization.slug"] == default_organization.slug
    assert Hub.current.scope._contexts["organization"] == {
        "id": default_organization.id,
        "slug": default_organization.slug,
    }
Example #11
    def test_bind_organization_context_with_callback_error(self):
        configure_sdk()
        Hub.current.bind_client(Hub.main.client)

        org = self.create_organization()

        def add_context(scope, organization, **kwargs):
            1 / 0

        with self.settings(SENTRY_ORGANIZATION_CONTEXT_HELPER=add_context):
            bind_organization_context(org)

        assert Hub.current.scope._tags["organization"] == org.id
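
Read together, the four tests above pin down the observable behavior of bind_organization_context: it tags the current scope with the organization id and slug, attaches an "organization" context, and invokes an optional SENTRY_ORGANIZATION_CONTEXT_HELPER callback whose failures must not propagate. A minimal sketch consistent with those assertions (the helper lookup and error handling here are assumptions, not the real implementation):

from django.conf import settings
from sentry_sdk import configure_scope


def bind_organization_context(organization):
    # Sketch only: tag the active scope so events carry the organization.
    helper = getattr(settings, "SENTRY_ORGANIZATION_CONTEXT_HELPER", None)

    with configure_scope() as scope:
        scope.set_tag("organization", organization.id)
        scope.set_tag("organization.slug", organization.slug)
        scope.set_context(
            "organization", {"id": organization.id, "slug": organization.slug}
        )

        if helper:
            try:
                helper(scope=scope, organization=organization)
            except Exception:
                # A broken helper must not break the caller
                # (see test_bind_organization_context_with_callback_error).
                pass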
Example #12
    def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
        try:
            project = (
                Project.objects.filter(organization__slug=organization_slug, slug=project_slug)
                .select_related("organization")
                .prefetch_related("teams")
                .get()
            )
        except Project.DoesNotExist:
            try:
                # Project may have been renamed
                redirect = ProjectRedirect.objects.select_related("project")
                redirect = redirect.get(
                    organization__slug=organization_slug, redirect_slug=project_slug
                )
                # Without object permissions, don't reveal the rename.
                self.check_object_permissions(request, redirect.project)

                # get full path so that we keep query strings
                requested_url = request.get_full_path()
                new_url = requested_url.replace(
                    f"projects/{organization_slug}/{project_slug}/",
                    f"projects/{organization_slug}/{redirect.project.slug}/",
                )

                # Resource was moved/renamed if the requested url is different than the new url
                if requested_url != new_url:
                    raise ProjectMoved(new_url, redirect.project.slug)

                # otherwise project doesn't exist
                raise ResourceDoesNotExist
            except ProjectRedirect.DoesNotExist:
                raise ResourceDoesNotExist

        if project.status != ProjectStatus.VISIBLE:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, project)

        with configure_scope() as scope:
            scope.set_tag("project", project.id)

        bind_organization_context(project.organization)

        request._request.organization = project.organization

        kwargs["project"] = project
        return (args, kwargs)
Example #13
    def get(self, request, *args, **kwargs):
        try:
            jira_auth = self.get_jira_auth()
        except (ApiError, JiraTenant.DoesNotExist):
            return self.get_response("error.html")

        if request.user.is_anonymous:
            return self.get_response("signin.html")

        org = jira_auth.organization
        form_context = None
        if org:
            form_context = {"organization": org.id}
            bind_organization_context(org)

        form = JiraConfigForm([(o.id, o.name) for o in request.user.get_orgs()], form_context)
        context = self.get_context()
        context.update({"is_configured": jira_auth.is_configured(), "form": form})

        return self.get_response("config.html", context)
Example #14
    def convert_args(self, request, organization_slug, team_slug, *args,
                     **kwargs):
        try:
            team = (Team.objects.filter(
                organization__slug=organization_slug,
                slug=team_slug).select_related("organization").get())
        except Team.DoesNotExist:
            raise ResourceDoesNotExist

        if team.status != TeamStatus.VISIBLE:
            raise ResourceDoesNotExist

        self.check_object_permissions(request, team)

        bind_organization_context(team.organization)

        request._request.organization = team.organization

        kwargs["team"] = team
        return (args, kwargs)
Example #15
    def get(self, request: Request, *args: Any, **kwargs: Any) -> HttpResponse:
        try:
            jira_auth = self.get_jira_auth()
        except CATCHABLE_AUTH_ERRORS:
            return self.get_response("error.html")

        if request.user.is_anonymous:
            return self.get_response("signin.html")

        org = jira_auth.organization
        form_context = None
        if org:
            form_context = {"organization": org.id}
            bind_organization_context(org)

        form = JiraConfigForm([(o.id, o.name)
                               for o in request.user.get_orgs()], form_context)
        context = self.get_context()
        context.update({
            "is_configured": jira_auth.is_configured(),
            "form": form
        })

        return self.get_response("config.html", context)
Example #16
    def post(self, request: Request, organization) -> Response:
        """
        Create a New Release for an Organization
        ````````````````````````````````````````
        Create a new release for the given Organization.  Releases are used by
        Sentry to improve its error reporting abilities by correlating
        first seen events with the release that might have introduced the
        problem.

        Releases are also necessary for sourcemaps and other debug features
        that require manual upload to function well.

        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :param string version: a version identifier for this release.  Can
                               be a version number, a commit hash etc.
        :param string ref: an optional commit reference.  This is useful if
                           a tagged version has been provided.
        :param url url: a URL that points to the release.  This can be the
                        path to an online interface to the sourcecode
                        for instance.
        :param array projects: a list of project slugs that are involved in
                               this release
        :param datetime dateReleased: an optional date that indicates when
                                      the release went live.  If not provided
                                      the current time is assumed.
        :param array commits: an optional list of commit data to be associated
                              with the release. Commits must include parameters
                              ``id`` (the sha of the commit), and can optionally
                              include ``repository``, ``message``, ``patch_set``,
                              ``author_name``, ``author_email``, and ``timestamp``.
                              See [release without integration example](/workflow/releases/).
        :param array refs: an optional way to indicate the start and end commits
                           for each repository included in a release. Head commits
                           must include parameters ``repository`` and ``commit``
                           (the HEAD sha). They can optionally include ``previousCommit``
                           (the sha of the HEAD of the previous release), which should
                           be specified if this is the first time you've sent commit data.
                           ``commit`` may contain a range in the form of ``previousCommit..commit``
        :auth: required
        """
        bind_organization_context(organization)
        serializer = ReleaseSerializerWithProjects(
            data=request.data, context={"organization": organization})

        with configure_scope() as scope:
            if serializer.is_valid():
                result = serializer.validated_data
                scope.set_tag("version", result["version"])

                allowed_projects = {
                    p.slug: p
                    for p in self.get_projects(request, organization)
                }

                projects = []
                for slug in result["projects"]:
                    if slug not in allowed_projects:
                        return Response(
                            {"projects": ["Invalid project slugs"]},
                            status=400)
                    projects.append(allowed_projects[slug])

                new_status = result.get("status")

                # release creation is idempotent to simplify user
                # experiences
                try:
                    release, created = Release.objects.get_or_create(
                        organization_id=organization.id,
                        version=result["version"],
                        defaults={
                            "ref": result.get("ref"),
                            "url": result.get("url"),
                            "owner": result.get("owner"),
                            "date_released": result.get("dateReleased"),
                            "status": new_status or ReleaseStatus.OPEN,
                        },
                    )
                except IntegrityError:
                    raise ConflictError(
                        "Could not create the release it conflicts with existing data",
                    )
                if created:
                    release_created.send_robust(release=release,
                                                sender=self.__class__)

                if not created and new_status is not None and new_status != release.status:
                    release.status = new_status
                    release.save()

                new_projects = []
                for project in projects:
                    created = release.add_project(project)
                    if created:
                        new_projects.append(project)

                if release.date_released:
                    for project in new_projects:
                        Activity.objects.create(
                            type=Activity.RELEASE,
                            project=project,
                            ident=Activity.get_version_ident(
                                result["version"]),
                            data={"version": result["version"]},
                            datetime=release.date_released,
                        )

                commit_list = result.get("commits")
                if commit_list:
                    try:
                        release.set_commits(commit_list)
                        self.track_set_commits_local(
                            request,
                            organization_id=organization.id,
                            project_ids=[project.id for project in projects],
                        )
                    except ReleaseCommitError:
                        raise ConflictError(
                            "Release commits are currently being processed")

                refs = result.get("refs")
                if not refs:
                    refs = [{
                        "repository": r["repository"],
                        "previousCommit": r.get("previousId"),
                        "commit": r["currentId"],
                    } for r in result.get("headCommits", [])]
                scope.set_tag("has_refs", bool(refs))
                if refs:
                    if not request.user.is_authenticated:
                        scope.set_tag("failure_reason",
                                      "user_not_authenticated")
                        return Response(
                            {
                                "refs": [
                                    "You must use an authenticated API token to fetch refs"
                                ]
                            },
                            status=400,
                        )
                    fetch_commits = not commit_list
                    try:
                        release.set_refs(refs,
                                         request.user,
                                         fetch=fetch_commits)
                    except InvalidRepository as e:
                        scope.set_tag("failure_reason", "InvalidRepository")
                        return Response({"refs": [str(e)]}, status=400)

                if not created and not new_projects:
                    # This is the closest status code that makes sense, and we want
                    # a unique 2xx response code so people can understand when
                    # behavior differs.
                    #   208 Already Reported (WebDAV; RFC 5842)
                    status = 208
                else:
                    status = 201

                analytics.record(
                    "release.created",
                    user_id=request.user.id
                    if request.user and request.user.id else None,
                    organization_id=organization.id,
                    project_ids=[project.id for project in projects],
                    user_agent=request.META.get("HTTP_USER_AGENT", ""),
                    created_status=status,
                )

                scope.set_tag("success_status", status)
                return Response(serialize(release, request.user),
                                status=status)
            scope.set_tag("failure_reason", "serializer_error")
            return Response(serializer.errors, status=400)
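
The docstring above describes the organization-level release creation endpoint. A hedged usage sketch with the requests library (the /api/0/organizations/<slug>/releases/ path, host, and token below are assumptions for illustration, not taken from the snippet):

import requests

SENTRY_URL = "https://sentry.example.com"   # hypothetical host
ORG_SLUG = "my-org"                         # hypothetical organization slug
TOKEN = "<auth-token>"                      # hypothetical auth token

resp = requests.post(
    f"{SENTRY_URL}/api/0/organizations/{ORG_SLUG}/releases/",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={
        "version": "2.0rc2",                                # version identifier
        "ref": "6ba09a7c53235ee8a8fa5ee4c1ca8ca886e7fdbb",  # optional commit reference
        "projects": ["my-project"],                         # project slugs in this release
    },
)
resp.raise_for_status()
# 201 when the release was created, 208 (Already Reported) when it already
# existed and no new projects were added, per the status handling above.
print(resp.status_code)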
Example #17
def assemble_artifacts(org_id, version, checksum, chunks, **kwargs):
    """
    Creates release files from an uploaded artifact bundle.
    """
    try:
        organization = Organization.objects.get_from_cache(pk=org_id)
        bind_organization_context(organization)

        set_assemble_status(AssembleTask.ARTIFACTS, org_id, checksum,
                            ChunkFileState.ASSEMBLING)

        archive_filename = f"release-artifacts-{uuid.uuid4().hex}.zip"

        # Assemble the chunks into a temporary file
        rv = assemble_file(
            AssembleTask.ARTIFACTS,
            organization,
            archive_filename,
            checksum,
            chunks,
            file_type="release.bundle",
        )

        # If no file has been created, this means that the file failed to
        # assemble because of bad input data. In this case, assemble_file
        # has already set the assemble status.
        if rv is None:
            return

        bundle, temp_file = rv

        try:
            archive = ReleaseArchive(temp_file)
        except Exception:
            raise AssembleArtifactsError("failed to open release manifest")

        with archive:
            manifest = archive.manifest

            org_slug = manifest.get("org")
            if organization.slug != org_slug:
                raise AssembleArtifactsError(
                    "organization does not match uploaded bundle")

            release_name = manifest.get("release")
            if release_name != version:
                raise AssembleArtifactsError(
                    "release does not match uploaded bundle")

            try:
                release = Release.objects.get(organization_id=organization.id,
                                              version=release_name)
            except Release.DoesNotExist:
                raise AssembleArtifactsError("release does not exist")

            dist_name = manifest.get("dist")
            dist = None
            if dist_name:
                dist = release.add_dist(dist_name)

            num_files = len(manifest.get("files", {}))

            meta = {  # Required for release file creation
                "organization_id": organization.id,
                "release_id": release.id,
                "dist_id": dist.id if dist else dist,
            }

            saved_as_archive = False
            min_size = options.get("processing.release-archive-min-files")
            if num_files >= min_size:
                try:
                    update_artifact_index(release, dist, bundle)
                    saved_as_archive = True
                except Exception as exc:
                    logger.error("Unable to update artifact index",
                                 exc_info=exc)

            if not saved_as_archive:
                _store_single_files(archive, meta, True)

            # Count files extracted, to compare them to release files endpoint
            metrics.incr("tasks.assemble.extracted_files", amount=num_files)

    except AssembleArtifactsError as e:
        set_assemble_status(AssembleTask.ARTIFACTS,
                            org_id,
                            checksum,
                            ChunkFileState.ERROR,
                            detail=str(e))
    except Exception:
        logger.error("failed to assemble release bundle", exc_info=True)
        set_assemble_status(
            AssembleTask.ARTIFACTS,
            org_id,
            checksum,
            ChunkFileState.ERROR,
            detail="internal server error",
        )
    else:
        set_assemble_status(AssembleTask.ARTIFACTS, org_id, checksum,
                            ChunkFileState.OK)
Example #18
def assemble_artifacts(org_id, version, checksum, chunks, **kwargs):
    """
    Creates release files from an uploaded artifact bundle.
    """
    try:
        organization = Organization.objects.get_from_cache(pk=org_id)
        bind_organization_context(organization)

        set_assemble_status(AssembleTask.ARTIFACTS, org_id, checksum,
                            ChunkFileState.ASSEMBLING)

        # Assemble the chunks into a temporary file
        rv = assemble_file(
            AssembleTask.ARTIFACTS,
            organization,
            RELEASE_ARCHIVE_FILENAME,
            checksum,
            chunks,
            file_type="release.bundle",
        )

        # If no file has been created, this means that the file failed to
        # assemble because of bad input data. In this case, assemble_file
        # has already set the assemble status.
        if rv is None:
            return

        bundle, temp_file = rv

        try:
            archive = ReleaseArchive(temp_file)
        except BaseException:
            raise AssembleArtifactsError("failed to open release manifest")

        with archive:
            manifest = archive.manifest

            org_slug = manifest.get("org")
            if organization.slug != org_slug:
                raise AssembleArtifactsError(
                    "organization does not match uploaded bundle")

            release_name = manifest.get("release")
            if release_name != version:
                raise AssembleArtifactsError(
                    "release does not match uploaded bundle")

            try:
                release = Release.objects.get(organization_id=organization.id,
                                              version=release_name)
            except Release.DoesNotExist:
                raise AssembleArtifactsError("release does not exist")

            dist_name = manifest.get("dist")
            dist = None
            if dist_name:
                dist = release.add_dist(dist_name)

            meta = {  # Required for release file creation
                "organization_id": organization.id,
                "release": release,
                "dist": dist,
            }
            if options.get("processing.save-release-archives"):
                kwargs = dict(meta, name=RELEASE_ARCHIVE_FILENAME)
                _upsert_release_file(bundle, archive, _merge_archives,
                                     **kwargs)

            # NOTE(jjbayer): Single files are still stored to enable
            # rolling back from release archives. Once release archives run
            # smoothly, this call can be removed or only made when the
            # feature flag is off.
            _store_single_files(archive, meta)

            # Count files extracted, to compare them to release files endpoint
            metrics.incr("tasks.assemble.extracted_files",
                         amount=len(manifest.get("files", {})))

    except AssembleArtifactsError as e:
        set_assemble_status(AssembleTask.ARTIFACTS,
                            org_id,
                            checksum,
                            ChunkFileState.ERROR,
                            detail=str(e))
    except BaseException:
        logger.error("failed to assemble release bundle", exc_info=True)
        set_assemble_status(
            AssembleTask.ARTIFACTS,
            org_id,
            checksum,
            ChunkFileState.ERROR,
            detail="internal server error",
        )
    else:
        set_assemble_status(AssembleTask.ARTIFACTS, org_id, checksum,
                            ChunkFileState.OK)
Example #19
    def put(self, request, project, version):
        """
        Update a Project's Release
        ``````````````````````````

        Update a release.  This can change some metadata associated with
        the release (the ref, url, and dates).

        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string project_slug: the slug of the project to change the
                                     release of.
        :pparam string version: the version identifier of the release.
        :param string ref: an optional commit reference.  This is useful if
                           a tagged version has been provided.
        :param url url: a URL that points to the release.  This can be the
                        path to an online interface to the sourcecode
                        for instance.
        :param datetime dateReleased: an optional date that indicates when
                                      the release went live.  If not provided
                                      the current time is assumed.
        :auth: required
        """
        bind_organization_context(project.organization)
        with configure_scope() as scope:
            scope.set_tag("version", version)
            try:
                release = Release.objects.get(
                    organization_id=project.organization_id,
                    projects=project,
                    version=version)
            except Release.DoesNotExist:
                scope.set_tag("failure_reason", "Release.DoesNotExist")
                raise ResourceDoesNotExist

            serializer = ReleaseSerializer(data=request.data, partial=True)

            if not serializer.is_valid():
                scope.set_tag("failure_reason", "serializer_error")
                return Response(serializer.errors, status=400)

            result = serializer.validated_data

            was_released = bool(release.date_released)

            kwargs = {}
            if result.get("dateReleased"):
                kwargs["date_released"] = result["dateReleased"]
            if result.get("ref"):
                kwargs["ref"] = result["ref"]
            if result.get("url"):
                kwargs["url"] = result["url"]
            if result.get("status"):
                kwargs["status"] = result["status"]

            if kwargs:
                release.update(**kwargs)

            commit_list = result.get("commits")
            if commit_list:
                hook = ReleaseHook(project)
                # TODO(dcramer): handle errors with release payloads
                hook.set_commits(release.version, commit_list)
                self.track_set_commits_local(
                    request,
                    organization_id=project.organization_id,
                    project_ids=[project.id])

            if not was_released and release.date_released:
                Activity.objects.create(
                    type=Activity.RELEASE,
                    project=project,
                    ident=Activity.get_version_ident(release.version),
                    data={"version": release.version},
                    datetime=release.date_released,
                )

            return Response(serialize(release, request.user))
Example #20
def post_process_group(event, is_new, is_regression, is_new_group_environment,
                       **kwargs):
    """
    Fires post processing hooks for a group.
    """
    set_current_project(event.project_id)

    from sentry.utils import snuba

    with snuba.options_override({"consistent": True}):
        if check_event_already_post_processed(event):
            logger.info(
                "post_process.skipped",
                extra={
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                    "reason": "duplicate",
                },
            )
            return

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project, Organization, EventDict
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        if event.group_id:
            # Re-bind Group since we're pickling the whole Event object
            # which may contain a stale Project.
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

        # Re-bind Project and Org since we're pickling the whole Event object
        # which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id)
        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if event.group_id:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression,
                               is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                with sentry_sdk.start_transaction(op="post_process_group",
                                                  name="rule_processor_apply",
                                                  sampled=True):
                    safe_execute(callback, event, futures)

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(
                            project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(
                                servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(
                    event.project):
                process_resource_change_bound.delay(action="created",
                                                    sender="Error",
                                                    instance_id=event.event_id,
                                                    instance=event)
            if is_new:
                process_resource_change_bound.delay(action="created",
                                                    sender="Group",
                                                    instance_id=event.group_id)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(plugin_slug=plugin.slug,
                                          event=event,
                                          is_new=is_new,
                                          is_regresion=is_regression)

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get("primary_hash"),
        )
Example #21
def assemble_artifacts(org_id, version, checksum, chunks, **kwargs):
    """
    Creates release files from an uploaded artifact bundle.
    """

    import shutil
    import tempfile
    from sentry.utils.zip import safe_extract_zip
    from sentry.models import File, Organization, Release, ReleaseFile

    organization = Organization.objects.get_from_cache(pk=org_id)

    bind_organization_context(organization)

    set_assemble_status(AssembleTask.ARTIFACTS, org_id, checksum,
                        ChunkFileState.ASSEMBLING)

    # Assemble the chunks into a temporary file
    rv = assemble_file(
        AssembleTask.ARTIFACTS,
        organization,
        "release-artifacts.zip",
        checksum,
        chunks,
        file_type="release.bundle",
    )

    # If no file has been created, this means that the file failed to
    # assemble because of bad input data. Return.
    if rv is None:
        return

    bundle, temp_file = rv
    scratchpad = tempfile.mkdtemp()

    # Initially, always delete the bundle file. Later on, we can start to store
    # the artifact bundle as a release file.
    delete_bundle = True

    try:
        try:
            safe_extract_zip(temp_file, scratchpad, strip_toplevel=False)
        except BaseException:
            raise AssembleArtifactsError("failed to extract bundle")

        try:
            manifest_path = path.join(scratchpad, "manifest.json")
            with open(manifest_path, "rb") as manifest:
                manifest = json.loads(manifest.read())
        except BaseException:
            raise AssembleArtifactsError("failed to open release manifest")

        org_slug = manifest.get("org")
        if organization.slug != org_slug:
            raise AssembleArtifactsError(
                "organization does not match uploaded bundle")

        release_name = manifest.get("release")
        if release_name != version:
            raise AssembleArtifactsError(
                "release does not match uploaded bundle")

        try:
            release = Release.objects.get(organization_id=organization.id,
                                          version=release_name)
        except Release.DoesNotExist:
            raise AssembleArtifactsError("release does not exist")

        dist_name = manifest.get("dist")
        dist = None
        if dist_name:
            dist = release.add_dist(dist_name)

        artifacts = manifest.get("files", {})
        for rel_path, artifact in six.iteritems(artifacts):
            artifact_url = artifact.get("url", rel_path)
            artifact_basename = artifact_url.rsplit("/", 1)[-1]

            file = File.objects.create(name=artifact_basename,
                                       type="release.file",
                                       headers=artifact.get("headers", {}))

            full_path = path.join(scratchpad, rel_path)
            with open(full_path, "rb") as fp:
                file.putfile(fp, logger=logger)

            kwargs = {
                "organization_id": organization.id,
                "release": release,
                "name": artifact_url,
                "dist": dist,
            }

            # Release files must have unique names within their release
            # and dist. If a matching file already exists, replace its
            # file with the new one; otherwise create it.
            try:
                release_file = ReleaseFile.objects.get(**kwargs)
            except ReleaseFile.DoesNotExist:
                try:
                    with transaction.atomic():
                        ReleaseFile.objects.create(file=file, **kwargs)
                except IntegrityError:
                    # NB: This indicates a race, where another assemble task or
                    # file upload job has just created a conflicting file. Since
                    # we're upserting here anyway, yield to the faster actor and
                    # do not try again.
                    file.delete()
            else:
                old_file = release_file.file
                release_file.update(file=file)
                old_file.delete()

    except AssembleArtifactsError as e:
        set_assemble_status(AssembleTask.ARTIFACTS,
                            org_id,
                            checksum,
                            ChunkFileState.ERROR,
                            detail=six.text_type(e))
    except BaseException:
        logger.error("failed to assemble release bundle", exc_info=True)
        set_assemble_status(
            AssembleTask.ARTIFACTS,
            org_id,
            checksum,
            ChunkFileState.ERROR,
            detail="internal server error",
        )
    else:
        set_assemble_status(AssembleTask.ARTIFACTS, org_id, checksum,
                            ChunkFileState.OK)
    finally:
        shutil.rmtree(scratchpad)
        if delete_bundle:
            bundle.delete()
Example #22
    def post(self, request, project):
        """
        Create a New Release for a Project
        ``````````````````````````````````

        Create a new release and/or associate a project with a release.
        Release versions that are the same across multiple projects
        within an Organization will be treated as the same release in Sentry.

        Releases are used by Sentry to improve its error reporting abilities
        by correlating first seen events with the release that might have
        introduced the problem.

        Releases are also necessary for sourcemaps and other debug features
        that require manual upload to function well.

        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string project_slug: the slug of the project to create a
                                     release for.
        :param string version: a version identifier for this release.  Can
                               be a version number, a commit hash etc.
        :param string ref: an optional commit reference.  This is useful if
                           a tagged version has been provided.
        :param url url: a URL that points to the release.  This can be the
                        path to an online interface to the sourcecode
                        for instance.
        :param datetime dateReleased: an optional date that indicates when
                                      the release went live.  If not provided
                                      the current time is assumed.
        :auth: required
        """
        bind_organization_context(project.organization)
        serializer = ReleaseWithVersionSerializer(data=request.data)

        with configure_scope() as scope:
            if serializer.is_valid():
                result = serializer.validated_data
                scope.set_tag("version", result["version"])

                # release creation is idempotent to simplify user
                # experiences
                try:
                    with transaction.atomic():
                        release, created = (
                            Release.objects.create(
                                organization_id=project.organization_id,
                                version=result["version"],
                                ref=result.get("ref"),
                                url=result.get("url"),
                                owner=result.get("owner"),
                                date_released=result.get("dateReleased"),
                            ),
                            True,
                        )
                    was_released = False
                except IntegrityError:
                    release, created = (
                        Release.objects.get(
                            organization_id=project.organization_id,
                            version=result["version"]),
                        False,
                    )
                    was_released = bool(release.date_released)
                else:
                    release_created.send_robust(release=release,
                                                sender=self.__class__)

                created = release.add_project(project)

                commit_list = result.get("commits")
                if commit_list:
                    hook = ReleaseHook(project)
                    # TODO(dcramer): handle errors with release payloads
                    hook.set_commits(release.version, commit_list)

                if not was_released and release.date_released:
                    Activity.objects.create(
                        type=Activity.RELEASE,
                        project=project,
                        ident=Activity.get_version_ident(result["version"]),
                        data={"version": result["version"]},
                        datetime=release.date_released,
                    )

                if not created:
                    # This is the closest status code that makes sense, and we want
                    # a unique 2xx response code so people can understand when
                    # behavior differs.
                    #   208 Already Reported (WebDAV; RFC 5842)
                    status = 208
                else:
                    status = 201

                analytics.record(
                    "release.created",
                    user_id=request.user.id
                    if request.user and request.user.id else None,
                    organization_id=project.organization_id,
                    project_ids=[project.id],
                    user_agent=request.META.get("HTTP_USER_AGENT", ""),
                    created_status=status,
                )
                scope.set_tag("success_status", status)
                return Response(serialize(release, request.user),
                                status=status)
            scope.set_tag("failure_reason", "serializer_error")
            return Response(serializer.errors, status=400)
Example #23
    def put(self, request, organization, version):
        """
        Update an Organization's Release
        ````````````````````````````````

        Update a release. This can change some metadata associated with
        the release (the ref, url, and dates).

        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string version: the version identifier of the release.
        :param string ref: an optional commit reference.  This is useful if
                           a tagged version has been provided.
        :param url url: a URL that points to the release.  This can be the
                        path to an online interface to the sourcecode
                        for instance.
        :param datetime dateReleased: an optional date that indicates when
                                      the release went live.  If not provided
                                      the current time is assumed.
        :param array commits: an optional list of commit data to be associated
                              with the release. Commits must include parameters
                              ``id`` (the sha of the commit), and can optionally
                              include ``repository``, ``message``, ``author_name``,
                              ``author_email``, and ``timestamp``.
        :param array refs: an optional way to indicate the start and end commits
                           for each repository included in a release. Head commits
                           must include parameters ``repository`` and ``commit``
                           (the HEAD sha). They can optionally include ``previousCommit``
                           (the sha of the HEAD of the previous release), which should
                           be specified if this is the first time you've sent commit data.
        :auth: required
        """
        bind_organization_context(organization)

        with configure_scope() as scope:
            scope.set_tag("version", version)
            try:
                release = Release.objects.get(organization_id=organization.id, version=version)
                projects = release.projects.all()
            except Release.DoesNotExist:
                scope.set_tag("failure_reason", "Release.DoesNotExist")
                raise ResourceDoesNotExist

            if not self.has_release_permission(request, organization, release):
                scope.set_tag("failure_reason", "no_release_permission")
                raise ResourceDoesNotExist

            serializer = OrganizationReleaseSerializer(data=request.data)

            if not serializer.is_valid():
                scope.set_tag("failure_reason", "serializer_error")
                return Response(serializer.errors, status=400)

            result = serializer.validated_data

            was_released = bool(release.date_released)

            kwargs = {}
            if result.get("dateReleased"):
                kwargs["date_released"] = result["dateReleased"]
            if result.get("ref"):
                kwargs["ref"] = result["ref"]
            if result.get("url"):
                kwargs["url"] = result["url"]
            if result.get("status"):
                kwargs["status"] = result["status"]

            if kwargs:
                release.update(**kwargs)

            commit_list = result.get("commits")
            if commit_list:
                # TODO(dcramer): handle errors with release payloads
                try:
                    release.set_commits(commit_list)
                    self.track_set_commits_local(
                        request,
                        organization_id=organization.id,
                        project_ids=[project.id for project in projects],
                    )
                except ReleaseCommitError:
                    raise ConflictError("Release commits are currently being processed")

            refs = result.get("refs")
            if not refs:
                refs = [
                    {
                        "repository": r["repository"],
                        "previousCommit": r.get("previousId"),
                        "commit": r["currentId"],
                    }
                    for r in result.get("headCommits", [])
                ]
            scope.set_tag("has_refs", bool(refs))
            if refs:
                if not request.user.is_authenticated:
                    scope.set_tag("failure_reason", "user_not_authenticated")
                    return Response(
                        {"refs": ["You must use an authenticated API token to fetch refs"]},
                        status=400,
                    )
                fetch_commits = not commit_list
                try:
                    release.set_refs(refs, request.user, fetch=fetch_commits)
                except InvalidRepository as e:
                    scope.set_tag("failure_reason", "InvalidRepository")
                    return Response({"refs": [str(e)]}, status=400)

            if not was_released and release.date_released:
                for project in projects:
                    Activity.objects.create(
                        type=Activity.RELEASE,
                        project=project,
                        ident=Activity.get_version_ident(release.version),
                        data={"version": release.version},
                        datetime=release.date_released,
                    )

            return Response(serialize(release, request.user))
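
As with the creation endpoint, a short hedged sketch of calling this update endpoint to finalize a release by setting dateReleased (URL path, host, and token are again assumptions for illustration):

import requests

SENTRY_URL = "https://sentry.example.com"   # hypothetical host
ORG_SLUG = "my-org"                         # hypothetical organization slug
VERSION = "2.0rc2"                          # hypothetical release version
TOKEN = "<auth-token>"                      # hypothetical auth token

resp = requests.put(
    f"{SENTRY_URL}/api/0/organizations/{ORG_SLUG}/releases/{VERSION}/",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json={"dateReleased": "2021-03-01T12:00:00Z"},  # marks the release as live
)
resp.raise_for_status()
# On success the endpoint returns the serialized release; setting dateReleased
# also creates a RELEASE activity for each project, as the code above shows.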
Example #24
def post_process_group(
    is_new, is_regression, is_new_group_environment, cache_key, group_id=None, **kwargs
):
    """
    Fires post processing hooks for a group.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.reprocessing2 import is_reprocessed_event
    from sentry.utils import snuba

    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        data = event_processing_store.get(cache_key)
        if not data:
            logger.info(
                "post_process.skipped",
                extra={"cache_key": cache_key, "reason": "missing_cache"},
            )
            return
        event = Event(
            project_id=data["project"], event_id=data["event_id"], group_id=group_id, data=data
        )

        set_current_event_project(event.project_id)

        is_transaction_event = not bool(event.group_id)

        from sentry.models import EventDict, Organization, Project

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project.set_cached_field_value(
            "organization", Organization.objects.get_from_cache(id=event.project.organization_id)
        )

        # Simplified post processing for transaction events.
        # This should eventually be completely removed and transactions
        # will not go through any post processing.
        if is_transaction_event:
            transaction_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
            )

            event_processing_store.delete_by_key(cache_key)

            return

        is_reprocessed = is_reprocessed_event(event.data)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Commit, GroupInboxReason
        from sentry.models.group import get_group_with_redirect
        from sentry.models.groupinbox import add_group_to_inbox
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.groupowner import process_suspect_commits
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind Group since we're reading the Event object
        # from cache, which may contain a stale group and project
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        event.group.project = event.project
        event.group.project.set_cached_field_value("organization", event.project.organization)

        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if is_reprocessed and is_new:
            add_group_to_inbox(event.group, GroupInboxReason.REPROCESSED)

        if not is_reprocessed:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)
            if not has_reappeared:  # If true, we added the .UNIGNORED reason already
                if is_new:
                    add_group_to_inbox(event.group, GroupInboxReason.NEW)
                elif is_regression:
                    add_group_to_inbox(event.group, GroupInboxReason.REGRESSION)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(
                event, is_new, is_regression, is_new_group_environment, has_reappeared
            )
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                safe_execute(callback, event, futures, _with_transaction=False)

            try:
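                # Debounce suspect-commit computation: a short-lived per-group lock
                # prevents concurrent workers from racing, and the cache keys below
                # ensure process_suspect_commits is queued at most once per group per
                # week, and only when the organization has any commits on record.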
                lock = locks.get(
                    f"w-o:{event.group_id}-d-l",
                    duration=10,
                )
                with lock.acquire():
                    has_commit_key = f"w-o:{event.project.organization_id}-h-c"
                    org_has_commit = cache.get(has_commit_key)
                    if org_has_commit is None:
                        org_has_commit = Commit.objects.filter(
                            organization_id=event.project.organization_id
                        ).exists()
                        cache.set(has_commit_key, org_has_commit, 3600)

                    if org_has_commit:
                        group_cache_key = f"w-o-i:g-{event.group_id}"
                        if cache.get(group_cache_key):
                            metrics.incr(
                                "sentry.tasks.process_suspect_commits.debounce",
                                tags={"detail": "w-o-i:g debounce"},
                            )
                        else:
                            from sentry.utils.committers import get_frame_paths

                            cache.set(group_cache_key, True, 604800)  # 1 week in seconds
                            event_frames = get_frame_paths(event.data)
                            process_suspect_commits.delay(
                                event_id=event.event_id,
                                event_platform=event.platform,
                                event_frames=event_frames,
                                group_id=event.group_id,
                                project_id=event.project_id,
                            )
            except UnableToAcquireLock:
                pass
            except Exception:
                logger.exception("Failed to process suspect commits")

            if features.has("projects:servicehooks", project=event.project):
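                # Dispatch each service hook only for the event types it subscribed
                # to; "event.alert" is included only when an alert rule fired above.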
                allowed_events = {"event.created"}
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(
                event.project
            ):
                process_resource_change_bound.delay(
                    action="created", sender="Error", instance_id=event.event_id, instance=event
                )
            if is_new:
                process_resource_change_bound.delay(
                    action="created", sender="Group", instance_id=event.group_id
                )

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(
                    plugin_slug=plugin.slug, event=event, is_new=is_new, is_regression=is_regression
                )

            from sentry import similarity

            safe_execute(similarity.record, event.project, [event], _with_transaction=False)

        # Patch attachments that were ingested on the standalone path.
        update_existing_attachments(event)

        if not is_reprocessed:
            event_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
                primary_hash=kwargs.get("primary_hash"),
            )

        with metrics.timer("tasks.post_process.delete_event_cache"):
            event_processing_store.delete_by_key(cache_key)
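
The suspect-commit block above layers two throttles: a distributed lock so workers handling the same group do not race, and cache keys so the expensive work runs at most once per group per week (and only if the organization has commits at all). A minimal sketch of that pattern, with an in-memory cache and a process-local lock standing in for Sentry's cache backend and locks helper (names and TTLs are illustrative):

import threading
import time

_cache = {}               # stand-in for a shared cache backend
_lock = threading.Lock()  # stand-in for the distributed lock from locks.get(...)


def debounced_suspect_commits(group_id, do_expensive_work, ttl_seconds=604800):
    """Run do_expensive_work(group_id) at most once per group_id per ttl_seconds."""
    if not _lock.acquire(blocking=False):
        return  # another worker holds the lock; mirrors the UnableToAcquireLock branch
    try:
        key = f"debounce:{group_id}"
        expires_at = _cache.get(key)
        if expires_at and expires_at > time.time():
            return  # still inside the debounce window, like the cache.get() check above
        _cache[key] = time.time() + ttl_seconds
        do_expensive_work(group_id)
    finally:
        _lock.release()


# debounced_suspect_commits(42, lambda gid: print(f"queueing suspect commits for group {gid}"))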
Ejemplo n.º 25
0
def post_process_group(is_new,
                       is_regression,
                       is_new_group_environment,
                       cache_key,
                       group_id=None,
                       event=None,
                       **kwargs):
    """
    Fires post processing hooks for a group.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.utils import snuba
    from sentry.reprocessing2 import is_reprocessed_event

    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        #
        # While we always send the cache_key and never send the event parameter now,
        # the code to handle `event` has to stick around for a self-hosted release cycle.
        if cache_key and event is None:
            data = event_processing_store.get(cache_key)
            if not data:
                logger.info(
                    "post_process.skipped",
                    extra={
                        "cache_key": cache_key,
                        "reason": "missing_cache"
                    },
                )
                return
            event = Event(project_id=data["project"],
                          event_id=data["event_id"],
                          group_id=group_id,
                          data=data)
        elif event and check_event_already_post_processed(event):
            if cache_key:
                event_processing_store.delete_by_key(cache_key)
            logger.info(
                "post_process.skipped",
                extra={
                    "reason": "duplicate",
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                },
            )
            return

        if is_reprocessed_event(event.data):
            logger.info(
                "post_process.skipped",
                extra={
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                    "reason": "reprocessed",
                },
            )
            return

        set_current_project(event.project_id)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Project, Organization, EventDict
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        if event.group_id:
            # Re-bind Group since we're reading the Event object
            # from cache, which may contain a stale group and project
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id)
        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        if event.group_id:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression,
                               is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                with sentry_sdk.start_transaction(op="post_process_group",
                                                  name="rule_processor_apply",
                                                  sampled=True):
                    safe_execute(callback, event, futures)

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(
                            project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(
                                servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if (event.get_event_type() == "error"
                    and _should_send_error_created_hooks(event.project)):
                process_resource_change_bound.delay(action="created",
                                                    sender="Error",
                                                    instance_id=event.event_id,
                                                    instance=event)
            if is_new:
                process_resource_change_bound.delay(action="created",
                                                    sender="Group",
                                                    instance_id=event.group_id)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(plugin_slug=plugin.slug,
                                          event=event,
                                          is_new=is_new,
                                          is_regression=is_regression)

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get("primary_hash"),
        )
        with metrics.timer("tasks.post_process.delete_event_cache"):
            event_processing_store.delete_by_key(cache_key)
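
In both versions the task is enqueued asynchronously after ingestion; the newer one reads the event payload exclusively from the processing store, so callers stash the payload first and pass only the cache key. A minimal sketch of that hand-off, assuming the store exposes a store() counterpart to the get()/delete_by_key() calls above and that post_process_group is registered as a task with .delay() (both assumptions; neither appears in these snippets):

from sentry.eventstore.processing import event_processing_store
from sentry.tasks.post_process import post_process_group


def dispatch_post_processing(event_data, is_new, is_regression, is_new_group_environment, group_id=None):
    """Stash the normalized event payload and enqueue post processing by cache key."""
    # store() is assumed to return the cache key that post_process_group later
    # reads with get() and removes with delete_by_key().
    cache_key = event_processing_store.store(event_data)
    post_process_group.delay(
        is_new=is_new,
        is_regression=is_regression,
        is_new_group_environment=is_new_group_environment,
        cache_key=cache_key,
        group_id=group_id,
    )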