def test_mark_reviewed(self):
    """Marking an issue as reviewed via the UI removes its inbox state."""
    sample_event = self.create_sample_event(platform="python")
    add_group_to_inbox(sample_event.group, GroupInboxReason.NEW)

    # Drive the issue page and click "mark reviewed".
    self.page.visit_issue(self.org.slug, sample_event.group.id)
    self.page.mark_reviewed()

    # The API should no longer report inbox data for this group.
    response = self.page.api_issue_get(sample_event.group.id)
    assert response.status_code == 200, response
    assert "inbox" not in response.data
def post(self, request, project):
    """Create a sample event for *project*, add its group to the inbox, and return the serialized event."""
    sample = create_sample_event(project, platform=project.platform, default="javascript")
    add_group_to_inbox(sample.group, GroupInboxReason.NEW)
    return Response(serialize(sample, request.user))
def process_snoozes(group):
    """
    Lift an expired or violated snooze on ``group``.

    Return True if the group is transitioning back to "unresolved"
    as a result, otherwise return False.
    """
    from sentry.models import (
        Activity,
        GroupInboxReason,
        GroupSnooze,
        GroupStatus,
        add_group_to_inbox,
    )
    from sentry.models.grouphistory import GroupHistoryStatus, record_group_history

    cache_key = GroupSnooze.get_cache_key(group.id)
    snooze = cache.get(cache_key)
    if snooze is None:
        # Cache miss: fall back to the database; False marks "no snooze".
        try:
            snooze = GroupSnooze.objects.get(group=group)
        except GroupSnooze.DoesNotExist:
            snooze = False
        # This cache is also set in post_save|delete.
        cache.set(cache_key, snooze, 3600)

    if not snooze:
        return False

    if snooze.is_valid(group, test_rates=True, use_pending_data=True):
        # Snooze conditions still hold — leave the group alone.
        return False

    # Snooze has been violated/expired: unignore the group and notify.
    snooze_details = {
        "until": snooze.until,
        "count": snooze.count,
        "window": snooze.window,
        "user_count": snooze.user_count,
        "user_window": snooze.user_window,
    }
    add_group_to_inbox(group, GroupInboxReason.UNIGNORED, snooze_details)
    record_group_history(group, GroupHistoryStatus.UNIGNORED)
    Activity.objects.create(
        project=group.project,
        group=group,
        type=Activity.SET_UNRESOLVED,
        user=None,
    )
    snooze.delete()
    group.update(status=GroupStatus.UNRESOLVED)
    issue_unignored.send_robust(
        project=group.project,
        user=None,
        group=group,
        transition_type="automatic",
        sender="process_snoozes",
    )
    return True
def test_old_group_inbox_is_removed(self):
    """auto_remove_inbox drops week-old inbox rows but keeps fresh ones."""
    project = self.create_project()

    fresh_group = self.create_group(status=GroupStatus.UNRESOLVED, project=project)
    add_group_to_inbox(fresh_group, GroupInboxReason.NEW)

    stale_group = self.create_group(status=GroupStatus.UNRESOLVED, project=project)
    stale_entry = add_group_to_inbox(stale_group, GroupInboxReason.NEW)
    # Back-date the entry past the seven-day retention window.
    stale_entry.date_added = timezone.now() - timedelta(days=8)
    stale_entry.save()

    auto_remove_inbox()

    assert GroupInbox.objects.filter(group=fresh_group).exists()
    assert not GroupInbox.objects.filter(group=stale_group).exists()
def test_inbox_out(self, mock_record):
    """Dispatching the inbox_out signal triggers analytics recording."""
    inbox_entry = add_group_to_inbox(self.group, reason=GroupInboxReason.NEW)

    inbox_out.send(
        project=self.project,
        group=self.group,
        user=self.owner,
        sender="test_inbox_out",
        action="mark_reviewed",
        inbox_date_added=inbox_entry.date_added,
    )

    assert mock_record.called
def create_issues(self):
    """Store two distinct issues and place both of their groups in the inbox."""
    issue_specs = (
        ("a", "oh no", "group-1"),
        ("b", "oh snap", "group-2"),
    )
    for id_char, message, fingerprint in issue_specs:
        event = self.store_event(
            data={
                "event_id": id_char * 32,
                "message": message,
                "timestamp": iso_format(event_time),
                "fingerprint": [fingerprint],
            },
            project_id=self.project.id,
        )
        add_group_to_inbox(event.group, GroupInboxReason.NEW)
def test_group_expand_inbox(self):
    """?expand=inbox includes inbox state, and drops it once the group leaves the inbox."""
    self.login_as(user=self.user)

    event = self.store_event(
        data={"timestamp": iso_format(before_now(minutes=3))},
        project_id=self.project.id,
    )
    group = event.group
    add_group_to_inbox(group, GroupInboxReason.NEW)

    url = f"/api/0/issues/{group.id}/?expand=inbox"

    # While in the inbox the payload carries reason metadata.
    response = self.client.get(url, format="json")
    assert response.status_code == 200, response.content
    inbox_payload = response.data["inbox"]
    assert inbox_payload is not None
    assert inbox_payload["reason"] == GroupInboxReason.NEW.value
    assert inbox_payload["reason_details"] is None

    remove_group_from_inbox(event.group)

    # After removal the expanded field is explicitly null.
    response = self.client.get(url, format="json")
    assert response.status_code == 200, response.content
    assert response.data["inbox"] is None
def post_process_group(
    is_new, is_regression, is_new_group_environment, cache_key, group_id=None, **kwargs
):
    """
    Fires post processing hooks for a group.

    Reads the event payload back out of the event processing store under
    ``cache_key``; if the entry is missing the task is a no-op (the work
    was already done or the cache expired). Transaction events (no group)
    only fire ``transaction_processed``. Error events additionally run
    snooze handling, inbox placement, owner assignment, rules,
    suspect-commit detection, service hooks, sentry-app hooks, plugins,
    and similarity recording. The cache entry is deleted at the end.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.reprocessing2 import is_reprocessed_event
    from sentry.utils import snuba

    # Force consistent Snuba reads for everything below.
    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        data = event_processing_store.get(cache_key)
        if not data:
            logger.info(
                "post_process.skipped",
                extra={"cache_key": cache_key, "reason": "missing_cache"},
            )
            return
        event = Event(
            project_id=data["project"], event_id=data["event_id"], group_id=group_id, data=data
        )

        set_current_event_project(event.project_id)

        # Transactions are ingested without a group.
        is_transaction_event = not bool(event.group_id)

        from sentry.models import EventDict, Organization, Project

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project.set_cached_field_value(
            "organization", Organization.objects.get_from_cache(id=event.project.organization_id)
        )

        # Simplified post processing for transaction events.
        # This should eventually be completely removed and transactions
        # will not go through any post processing.
        if is_transaction_event:
            transaction_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
            )

            event_processing_store.delete_by_key(cache_key)

            return

        is_reprocessed = is_reprocessed_event(event.data)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import Commit, GroupInboxReason
        from sentry.models.group import get_group_with_redirect
        from sentry.models.groupinbox import add_group_to_inbox
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.groupowner import process_suspect_commits
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind Group since we're reading the Event object
        # from cache, which may contain a stale group and project
        event.group, _ = get_group_with_redirect(event.group_id)
        event.group_id = event.group.id

        event.group.project = event.project
        event.group.project.set_cached_field_value("organization", event.project.organization)

        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        # Reprocessed events that created a new group go straight to the inbox.
        if is_reprocessed and is_new:
            add_group_to_inbox(event.group, GroupInboxReason.REPROCESSED)

        if not is_reprocessed:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)
            if not has_reappeared:  # If true, we added the .UNIGNORED reason already
                if is_new:
                    add_group_to_inbox(event.group, GroupInboxReason.NEW)
                elif is_regression:
                    add_group_to_inbox(event.group, GroupInboxReason.REGRESSION)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(
                event, is_new, is_regression, is_new_group_environment, has_reappeared
            )
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                safe_execute(callback, event, futures, _with_transaction=False)

            # Suspect-commit detection, guarded by a per-group lock and two
            # cache-based debounces (org has any commits; group already scanned).
            try:
                lock = locks.get(
                    f"w-o:{event.group_id}-d-l",
                    duration=10,
                )
                with lock.acquire():
                    has_commit_key = f"w-o:{event.project.organization_id}-h-c"
                    org_has_commit = cache.get(has_commit_key)
                    if org_has_commit is None:
                        org_has_commit = Commit.objects.filter(
                            organization_id=event.project.organization_id
                        ).exists()
                        cache.set(has_commit_key, org_has_commit, 3600)

                    if org_has_commit:
                        group_cache_key = f"w-o-i:g-{event.group_id}"
                        if cache.get(group_cache_key):
                            metrics.incr(
                                "sentry.tasks.process_suspect_commits.debounce",
                                tags={"detail": "w-o-i:g debounce"},
                            )
                        else:
                            from sentry.utils.committers import get_frame_paths

                            cache.set(group_cache_key, True, 604800)  # 1 week in seconds
                            event_frames = get_frame_paths(event.data)
                            process_suspect_commits.delay(
                                event_id=event.event_id,
                                event_platform=event.platform,
                                event_frames=event_frames,
                                group_id=event.group_id,
                                project_id=event.project_id,
                            )
            except UnableToAcquireLock:
                # Another worker is already scanning this group; skip quietly.
                pass
            except Exception:
                logger.exception("Failed to process suspect commits")

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = {"event.created"}
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(
                event.project
            ):
                process_resource_change_bound.delay(
                    action="created", sender="Error", instance_id=event.event_id, instance=event
                )
            if is_new:
                process_resource_change_bound.delay(
                    action="created", sender="Group", instance_id=event.group_id
                )

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(
                    plugin_slug=plugin.slug,
                    event=event,
                    is_new=is_new,
                    # NOTE(review): "is_regresion" is misspelled but appears to
                    # match the hook's accepted keyword — confirm before renaming.
                    is_regresion=is_regression
                )

            from sentry import similarity

            safe_execute(similarity.record, event.project, [event], _with_transaction=False)

        # Patch attachments that were ingested on the standalone path.
        update_existing_attachments(event)

        if not is_reprocessed:
            event_processed.send_robust(
                sender=post_process_group,
                project=event.project,
                event=event,
                primary_hash=kwargs.get("primary_hash"),
            )

    with metrics.timer("tasks.post_process.delete_event_cache"):
        event_processing_store.delete_by_key(cache_key)
def update_groups(request, projects, organization_id, search_fn, has_inbox=False):
    """
    Bulk-update issue groups from an HTTP request.

    Group ids come from the ``id`` query params; when absent, ``search_fn``
    is used to resolve up to 1000 groups. The validated payload drives
    status changes (resolve / ignore / unresolve), assignment, seen /
    bookmark / subscribe flags, sharing, merging, and inbox moves.

    :param request:         DRF request carrying query params and payload.
    :param projects:        candidate projects the mutation may touch.
    :param organization_id: owning organization id; scopes all queries.
    :param search_fn:       callable used for bulk selection when no ids given.
    :param has_inbox:       when set, inbox state is reflected in the response.
    :returns: DRF ``Response`` describing what was applied (204 if no match).
    """
    group_ids = request.GET.getlist("id")
    if group_ids:
        group_list = Group.objects.filter(
            project__organization_id=organization_id, project__in=projects, id__in=group_ids
        )
        # filter down group ids to only valid matches
        group_ids = [g.id for g in group_list]
        if not group_ids:
            return Response(status=204)
    else:
        group_list = None

    # TODO(jess): We may want to look into refactoring GroupValidator
    # to support multiple projects, but this is pretty complicated
    # because of the assignee validation. Punting on this for now.
    for project in projects:
        serializer = GroupValidator(
            data=request.data,
            partial=True,
            context={"project": project, "access": getattr(request, "access", None)},
        )
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
    # NOTE(review): if ``projects`` is empty this raises NameError on
    # ``serializer`` — confirm callers always pass at least one project.
    result = dict(serializer.validated_data)

    # so we won't have to requery for each group
    project_lookup = {p.id: p for p in projects}

    acting_user = request.user if request.user.is_authenticated() else None

    if not group_ids:
        try:
            # bulk mutations are limited to 1000 items
            # TODO(dcramer): it'd be nice to support more than this, but its
            # a bit too complicated right now
            cursor_result, _ = search_fn({"limit": 1000, "paginator_options": {"max_limit": 1000}})
        except ValidationError as exc:
            return Response({"detail": six.text_type(exc)}, status=400)

        group_list = list(cursor_result)
        group_ids = [g.id for g in group_list]

    is_bulk = len(group_ids) > 1

    group_project_ids = {g.project_id for g in group_list}
    # filter projects down to only those that have groups in the search results
    projects = [p for p in projects if p.id in group_project_ids]

    queryset = Group.objects.filter(id__in=group_ids)

    discard = result.get("discard")
    if discard:
        return handle_discard(request, list(queryset), projects, acting_user)

    statusDetails = result.pop("statusDetails", result)
    status = result.get("status")
    release = None
    commit = None

    # ------------------------------------------------------------------
    # Resolution handling (resolved / resolvedInNextRelease / in commit).
    # ------------------------------------------------------------------
    if status in ("resolved", "resolvedInNextRelease"):
        if status == "resolvedInNextRelease" or statusDetails.get("inNextRelease"):
            # TODO(jess): We may want to support this for multi project, but punting on it for now
            if len(projects) > 1:
                return Response(
                    {"detail": "Cannot set resolved in next release for multiple projects."},
                    status=400,
                )
            # Use the explicitly-supplied release, or the most recent release
            # on the project (by release/added date).
            release = (
                statusDetails.get("inNextRelease")
                or Release.objects.filter(
                    projects=projects[0], organization_id=projects[0].organization_id
                )
                .extra(select={"sort": "COALESCE(date_released, date_added)"})
                .order_by("-sort")[0]
            )
            activity_type = Activity.SET_RESOLVED_IN_RELEASE
            activity_data = {
                # no version yet
                "version": ""
            }
            status_details = {
                "inNextRelease": True,
                "actor": serialize(extract_lazy_object(request.user), request.user),
            }
            res_type = GroupResolution.Type.in_next_release
            res_type_str = "in_next_release"
            res_status = GroupResolution.Status.pending
        elif statusDetails.get("inRelease"):
            # TODO(jess): We could update validation to check if release
            # applies to multiple projects, but I think we agreed to punt
            # on this for now
            if len(projects) > 1:
                return Response(
                    {"detail": "Cannot set resolved in release for multiple projects."}, status=400
                )
            release = statusDetails["inRelease"]
            activity_type = Activity.SET_RESOLVED_IN_RELEASE
            activity_data = {
                "version": release.version
            }
            status_details = {
                "inRelease": release.version,
                "actor": serialize(extract_lazy_object(request.user), request.user),
            }
            res_type = GroupResolution.Type.in_release
            res_type_str = "in_release"
            res_status = GroupResolution.Status.resolved
        elif statusDetails.get("inCommit"):
            # TODO(jess): Same here, this is probably something we could do, but
            # punting for now.
            if len(projects) > 1:
                return Response(
                    {"detail": "Cannot set resolved in commit for multiple projects."}, status=400
                )
            commit = statusDetails["inCommit"]
            activity_type = Activity.SET_RESOLVED_IN_COMMIT
            activity_data = {"commit": commit.id}
            status_details = {
                "inCommit": serialize(commit, request.user),
                "actor": serialize(extract_lazy_object(request.user), request.user),
            }
            res_type_str = "in_commit"
        else:
            res_type_str = "now"
            activity_type = Activity.SET_RESOLVED
            activity_data = {}
            status_details = {}

        now = timezone.now()
        metrics.incr("group.resolved", instance=res_type_str, skip_internal=True)

        # if we've specified a commit, let's see if its already been released
        # this will allow us to associate the resolution to a release as if we
        # were simply using 'inRelease' above
        # Note: this is different than the way commit resolution works on deploy
        # creation, as a given deploy is connected to an explicit release, and
        # in this case we're simply choosing the most recent release which contains
        # the commit.
        if commit and not release:
            # TODO(jess): If we support multiple projects for release / commit resolution,
            # we need to update this to find the release for each project (we shouldn't assume
            # it's the same)
            try:
                release = (
                    Release.objects.filter(projects__in=projects, releasecommit__commit=commit)
                    .extra(select={"sort": "COALESCE(date_released, date_added)"})
                    .order_by("-sort")[0]
                )
                res_type = GroupResolution.Type.in_release
                res_status = GroupResolution.Status.resolved
            except IndexError:
                release = None

        # Apply the resolution to every selected group, each in its own
        # transaction; signals/syncs are fired outside the transaction.
        for group in group_list:
            with transaction.atomic():
                resolution = None
                if release:
                    resolution_params = {
                        "release": release,
                        "type": res_type,
                        "status": res_status,
                        "actor_id": request.user.id if request.user.is_authenticated() else None,
                    }
                    resolution, created = GroupResolution.objects.get_or_create(
                        group=group, defaults=resolution_params
                    )
                    if not created:
                        resolution.update(datetime=timezone.now(), **resolution_params)

                if commit:
                    GroupLink.objects.create(
                        group_id=group.id,
                        project_id=group.project_id,
                        linked_type=GroupLink.LinkedType.commit,
                        relationship=GroupLink.Relationship.resolves,
                        linked_id=commit.id,
                    )

                affected = Group.objects.filter(id=group.id).update(
                    status=GroupStatus.RESOLVED, resolved_at=now
                )
                # Without a resolution record, "created" tracks whether the
                # status row actually flipped.
                if not resolution:
                    created = affected

                group.status = GroupStatus.RESOLVED
                group.resolved_at = now
                remove_group_from_inbox(
                    group, action=GroupInboxRemoveAction.RESOLVED, user=acting_user
                )
                if has_inbox:
                    result["inbox"] = None

                assigned_to = self_subscribe_and_assign_issue(acting_user, group)
                if assigned_to is not None:
                    result["assignedTo"] = assigned_to

                if created:
                    activity = Activity.objects.create(
                        project=project_lookup[group.project_id],
                        group=group,
                        type=activity_type,
                        user=acting_user,
                        ident=resolution.id if resolution else None,
                        data=activity_data,
                    )
                    # TODO(dcramer): we need a solution for activity rollups
                    # before sending notifications on bulk changes
                    if not is_bulk:
                        activity.send_notification()

            issue_resolved.send_robust(
                organization_id=organization_id,
                user=acting_user or request.user,
                group=group,
                project=project_lookup[group.project_id],
                resolution_type=res_type_str,
                sender=update_groups,
            )

            kick_off_status_syncs.apply_async(
                kwargs={"project_id": group.project_id, "group_id": group.id}
            )

        result.update({"status": "resolved", "statusDetails": status_details})

    # ------------------------------------------------------------------
    # Other status transitions (ignored / unresolved).
    # ------------------------------------------------------------------
    elif status:
        new_status = STATUS_UPDATE_CHOICES[result["status"]]

        with transaction.atomic():
            # "happened" is the number of rows that actually changed status.
            happened = queryset.exclude(status=new_status).update(status=new_status)

            GroupResolution.objects.filter(group__in=group_ids).delete()

            if new_status == GroupStatus.IGNORED:
                metrics.incr("group.ignored", skip_internal=True)
                # NOTE(review): this loop passes raw ids (not Group objects)
                # to remove_group_from_inbox, unlike every other call site —
                # verify remove_group_from_inbox tolerates ids here.
                for group in group_ids:
                    remove_group_from_inbox(
                        group, action=GroupInboxRemoveAction.IGNORED, user=acting_user
                    )
                if has_inbox:
                    result["inbox"] = None

                ignore_duration = (
                    statusDetails.pop("ignoreDuration", None)
                    or statusDetails.pop("snoozeDuration", None)
                ) or None
                ignore_count = statusDetails.pop("ignoreCount", None) or None
                ignore_window = statusDetails.pop("ignoreWindow", None) or None
                ignore_user_count = statusDetails.pop("ignoreUserCount", None) or None
                ignore_user_window = statusDetails.pop("ignoreUserWindow", None) or None
                if ignore_duration or ignore_count or ignore_user_count:
                    if ignore_duration:
                        ignore_until = timezone.now() + timedelta(minutes=ignore_duration)
                    else:
                        ignore_until = None
                    for group in group_list:
                        # Snapshot current counters so count/user-count based
                        # snoozes measure deltas from "now".
                        state = {}
                        if ignore_count and not ignore_window:
                            state["times_seen"] = group.times_seen
                        if ignore_user_count and not ignore_user_window:
                            state["users_seen"] = group.count_users_seen()
                        GroupSnooze.objects.create_or_update(
                            group=group,
                            values={
                                "until": ignore_until,
                                "count": ignore_count,
                                "window": ignore_window,
                                "user_count": ignore_user_count,
                                "user_window": ignore_user_window,
                                "state": state,
                                "actor_id": request.user.id
                                if request.user.is_authenticated()
                                else None,
                            },
                        )
                    result["statusDetails"] = {
                        "ignoreCount": ignore_count,
                        "ignoreUntil": ignore_until,
                        "ignoreUserCount": ignore_user_count,
                        "ignoreUserWindow": ignore_user_window,
                        "ignoreWindow": ignore_window,
                        "actor": serialize(extract_lazy_object(request.user), request.user),
                    }
                else:
                    # Plain ignore with no conditions: clear any snoozes.
                    GroupSnooze.objects.filter(group__in=group_ids).delete()
                    ignore_until = None
                    result["statusDetails"] = {}
            else:
                result["statusDetails"] = {}

        # Record activity + fire signals only when something actually changed.
        if group_list and happened:
            if new_status == GroupStatus.UNRESOLVED:
                activity_type = Activity.SET_UNRESOLVED
                activity_data = {}

                for group in group_list:
                    if group.status == GroupStatus.IGNORED:
                        issue_unignored.send_robust(
                            project=project,
                            user=acting_user,
                            group=group,
                            transition_type="manual",
                            sender=update_groups,
                        )
                    else:
                        issue_unresolved.send_robust(
                            project=project,
                            user=acting_user,
                            group=group,
                            transition_type="manual",
                            sender=update_groups,
                        )
            elif new_status == GroupStatus.IGNORED:
                activity_type = Activity.SET_IGNORED
                activity_data = {
                    "ignoreCount": ignore_count,
                    "ignoreDuration": ignore_duration,
                    "ignoreUntil": ignore_until,
                    "ignoreUserCount": ignore_user_count,
                    "ignoreUserWindow": ignore_user_window,
                    "ignoreWindow": ignore_window,
                }

                # Fire issue_ignored once per project with that project's groups.
                groups_by_project_id = defaultdict(list)
                for group in group_list:
                    groups_by_project_id[group.project_id].append(group)

                for project in projects:
                    project_groups = groups_by_project_id.get(project.id)
                    if project_groups:
                        issue_ignored.send_robust(
                            project=project,
                            user=acting_user,
                            group_list=project_groups,
                            activity_data=activity_data,
                            sender=update_groups,
                        )

            for group in group_list:
                group.status = new_status

                activity = Activity.objects.create(
                    project=project_lookup[group.project_id],
                    group=group,
                    type=activity_type,
                    user=acting_user,
                    data=activity_data,
                )
                # TODO(dcramer): we need a solution for activity rollups
                # before sending notifications on bulk changes
                if not is_bulk:
                    if acting_user:
                        GroupSubscription.objects.subscribe(
                            user=acting_user,
                            group=group,
                            reason=GroupSubscriptionReason.status_change,
                        )
                    activity.send_notification()

                if new_status == GroupStatus.UNRESOLVED:
                    kick_off_status_syncs.apply_async(
                        kwargs={"project_id": group.project_id, "group_id": group.id}
                    )

    # ------------------------------------------------------------------
    # Assignment.
    # ------------------------------------------------------------------
    if "assignedTo" in result:
        assigned_actor = result["assignedTo"]
        if assigned_actor:
            for group in group_list:
                resolved_actor = assigned_actor.resolve()

                GroupAssignee.objects.assign(group, resolved_actor, acting_user)
            result["assignedTo"] = serialize(
                assigned_actor.resolve(), acting_user, ActorSerializer()
            )
        else:
            for group in group_list:
                GroupAssignee.objects.deassign(group, acting_user)

    # ------------------------------------------------------------------
    # Seen / bookmark / subscribe flags.
    # ------------------------------------------------------------------
    is_member_map = {
        project.id: project.member_set.filter(user=acting_user).exists() for project in projects
    }
    if result.get("hasSeen"):
        for group in group_list:
            if is_member_map.get(group.project_id):
                instance, created = create_or_update(
                    GroupSeen,
                    group=group,
                    user=acting_user,
                    project=project_lookup[group.project_id],
                    values={"last_seen": timezone.now()},
                )
    elif result.get("hasSeen") is False:
        GroupSeen.objects.filter(group__in=group_ids, user=acting_user).delete()

    if result.get("isBookmarked"):
        for group in group_list:
            GroupBookmark.objects.get_or_create(
                project=project_lookup[group.project_id], group=group, user=acting_user
            )
            GroupSubscription.objects.subscribe(
                user=acting_user, group=group, reason=GroupSubscriptionReason.bookmark
            )
    elif result.get("isBookmarked") is False:
        GroupBookmark.objects.filter(group__in=group_ids, user=acting_user).delete()

    # TODO(dcramer): we could make these more efficient by first
    # querying for rich rows are present (if N > 2), flipping the flag
    # on those rows, and then creating the missing rows
    if result.get("isSubscribed") in (True, False):
        is_subscribed = result["isSubscribed"]
        for group in group_list:
            # NOTE: Subscribing without an initiating event (assignment,
            # commenting, etc.) clears out the previous subscription reason
            # to avoid showing confusing messaging as a result of this
            # action. It'd be jarring to go directly from "you are not
            # subscribed" to "you were subscribed due since you were
            # assigned" just by clicking the "subscribe" button (and you
            # may no longer be assigned to the issue anyway.)
            GroupSubscription.objects.create_or_update(
                user=acting_user,
                group=group,
                project=project_lookup[group.project_id],
                values={"is_active": is_subscribed, "reason": GroupSubscriptionReason.unknown},
            )

        result["subscriptionDetails"] = {
            "reason": SUBSCRIPTION_REASON_MAP.get(GroupSubscriptionReason.unknown, "unknown")
        }

    # ------------------------------------------------------------------
    # Sharing (public links).
    # ------------------------------------------------------------------
    if "isPublic" in result:
        # We always want to delete an existing share, because triggering
        # an isPublic=True even when it's already public, should trigger
        # regenerating.
        for group in group_list:
            if GroupShare.objects.filter(group=group).delete():
                result["shareId"] = None
                Activity.objects.create(
                    project=project_lookup[group.project_id],
                    group=group,
                    type=Activity.SET_PRIVATE,
                    user=acting_user,
                )

        if result.get("isPublic"):
            for group in group_list:
                share, created = GroupShare.objects.get_or_create(
                    project=project_lookup[group.project_id], group=group, user=acting_user
                )
                if created:
                    result["shareId"] = share.uuid
                    Activity.objects.create(
                        project=project_lookup[group.project_id],
                        group=group,
                        type=Activity.SET_PUBLIC,
                        user=acting_user,
                    )

    # ------------------------------------------------------------------
    # Merging.
    # ------------------------------------------------------------------
    # XXX(dcramer): this feels a bit shady like it should be its own
    # endpoint
    if result.get("merge") and len(group_list) > 1:
        # don't allow merging cross project
        if len(projects) > 1:
            return Response({"detail": "Merging across multiple projects is not supported"})
        # Most-seen group becomes the merge target.
        group_list_by_times_seen = sorted(
            group_list, key=lambda g: (g.times_seen, g.id), reverse=True
        )
        primary_group, groups_to_merge = group_list_by_times_seen[0], group_list_by_times_seen[1:]

        group_ids_to_merge = [g.id for g in groups_to_merge]
        eventstream_state = eventstream.start_merge(
            primary_group.project_id, group_ids_to_merge, primary_group.id
        )

        Group.objects.filter(id__in=group_ids_to_merge).update(status=GroupStatus.PENDING_MERGE)

        transaction_id = uuid4().hex
        merge_groups.delay(
            from_object_ids=group_ids_to_merge,
            to_object_id=primary_group.id,
            transaction_id=transaction_id,
            eventstream_state=eventstream_state,
        )

        Activity.objects.create(
            project=project_lookup[primary_group.project_id],
            group=primary_group,
            type=Activity.MERGE,
            user=acting_user,
            data={"issues": [{"id": c.id} for c in groups_to_merge]},
        )

        result["merge"] = {
            "parent": six.text_type(primary_group.id),
            "children": [six.text_type(g.id) for g in groups_to_merge],
        }

    # Support moving groups in or out of the inbox
    inbox = result.get("inbox", None)
    if inbox is not None:
        if inbox:
            for group in group_list:
                add_group_to_inbox(group, GroupInboxReason.MANUAL)
        elif not inbox:
            for group in group_list:
                remove_group_from_inbox(
                    group, action=GroupInboxRemoveAction.MARK_REVIEWED, user=acting_user
                )
                issue_mark_reviewed.send_robust(
                    project=project,
                    user=acting_user,
                    group=group,
                    sender=update_groups,
                )
        result["inbox"] = inbox

    return Response(result)
def post_process_group(is_new, is_regression, is_new_group_environment, cache_key, group_id=None, **kwargs):
    """
    Fires post processing hooks for a group.

    NOTE(review): this appears to be an older variant of the
    ``post_process_group`` defined earlier in this file — it skips
    reprocessed events entirely and only handles grouped (error) events
    inside the ``event.group_id`` branch; confirm which version is live.
    """
    from sentry.eventstore.models import Event
    from sentry.eventstore.processing import event_processing_store
    from sentry.utils import snuba
    from sentry.reprocessing2 import is_reprocessed_event

    # Force consistent Snuba reads for everything below.
    with snuba.options_override({"consistent": True}):
        # We use the data being present/missing in the processing store
        # to ensure that we don't duplicate work should the forwarding consumers
        # need to rewind history.
        data = event_processing_store.get(cache_key)
        if not data:
            logger.info(
                "post_process.skipped",
                extra={"cache_key": cache_key, "reason": "missing_cache"},
            )
            return
        event = Event(
            project_id=data["project"], event_id=data["event_id"], group_id=group_id, data=data
        )

        # Reprocessed events are skipped entirely in this variant.
        if is_reprocessed_event(event.data):
            logger.info(
                "post_process.skipped",
                extra={
                    "project_id": event.project_id,
                    "event_id": event.event_id,
                    "reason": "reprocessed",
                },
            )
            return

        set_current_project(event.project_id)

        # NOTE: we must pass through the full Event object, and not an
        # event_id since the Event object may not actually have been stored
        # in the database due to sampling.
        from sentry.models import (
            Project,
            Organization,
            EventDict,
            GroupInboxReason,
        )
        from sentry.models.groupinbox import add_group_to_inbox
        from sentry.models.group import get_group_with_redirect
        from sentry.rules.processor import RuleProcessor
        from sentry.tasks.servicehooks import process_service_hook

        # Re-bind node data to avoid renormalization. We only want to
        # renormalize when loading old data from the database.
        event.data = EventDict(event.data, skip_renormalization=True)

        # Re-bind Project and Org since we're reading the Event object
        # from cache which may contain stale parent models.
        event.project = Project.objects.get_from_cache(id=event.project_id)
        event.project._organization_cache = Organization.objects.get_from_cache(
            id=event.project.organization_id
        )

        if event.group_id:
            # Re-bind Group since we're reading the Event object
            # from cache, which may contain a stale group and project
            event.group, _ = get_group_with_redirect(event.group_id)
            event.group_id = event.group.id

            event.group.project = event.project
            event.group.project._organization_cache = event.project._organization_cache

        bind_organization_context(event.project.organization)

        _capture_stats(event, is_new)

        # Everything below only applies to grouped (error) events;
        # transactions have no group.
        if event.group_id:
            # we process snoozes before rules as it might create a regression
            # but not if it's new because you can't immediately snooze a new group
            has_reappeared = False if is_new else process_snoozes(event.group)
            if not has_reappeared:  # If true, we added the .UNIGNORED reason already
                if is_new:
                    add_group_to_inbox(event.group, GroupInboxReason.NEW)
                elif is_regression:
                    add_group_to_inbox(event.group, GroupInboxReason.REGRESSION)

            handle_owner_assignment(event.project, event.group, event)

            rp = RuleProcessor(event, is_new, is_regression, is_new_group_environment, has_reappeared)
            has_alert = False
            # TODO(dcramer): ideally this would fanout, but serializing giant
            # objects back and forth isn't super efficient
            for callback, futures in rp.apply():
                has_alert = True
                # Each rule callback is traced as its own (always-sampled) transaction.
                with sentry_sdk.start_transaction(
                    op="post_process_group", name="rule_processor_apply", sampled=True
                ):
                    safe_execute(callback, event, futures)

            if features.has("projects:servicehooks", project=event.project):
                allowed_events = set(["event.created"])
                if has_alert:
                    allowed_events.add("event.alert")

                if allowed_events:
                    for servicehook_id, events in _get_service_hooks(project_id=event.project_id):
                        if any(e in allowed_events for e in events):
                            process_service_hook.delay(servicehook_id=servicehook_id, event=event)

            from sentry.tasks.sentry_apps import process_resource_change_bound

            if event.get_event_type() == "error" and _should_send_error_created_hooks(
                event.project
            ):
                process_resource_change_bound.delay(
                    action="created", sender="Error", instance_id=event.event_id, instance=event
                )
            if is_new:
                process_resource_change_bound.delay(
                    action="created", sender="Group", instance_id=event.group_id
                )

            # Patch attachments that were ingested on the standalone path.
            update_existing_attachments(event)

            from sentry.plugins.base import plugins

            for plugin in plugins.for_project(event.project):
                plugin_post_process_group(
                    # NOTE(review): "is_regresion" is misspelled but appears to
                    # match the hook's accepted keyword — confirm before renaming.
                    plugin_slug=plugin.slug, event=event, is_new=is_new, is_regresion=is_regression
                )

        event_processed.send_robust(
            sender=post_process_group,
            project=event.project,
            event=event,
            primary_hash=kwargs.get("primary_hash"),
        )

    with metrics.timer("tasks.post_process.delete_event_cache"):
        event_processing_store.delete_by_key(cache_key)
def update_groups(
    request: Request,
    group_ids: Sequence[Group],
    projects: Sequence[Project],
    organization_id: int,
    search_fn: SearchFunction | None,
    user: User | None = None,
    data: Mapping[str, Any] | None = None,
) -> Response:
    """Apply a bulk mutation to one or more issues (``Group`` rows).

    Depending on the validated payload this handles: discarding groups,
    resolution (plain / in-next-release / in-release / in-commit),
    status changes (ignore with optional snooze parameters, unresolve),
    assignment, seen-state, bookmarks, subscriptions, public sharing,
    merging, and inbox membership.

    Parameters:
        request: the incoming DRF request; used for validation context,
            ``request.user`` / ``request.data`` fallbacks, and the HTTP
            referrer when removing groups from the inbox.
        group_ids: explicit group ids to mutate; when empty, ``search_fn``
            is used to find the target groups instead.
        projects: the projects the mutation may touch; several branches
            reject multi-project input with a 400.
        organization_id: organization scoping for the group query.
        search_fn: callable used to resolve groups when ``group_ids`` is
            not given (bounded by BULK_MUTATION_LIMIT).
        user, data: optional overrides for ``request.user`` / ``request.data``.

    Returns:
        A DRF ``Response`` describing the applied mutation, a 204 when no
        valid groups matched, a 400 on validation errors, or ``None`` when
        ``projects`` is empty (no serializer ever ran).
    """
    # If `user` and `data` are passed as parameters then they should override
    # the values in `request`.
    user = user or request.user
    data = data or request.data

    if group_ids:
        group_list = Group.objects.filter(
            project__organization_id=organization_id, project__in=projects, id__in=group_ids
        )
        # filter down group ids to only valid matches
        group_ids = [g.id for g in group_list]
        if not group_ids:
            # None of the requested ids exist in these projects: nothing to do.
            return Response(status=204)
    else:
        group_list = None

    serializer = None
    # TODO(jess): We may want to look into refactoring GroupValidator
    # to support multiple projects, but this is pretty complicated
    # because of the assignee validation. Punting on this for now.
    for project in projects:
        serializer = GroupValidator(
            data=data,
            partial=True,
            context={
                "project": project,
                "organization": project.organization,
                "access": getattr(request, "access", None),
            },
        )
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)

    if serializer is None:
        # NOTE(review): reached only when `projects` is empty; returns None
        # rather than a Response — presumably callers guarantee non-empty
        # projects. TODO confirm.
        return

    # Only the last project's validated data is kept; earlier iterations
    # exist purely to validate against each project.
    result = dict(serializer.validated_data)

    # so we won't have to requery for each group
    project_lookup = {p.id: p for p in projects}

    acting_user = user if user.is_authenticated else None

    # No explicit ids: resolve the target groups via the search function.
    if search_fn and not group_ids:
        try:
            cursor_result, _ = search_fn(
                {
                    "limit": BULK_MUTATION_LIMIT,
                    "paginator_options": {"max_limit": BULK_MUTATION_LIMIT},
                }
            )
        except ValidationError as exc:
            return Response({"detail": str(exc)}, status=400)

        group_list = list(cursor_result)
        group_ids = [g.id for g in group_list]

    # Bulk mutations suppress per-group notifications further down.
    is_bulk = len(group_ids) > 1

    group_project_ids = {g.project_id for g in group_list}
    # filter projects down to only those that have groups in the search results
    projects = [p for p in projects if p.id in group_project_ids]

    queryset = Group.objects.filter(id__in=group_ids)

    discard = result.get("discard")
    if discard:
        return handle_discard(request, list(queryset), projects, acting_user)

    # NOTE(review): default is `result` itself (not {}), so a payload without
    # statusDetails makes statusDetails alias the whole result dict — looks
    # intentional-but-surprising; confirm before changing.
    statusDetails = result.pop("statusDetails", result)
    status = result.get("status")
    release = None
    commit = None
    res_type = None
    activity_type = None
    activity_data: MutableMapping[str, Any | None] | None = None

    # ------------------------------------------------------------------
    # Resolution handling (resolved / resolvedInNextRelease variants).
    # ------------------------------------------------------------------
    if status in ("resolved", "resolvedInNextRelease"):
        res_status = None
        if status == "resolvedInNextRelease" or statusDetails.get("inNextRelease"):
            # TODO(jess): We may want to support this for multi project, but punting on it for now
            if len(projects) > 1:
                return Response(
                    {"detail": "Cannot set resolved in next release for multiple projects."},
                    status=400,
                )
            # Use the release given in statusDetails, else the most recent
            # release of the (single) project by released/added date.
            release = (
                statusDetails.get("inNextRelease")
                or Release.objects.filter(
                    projects=projects[0], organization_id=projects[0].organization_id
                )
                .extra(select={"sort": "COALESCE(date_released, date_added)"})
                .order_by("-sort")[0]
            )
            activity_type = Activity.SET_RESOLVED_IN_RELEASE
            activity_data = {
                # no version yet
                "version": ""
            }
            status_details = {
                "inNextRelease": True,
                "actor": serialize(extract_lazy_object(user), user),
            }
            res_type = GroupResolution.Type.in_next_release
            res_type_str = "in_next_release"
            res_status = GroupResolution.Status.pending
        elif statusDetails.get("inRelease"):
            # TODO(jess): We could update validation to check if release
            # applies to multiple projects, but I think we agreed to punt
            # on this for now
            if len(projects) > 1:
                return Response(
                    {"detail": "Cannot set resolved in release for multiple projects."}, status=400
                )
            release = statusDetails["inRelease"]
            activity_type = Activity.SET_RESOLVED_IN_RELEASE
            activity_data = {
                # no version yet
                "version": release.version
            }
            status_details = {
                "inRelease": release.version,
                "actor": serialize(extract_lazy_object(user), user),
            }
            res_type = GroupResolution.Type.in_release
            res_type_str = "in_release"
            res_status = GroupResolution.Status.resolved
        elif statusDetails.get("inCommit"):
            # TODO(jess): Same here, this is probably something we could do, but
            # punting for now.
            if len(projects) > 1:
                return Response(
                    {"detail": "Cannot set resolved in commit for multiple projects."}, status=400
                )
            commit = statusDetails["inCommit"]
            activity_type = Activity.SET_RESOLVED_IN_COMMIT
            activity_data = {"commit": commit.id}
            status_details = {
                "inCommit": serialize(commit, user),
                "actor": serialize(extract_lazy_object(user), user),
            }
            # res_type intentionally stays None here; it may be filled in
            # below if the commit maps to a release.
            res_type_str = "in_commit"
        else:
            res_type_str = "now"
            activity_type = Activity.SET_RESOLVED
            activity_data = {}
            status_details = {}

        now = timezone.now()
        metrics.incr("group.resolved", instance=res_type_str, skip_internal=True)

        # if we've specified a commit, let's see if its already been released
        # this will allow us to associate the resolution to a release as if we
        # were simply using 'inRelease' above
        # Note: this is different than the way commit resolution works on deploy
        # creation, as a given deploy is connected to an explicit release, and
        # in this case we're simply choosing the most recent release which contains
        # the commit.
        if commit and not release:
            # TODO(jess): If we support multiple projects for release / commit resolution,
            # we need to update this to find the release for each project (we shouldn't assume
            # it's the same)
            try:
                release = (
                    Release.objects.filter(projects__in=projects, releasecommit__commit=commit)
                    .extra(select={"sort": "COALESCE(date_released, date_added)"})
                    .order_by("-sort")[0]
                )
                res_type = GroupResolution.Type.in_release
                res_status = GroupResolution.Status.resolved
            except IndexError:
                # Commit is not part of any release yet.
                release = None

        for group in group_list:
            with transaction.atomic():
                resolution = None
                created = None
                if release:
                    resolution_params = {
                        "release": release,
                        "type": res_type,
                        "status": res_status,
                        "actor_id": user.id if user.is_authenticated else None,
                    }

                    # We only set `current_release_version` if GroupResolution type is
                    # in_next_release, because we need to store information about the latest/most
                    # recent release that was associated with a group and that is required for
                    # release comparisons (i.e. handling regressions)
                    if res_type == GroupResolution.Type.in_next_release:
                        # Check if semver versioning scheme is followed
                        follows_semver = follows_semver_versioning_scheme(
                            org_id=group.organization.id,
                            project_id=group.project.id,
                            release_version=release.version,
                        )

                        current_release_version = get_current_release_version_of_group(
                            group=group, follows_semver=follows_semver
                        )
                        if current_release_version:
                            resolution_params.update(
                                {"current_release_version": current_release_version}
                            )

                            # Sets `current_release_version` for activity, since there is no point
                            # waiting for when a new release is created i.e.
                            # clear_expired_resolutions task to be run.
                            # Activity should look like "... resolved in version
                            # >current_release_version" in the UI
                            if follows_semver:
                                activity_data.update(
                                    {"current_release_version": current_release_version}
                                )

                                # In semver projects, and thereby semver releases, we determine
                                # resolutions by comparing against an expression rather than a
                                # specific release (i.e. >current_release_version). Consequently,
                                # at this point we can consider this GroupResolution as resolved
                                # in release
                                resolution_params.update(
                                    {
                                        "type": GroupResolution.Type.in_release,
                                        "status": GroupResolution.Status.resolved,
                                    }
                                )
                            else:
                                # If we already know the `next` release in date based ordering
                                # when clicking on `resolvedInNextRelease` because it is already
                                # been released, there is no point in setting GroupResolution to
                                # be of type in_next_release but rather in_release would suffice
                                try:
                                    # Get current release object from current_release_version
                                    current_release_obj = Release.objects.get(
                                        version=current_release_version,
                                        organization_id=projects[0].organization_id,
                                    )

                                    # Strictly-later releases by (date_added, id) tiebreak.
                                    date_order_q = Q(
                                        date_added__gt=current_release_obj.date_added
                                    ) | Q(
                                        date_added=current_release_obj.date_added,
                                        id__gt=current_release_obj.id,
                                    )

                                    # Find the next release after the current_release_version
                                    # i.e. the release that resolves the issue
                                    resolved_in_release = (
                                        Release.objects.filter(
                                            date_order_q,
                                            projects=projects[0],
                                            organization_id=projects[0].organization_id,
                                        )
                                        .extra(
                                            select={"sort": "COALESCE(date_released, date_added)"}
                                        )
                                        .order_by("sort", "id")[:1]
                                        .get()
                                    )

                                    # If we get here, we assume it exists and so we update
                                    # GroupResolution and Activity
                                    resolution_params.update(
                                        {
                                            "release": resolved_in_release,
                                            "type": GroupResolution.Type.in_release,
                                            "status": GroupResolution.Status.resolved,
                                        }
                                    )
                                    activity_data.update(
                                        {"version": resolved_in_release.version}
                                    )
                                except Release.DoesNotExist:
                                    # If it gets here, it means we don't know the upcoming
                                    # release yet because it does not exist, and so we should
                                    # fall back to our current model
                                    ...

                    resolution, created = GroupResolution.objects.get_or_create(
                        group=group, defaults=resolution_params
                    )
                    if not created:
                        resolution.update(datetime=timezone.now(), **resolution_params)

                if commit:
                    # Link the resolving commit to the group.
                    GroupLink.objects.create(
                        group_id=group.id,
                        project_id=group.project_id,
                        linked_type=GroupLink.LinkedType.commit,
                        relationship=GroupLink.Relationship.resolves,
                        linked_id=commit.id,
                    )

                affected = Group.objects.filter(id=group.id).update(
                    status=GroupStatus.RESOLVED, resolved_at=now
                )
                if not resolution:
                    # No release-based resolution: treat the row update count
                    # as the "newly resolved" signal.
                    created = affected

                # Keep the in-memory instance in sync with the DB update above.
                group.status = GroupStatus.RESOLVED
                group.resolved_at = now
                remove_group_from_inbox(
                    group, action=GroupInboxRemoveAction.RESOLVED, user=acting_user
                )
                result["inbox"] = None

                assigned_to = self_subscribe_and_assign_issue(acting_user, group)
                if assigned_to is not None:
                    result["assignedTo"] = assigned_to

                if created:
                    activity = Activity.objects.create(
                        project=project_lookup[group.project_id],
                        group=group,
                        type=activity_type,
                        user=acting_user,
                        ident=resolution.id if resolution else None,
                        data=activity_data,
                    )
                    record_group_history_from_activity_type(
                        group, activity_type, actor=acting_user
                    )

                    # TODO(dcramer): we need a solution for activity rollups
                    # before sending notifications on bulk changes
                    if not is_bulk:
                        activity.send_notification()

            # Signals/tasks fire after the transaction commits for this group.
            issue_resolved.send_robust(
                organization_id=organization_id,
                user=acting_user or user,
                group=group,
                project=project_lookup[group.project_id],
                resolution_type=res_type_str,
                sender=update_groups,
            )

            kick_off_status_syncs.apply_async(
                kwargs={"project_id": group.project_id, "group_id": group.id}
            )

        result.update({"status": "resolved", "statusDetails": status_details})

    # ------------------------------------------------------------------
    # Other status transitions (ignored / unresolved).
    # ------------------------------------------------------------------
    elif status:
        new_status = STATUS_UPDATE_CHOICES[result["status"]]
        ignore_duration = None
        ignore_count = None
        ignore_window = None
        ignore_user_count = None
        ignore_user_window = None
        ignore_until = None

        with transaction.atomic():
            # `happened` is the number of rows whose status actually changed.
            happened = queryset.exclude(status=new_status).update(status=new_status)

            # Any status change invalidates pending resolutions.
            GroupResolution.objects.filter(group__in=group_ids).delete()
            if new_status == GroupStatus.IGNORED:
                metrics.incr("group.ignored", skip_internal=True)
                # NOTE(review): iterates ids (not Group instances) — relies on
                # the FK filter accepting pk values downstream; confirm.
                for group in group_ids:
                    remove_group_from_inbox(
                        group, action=GroupInboxRemoveAction.IGNORED, user=acting_user
                    )
                result["inbox"] = None

                # `snoozeDuration` is the legacy alias for `ignoreDuration`.
                ignore_duration = (
                    statusDetails.pop("ignoreDuration", None)
                    or statusDetails.pop("snoozeDuration", None)
                ) or None
                ignore_count = statusDetails.pop("ignoreCount", None) or None
                ignore_window = statusDetails.pop("ignoreWindow", None) or None
                ignore_user_count = statusDetails.pop("ignoreUserCount", None) or None
                ignore_user_window = statusDetails.pop("ignoreUserWindow", None) or None
                if ignore_duration or ignore_count or ignore_user_count:
                    if ignore_duration:
                        ignore_until = timezone.now() + timedelta(minutes=ignore_duration)
                    else:
                        ignore_until = None
                    for group in group_list:
                        # Snapshot current counters so window-less thresholds
                        # are measured relative to "now".
                        state = {}
                        if ignore_count and not ignore_window:
                            state["times_seen"] = group.times_seen
                        if ignore_user_count and not ignore_user_window:
                            state["users_seen"] = group.count_users_seen()
                        GroupSnooze.objects.create_or_update(
                            group=group,
                            values={
                                "until": ignore_until,
                                "count": ignore_count,
                                "window": ignore_window,
                                "user_count": ignore_user_count,
                                "user_window": ignore_user_window,
                                "state": state,
                                "actor_id": user.id if user.is_authenticated else None,
                            },
                        )
                    result["statusDetails"] = {
                        "ignoreCount": ignore_count,
                        "ignoreUntil": ignore_until,
                        "ignoreUserCount": ignore_user_count,
                        "ignoreUserWindow": ignore_user_window,
                        "ignoreWindow": ignore_window,
                        "actor": serialize(extract_lazy_object(user), user),
                    }
                else:
                    # Plain ignore (no snooze conditions): clear any snoozes.
                    GroupSnooze.objects.filter(group__in=group_ids).delete()
                    ignore_until = None
                    result["statusDetails"] = {}
            else:
                result["statusDetails"] = {}

        # Post-transaction bookkeeping: signals, activity, notifications.
        if group_list and happened:
            if new_status == GroupStatus.UNRESOLVED:
                activity_type = Activity.SET_UNRESOLVED
                activity_data = {}

                for group in group_list:
                    # `group.status` still holds the pre-update value here,
                    # distinguishing un-ignore from un-resolve.
                    if group.status == GroupStatus.IGNORED:
                        issue_unignored.send_robust(
                            project=project_lookup[group.project_id],
                            user=acting_user,
                            group=group,
                            transition_type="manual",
                            sender=update_groups,
                        )
                    else:
                        issue_unresolved.send_robust(
                            project=project_lookup[group.project_id],
                            user=acting_user,
                            group=group,
                            transition_type="manual",
                            sender=update_groups,
                        )
            elif new_status == GroupStatus.IGNORED:
                activity_type = Activity.SET_IGNORED
                activity_data = {
                    "ignoreCount": ignore_count,
                    "ignoreDuration": ignore_duration,
                    "ignoreUntil": ignore_until,
                    "ignoreUserCount": ignore_user_count,
                    "ignoreUserWindow": ignore_user_window,
                    "ignoreWindow": ignore_window,
                }

                # issue_ignored is emitted once per project with that
                # project's groups batched together.
                groups_by_project_id = defaultdict(list)
                for group in group_list:
                    groups_by_project_id[group.project_id].append(group)

                for project in projects:
                    project_groups = groups_by_project_id.get(project.id)
                    if project_groups:
                        issue_ignored.send_robust(
                            project=project,
                            user=acting_user,
                            group_list=project_groups,
                            activity_data=activity_data,
                            sender=update_groups,
                        )

            for group in group_list:
                group.status = new_status

                activity = Activity.objects.create(
                    project=project_lookup[group.project_id],
                    group=group,
                    type=activity_type,
                    user=acting_user,
                    data=activity_data,
                )
                record_group_history_from_activity_type(
                    group, activity_type, actor=acting_user
                )

                # TODO(dcramer): we need a solution for activity rollups
                # before sending notifications on bulk changes
                if not is_bulk:
                    if acting_user:
                        GroupSubscription.objects.subscribe(
                            user=acting_user,
                            group=group,
                            reason=GroupSubscriptionReason.status_change,
                        )
                    activity.send_notification()

                if new_status == GroupStatus.UNRESOLVED:
                    kick_off_status_syncs.apply_async(
                        kwargs={"project_id": group.project_id, "group_id": group.id}
                    )

    # XXX (ahmed): hack to get the activities to work properly on issues page. Not sure of
    # what performance impact this might have & this possibly should be moved else where
    try:
        if len(group_list) == 1:
            if res_type in (
                GroupResolution.Type.in_next_release,
                GroupResolution.Type.in_release,
            ):
                result["activity"] = serialize(
                    Activity.objects.get_activities_for_group(
                        group=group_list[0], num=ACTIVITIES_COUNT
                    ),
                    acting_user,
                )
    except UnboundLocalError:
        # `res_type` is only bound in the resolution branch above; deliberately
        # swallow the error for all other mutation kinds.
        pass

    # ------------------------------------------------------------------
    # Assignment.
    # ------------------------------------------------------------------
    if "assignedTo" in result:
        assigned_actor = result["assignedTo"]
        assigned_by = (
            data.get("assignedBy")
            if data.get("assignedBy") in ["assignee_selector", "suggested_assignee"]
            else None
        )
        if assigned_actor:
            for group in group_list:
                resolved_actor = assigned_actor.resolve()

                assignment = GroupAssignee.objects.assign(group, resolved_actor, acting_user)
                analytics.record(
                    "manual.issue_assignment",
                    organization_id=project_lookup[group.project_id].organization_id,
                    project_id=group.project_id,
                    group_id=group.id,
                    assigned_by=assigned_by,
                    had_to_deassign=assignment["updated_assignment"],
                )
            result["assignedTo"] = serialize(
                assigned_actor.resolve(), acting_user, ActorSerializer()
            )
        else:
            # Falsy assignee means "unassign".
            for group in group_list:
                GroupAssignee.objects.deassign(group, acting_user)
                analytics.record(
                    "manual.issue_assignment",
                    organization_id=project_lookup[group.project_id].organization_id,
                    project_id=group.project_id,
                    group_id=group.id,
                    assigned_by=assigned_by,
                    had_to_deassign=True,
                )

    # ------------------------------------------------------------------
    # Seen-state, bookmarks, subscriptions.
    # ------------------------------------------------------------------
    is_member_map = {
        project.id: project.member_set.filter(user=acting_user).exists() for project in projects
    }
    if result.get("hasSeen"):
        for group in group_list:
            # Only project members get GroupSeen rows.
            if is_member_map.get(group.project_id):
                instance, created = create_or_update(
                    GroupSeen,
                    group=group,
                    user=acting_user,
                    project=project_lookup[group.project_id],
                    values={"last_seen": timezone.now()},
                )
    elif result.get("hasSeen") is False:
        GroupSeen.objects.filter(group__in=group_ids, user=acting_user).delete()

    if result.get("isBookmarked"):
        for group in group_list:
            GroupBookmark.objects.get_or_create(
                project=project_lookup[group.project_id], group=group, user=acting_user
            )
            GroupSubscription.objects.subscribe(
                user=acting_user, group=group, reason=GroupSubscriptionReason.bookmark
            )
    elif result.get("isBookmarked") is False:
        GroupBookmark.objects.filter(group__in=group_ids, user=acting_user).delete()

    # TODO(dcramer): we could make these more efficient by first
    # querying for rich rows are present (if N > 2), flipping the flag
    # on those rows, and then creating the missing rows
    if result.get("isSubscribed") in (True, False):
        is_subscribed = result["isSubscribed"]
        for group in group_list:
            # NOTE: Subscribing without an initiating event (assignment,
            # commenting, etc.) clears out the previous subscription reason
            # to avoid showing confusing messaging as a result of this
            # action. It'd be jarring to go directly from "you are not
            # subscribed" to "you were subscribed due since you were
            # assigned" just by clicking the "subscribe" button (and you
            # may no longer be assigned to the issue anyway.)
            GroupSubscription.objects.create_or_update(
                user=acting_user,
                group=group,
                project=project_lookup[group.project_id],
                values={"is_active": is_subscribed, "reason": GroupSubscriptionReason.unknown},
            )

        result["subscriptionDetails"] = {
            "reason": SUBSCRIPTION_REASON_MAP.get(GroupSubscriptionReason.unknown, "unknown")
        }

    # ------------------------------------------------------------------
    # Public sharing.
    # ------------------------------------------------------------------
    if "isPublic" in result:
        # We always want to delete an existing share, because triggering
        # an isPublic=True even when it's already public, should trigger
        # regenerating.
        for group in group_list:
            if GroupShare.objects.filter(group=group).delete():
                result["shareId"] = None
                Activity.objects.create(
                    project=project_lookup[group.project_id],
                    group=group,
                    type=Activity.SET_PRIVATE,
                    user=acting_user,
                )

    if result.get("isPublic"):
        for group in group_list:
            share, created = GroupShare.objects.get_or_create(
                project=project_lookup[group.project_id], group=group, user=acting_user
            )
            if created:
                result["shareId"] = share.uuid
                Activity.objects.create(
                    project=project_lookup[group.project_id],
                    group=group,
                    type=Activity.SET_PUBLIC,
                    user=acting_user,
                )

    # ------------------------------------------------------------------
    # Merging.
    # ------------------------------------------------------------------
    # XXX(dcramer): this feels a bit shady like it should be its own endpoint.
    if result.get("merge") and len(group_list) > 1:
        # don't allow merging cross project
        if len(projects) > 1:
            return Response({"detail": "Merging across multiple projects is not supported"})

        # The most-seen group (ties broken by id) becomes the merge target.
        group_list_by_times_seen = sorted(
            group_list, key=lambda g: (g.times_seen, g.id), reverse=True
        )
        primary_group, groups_to_merge = (
            group_list_by_times_seen[0],
            group_list_by_times_seen[1:],
        )

        group_ids_to_merge = [g.id for g in groups_to_merge]
        eventstream_state = eventstream.start_merge(
            primary_group.project_id, group_ids_to_merge, primary_group.id
        )

        Group.objects.filter(id__in=group_ids_to_merge).update(
            status=GroupStatus.PENDING_MERGE
        )

        transaction_id = uuid4().hex
        merge_groups.delay(
            from_object_ids=group_ids_to_merge,
            to_object_id=primary_group.id,
            transaction_id=transaction_id,
            eventstream_state=eventstream_state,
        )

        Activity.objects.create(
            project=project_lookup[primary_group.project_id],
            group=primary_group,
            type=Activity.MERGE,
            user=acting_user,
            data={"issues": [{"id": c.id} for c in groups_to_merge]},
        )

        result["merge"] = {
            "parent": str(primary_group.id),
            "children": [str(g.id) for g in groups_to_merge],
        }

    # Support moving groups in or out of the inbox
    inbox = result.get("inbox", None)
    if inbox is not None:
        if inbox:
            for group in group_list:
                add_group_to_inbox(group, GroupInboxReason.MANUAL)
        elif not inbox:
            # Taking a group out of the inbox marks it reviewed.
            for group in group_list:
                remove_group_from_inbox(
                    group,
                    action=GroupInboxRemoveAction.MARK_REVIEWED,
                    user=acting_user,
                    referrer=request.META.get("HTTP_REFERER"),
                )
                issue_mark_reviewed.send_robust(
                    project=project_lookup[group.project_id],
                    user=acting_user,
                    group=group,
                    sender=update_groups,
                )
        result["inbox"] = inbox

    return Response(result)