def delete(self, request, group):
    """
    Remove an Issue
    ```````````````

    Removes an individual issue.

    :pparam string issue_id: the ID of the issue to delete.
    :auth: required
    """
    from sentry.tasks.deletion import delete_group

    updated = Group.objects.filter(
        id=group.id,
    ).exclude(status__in=[
        GroupStatus.PENDING_DELETION,
        GroupStatus.DELETION_IN_PROGRESS,
    ]).update(status=GroupStatus.PENDING_DELETION)

    if updated:
        project = group.project

        GroupHashTombstone.tombstone_groups(
            project_id=project.id,
            group_ids=[group.id],
        )

        transaction_id = uuid4().hex
        delete_group.apply_async(
            kwargs={
                'object_id': group.id,
                'transaction_id': transaction_id,
            },
            countdown=3600,
        )

        self.create_audit_entry(
            request=request,
            organization_id=project.organization_id if project else None,
            target_object=group.id,
            transaction_id=transaction_id,
        )

        delete_logger.info(
            'object.delete.queued',
            extra={
                'object_id': group.id,
                'transaction_id': transaction_id,
                'model': type(group).__name__,
            }
        )

        issue_deleted.send_robust(
            group=group, user=request.user, delete_type='delete', sender=self.__class__)

    return Response(status=202)

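# A hypothetical client-side call against this endpoint, for orientation. The
# path follows Sentry's documented issue API (DELETE /api/0/issues/{issue_id}/);
# the host, issue ID, and token below are placeholders, not values from the
# source. Note that deletion is asynchronous: a 202 only means the delete was
# queued (with the 1-hour countdown above), not that it has completed.
import requests

resp = requests.delete(
    'https://sentry.example.com/api/0/issues/12345/',
    headers={'Authorization': 'Bearer <auth-token>'},
)
assert resp.status_code == 202  # accepted: deletion queued, not yet performed
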
def test_project_issues_with_tombstones(self):
    # Nothing to be done if we're using `group_id`.
    # When this option is the default we can remove
    # this test.
    if options.get('snuba.use_group_id_column'):
        return

    base_time = datetime.utcnow()
    hash = 'a' * 32

    def _query_for_issue(group_id):
        return snuba.query(
            start=base_time - timedelta(days=1),
            end=base_time + timedelta(days=1),
            groupby=['issue'],
            filter_keys={
                'project_id': [self.project.id],
                'issue': [group_id]
            },
        )

    group1 = self.create_group()
    group2 = self.create_group()
    GroupHash.objects.create(project=self.project, group=group1, hash=hash)
    assert snuba.get_project_issues([self.project], [group1.id]) == \
        [(group1.id, group1.project_id, [('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', None)])]

    # 1 event in the group, no deletes have happened
    self._insert_event_for_time(base_time, hash)
    assert _query_for_issue(group1.id) == {group1.id: 1}

    # group is deleted and then returns (as a new group with the same hash)
    GroupHashTombstone.tombstone_groups(self.project.id, [group1.id])
    ght = GroupHashTombstone.objects.get(project_id=self.project.id)
    assert ght

    GroupHash.objects.create(
        project=self.project,
        group=group2,
        hash=hash,
    )

    # tombstone time is returned as expected
    assert snuba.get_project_issues([self.project], [group2.id]) == \
        [(group2.id, group2.project_id, [('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
                                          ght.deleted_at.strftime("%Y-%m-%d %H:%M:%S"))])]

    # events at or before the tombstone date aren't returned
    self._insert_event_for_time(ght.deleted_at, hash)
    assert _query_for_issue(group2.id) == {}

    # only events after the tombstone date are returned
    self._insert_event_for_time(ght.deleted_at + timedelta(seconds=1), hash)
    assert _query_for_issue(group2.id) == {group2.id: 1}

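# The `_insert_event_for_time` helper method used above is not shown in this
# snippet. A plausible definition, adapted from the inline version in the older
# test further below (hypothetical: the real test case may define it
# differently); it assumes the test module already imports `time` and `uuid`.
import time
import uuid


def _insert_event_for_time(self, ts, hash='a' * 32):
    # Write a single event directly into snuba at the given timestamp so the
    # queries above can observe it.
    self.snuba_insert({
        'event_id': uuid.uuid4().hex,
        'primary_hash': hash,
        'project_id': self.project.id,
        'message': 'message',
        'platform': 'python',
        'datetime': ts.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
        'data': {
            'received': time.mktime(ts.timetuple()),
        },
    })
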
def delete(self, request, group):
    """
    Remove an Issue
    ```````````````

    Removes an individual issue.

    :pparam string issue_id: the ID of the issue to delete.
    :auth: required
    """
    from sentry.tasks.deletion import delete_group

    updated = Group.objects.filter(
        id=group.id,
    ).exclude(status__in=[
        GroupStatus.PENDING_DELETION,
        GroupStatus.DELETION_IN_PROGRESS,
    ]).update(status=GroupStatus.PENDING_DELETION)

    if updated:
        project = group.project

        eventstream.delete_groups(group.project_id, [group.id])

        GroupHashTombstone.tombstone_groups(
            project_id=project.id,
            group_ids=[group.id],
        )

        transaction_id = uuid4().hex
        delete_group.apply_async(
            kwargs={
                'object_id': group.id,
                'transaction_id': transaction_id,
            },
            countdown=3600,
        )

        self.create_audit_entry(
            request=request,
            organization_id=project.organization_id if project else None,
            target_object=group.id,
            transaction_id=transaction_id,
        )

        delete_logger.info(
            'object.delete.queued',
            extra={
                'object_id': group.id,
                'transaction_id': transaction_id,
                'model': type(group).__name__,
            }
        )

        issue_deleted.send_robust(
            group=group, user=request.user, delete_type='delete', sender=self.__class__)

    return Response(status=202)

def _delete_groups(self, request, project, group_list, delete_type):
    if not group_list:
        return

    # deterministic sort for sanity, and for very large deletions we'll
    # delete the "smaller" groups first
    group_list.sort(key=lambda g: (g.times_seen, g.id))
    group_ids = [g.id for g in group_list]

    Group.objects.filter(
        id__in=group_ids,
    ).exclude(status__in=[
        GroupStatus.PENDING_DELETION,
        GroupStatus.DELETION_IN_PROGRESS,
    ]).update(status=GroupStatus.PENDING_DELETION)

    eventstream_state = eventstream.start_delete_groups(project.id, group_ids)

    GroupHashTombstone.tombstone_groups(
        project_id=project.id,
        group_ids=group_ids,
    )

    transaction_id = uuid4().hex

    delete_groups.apply_async(
        kwargs={
            'object_ids': group_ids,
            'transaction_id': transaction_id,
            'eventstream_state': eventstream_state,
        },
        countdown=3600,
    )

    for group in group_list:
        self.create_audit_entry(
            request=request,
            organization_id=project.organization_id,
            target_object=group.id,
            transaction_id=transaction_id,
        )

        delete_logger.info(
            'object.delete.queued',
            extra={
                'object_id': group.id,
                'transaction_id': transaction_id,
                'model': type(group).__name__,
            }
        )

        issue_deleted.send_robust(
            group=group, user=request.user, delete_type=delete_type, sender=self.__class__)

def test_project_issues_with_tombstones(self):
    base_time = datetime.utcnow()
    hash = 'a' * 32

    def _query_for_issue(group_id):
        return snuba.query(
            start=base_time - timedelta(days=1),
            end=base_time + timedelta(days=1),
            groupby=['issue'],
            filter_keys={
                'project_id': [self.project.id],
                'issue': [group_id]
            },
        )

    group1 = self.create_group()
    group2 = self.create_group()
    GroupHash.objects.create(
        project=self.project,
        group=group1,
        hash=hash,
    )
    assert snuba.get_project_issues([self.project], [group1.id]) == \
        [(group1.id, group1.project_id, [('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', None)])]

    # 1 event in the group, no deletes have happened
    self._insert_event_for_time(base_time, hash)
    assert _query_for_issue(group1.id) == {group1.id: 1}

    # group is deleted and then returns (as a new group with the same hash)
    GroupHashTombstone.tombstone_groups(self.project.id, [group1.id])
    ght = GroupHashTombstone.objects.get(project_id=self.project.id)
    assert ght

    GroupHash.objects.create(
        project=self.project,
        group=group2,
        hash=hash,
    )

    # tombstone time is returned as expected
    assert snuba.get_project_issues([self.project], [group2.id]) == \
        [(group2.id, group2.project_id, [('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
                                          ght.deleted_at.strftime("%Y-%m-%d %H:%M:%S"))])]

    # events at or before the tombstone date aren't returned
    self._insert_event_for_time(ght.deleted_at, hash)
    assert _query_for_issue(group2.id) == {}

    # only events after the tombstone date are returned
    self._insert_event_for_time(ght.deleted_at + timedelta(seconds=1), hash)
    assert _query_for_issue(group2.id) == {group2.id: 1}

def _delete_groups(self, request, project, group_list, delete_type):
    group_ids = [g.id for g in group_list]

    Group.objects.filter(
        id__in=group_ids,
    ).exclude(status__in=[
        GroupStatus.PENDING_DELETION,
        GroupStatus.DELETION_IN_PROGRESS,
    ]).update(status=GroupStatus.PENDING_DELETION)

    eventstream.delete_groups(project.id, group_ids)

    GroupHashTombstone.tombstone_groups(
        project_id=project.id,
        group_ids=group_ids,
    )

    transaction_id = uuid4().hex

    for group in group_list:
        delete_group.apply_async(
            kwargs={
                'object_id': group.id,
                'transaction_id': transaction_id,
            },
            countdown=3600,
        )

        self.create_audit_entry(
            request=request,
            organization_id=project.organization_id,
            target_object=group.id,
            transaction_id=transaction_id,
        )

        delete_logger.info(
            'object.delete.queued',
            extra={
                'object_id': group.id,
                'transaction_id': transaction_id,
                'model': type(group).__name__,
            }
        )

        issue_deleted.send_robust(
            group=group, user=request.user, delete_type=delete_type, sender=self.__class__)

def _delete_groups(self, request, project, group_list, delete_type):
    group_ids = [g.id for g in group_list]

    Group.objects.filter(
        id__in=group_ids,
    ).exclude(status__in=[
        GroupStatus.PENDING_DELETION,
        GroupStatus.DELETION_IN_PROGRESS,
    ]).update(status=GroupStatus.PENDING_DELETION)

    GroupHashTombstone.tombstone_groups(
        project_id=project.id,
        group_ids=group_ids,
    )

    transaction_id = uuid4().hex

    for group in group_list:
        delete_group.apply_async(
            kwargs={
                'object_id': group.id,
                'transaction_id': transaction_id,
            },
            countdown=3600,
        )

        self.create_audit_entry(
            request=request,
            organization_id=project.organization_id,
            target_object=group.id,
            transaction_id=transaction_id,
        )

        delete_logger.info(
            'object.delete.queued',
            extra={
                'object_id': group.id,
                'transaction_id': transaction_id,
                'model': type(group).__name__,
            }
        )

        issue_deleted.send_robust(
            group=group, user=request.user, delete_type=delete_type, sender=self.__class__)

def test(self, mock_now):
    mock_now.return_value = datetime(2010, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc)

    group = self.group
    hash1 = 'a' * 32
    GroupHash.objects.create(
        project=group.project,
        group=group,
        hash=hash1,
    )

    GroupHashTombstone.tombstone_groups(
        project_id=group.project_id,
        group_ids=[group.id],
    )

    # the hash is tombstoned at the mocked "now" and unbound from the group
    assert GroupHashTombstone.objects.filter(
        hash=hash1,
        deleted_at=mock_now.return_value,
    ).exists()
    assert not GroupHash.objects.filter(group=group, hash=hash1).exists()

    mock_now.return_value = datetime(2011, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc)

    # hash1 returns
    GroupHash.objects.create(
        project=group.project,
        group=group,
        hash=hash1,
    )

    hash2 = 'b' * 32
    GroupHash.objects.create(
        project=group.project,
        group=group,
        hash=hash2,
    )

    GroupHashTombstone.tombstone_groups(
        project_id=group.project_id,
        group_ids=[group.id],
    )

    # both hashes are tombstoned at the new "now"; hash1's tombstone is refreshed
    assert GroupHashTombstone.objects.filter(
        hash=hash1,
        deleted_at=mock_now.return_value,
    ).exists()
    assert not GroupHash.objects.filter(group=group, hash=hash1).exists()
    assert GroupHashTombstone.objects.filter(
        hash=hash2,
        deleted_at=mock_now.return_value,
    ).exists()
    assert not GroupHash.objects.filter(group=group, hash=hash2).exists()

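# What `tombstone_groups` is expected to do, per the assertions in the test
# above: record a GroupHashTombstone row (with `deleted_at` set to "now") for
# every hash currently bound to the given groups, then delete those GroupHash
# rows so the hashes can be re-bound to new groups. A minimal sketch under
# those assumptions, using the model fields exercised by these tests; not the
# actual Sentry implementation.
from django.utils import timezone


def tombstone_groups_sketch(project_id, group_ids):
    # Collect the hashes currently bound to the doomed groups.
    hashes = list(
        GroupHash.objects.filter(
            project_id=project_id,
            group_id__in=group_ids,
        ).values_list('hash', flat=True)
    )

    now = timezone.now()
    for hash_ in hashes:
        # Upsert so that re-tombstoning a returning hash refreshes
        # `deleted_at`, matching the second half of the test above.
        GroupHashTombstone.objects.update_or_create(
            project_id=project_id,
            hash=hash_,
            defaults={'deleted_at': now},
        )

    # Unbind the hashes; new events with the same hash create a new group.
    GroupHash.objects.filter(
        project_id=project_id,
        hash__in=hashes,
    ).delete()
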
def test_project_issues_with_tombstones(self):
    base_time = datetime.utcnow()
    a_hash = 'a' * 32

    def _insert_event_for_time(ts):
        self.snuba_insert({
            'event_id': uuid.uuid4().hex,
            'primary_hash': a_hash,
            'project_id': 100,
            'message': 'message',
            'platform': 'python',
            'datetime': ts.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            'data': {
                'received': time.mktime(ts.timetuple()),
            }
        })

    def _query_for_issue(group_id):
        return snuba.query(
            start=base_time - timedelta(days=1),
            end=base_time + timedelta(days=1),
            groupby=['issue'],
            filter_keys={
                'project_id': [100],
                'issue': [group_id]
            },
        )

    group1 = self.create_group()
    group2 = self.create_group()
    GroupHash.objects.create(project=self.project, group=group1, hash=a_hash)
    assert snuba.get_project_issues([self.project], [group1.id]) == \
        [(group1.id, [('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', None)])]

    # 1 event in the group, no deletes have happened
    _insert_event_for_time(base_time)
    assert _query_for_issue(group1.id) == {group1.id: 1}

    # group is deleted and then returns (as a new group with the same hash)
    GroupHashTombstone.tombstone_groups(self.project.id, [group1.id])
    ght = GroupHashTombstone.objects.get(project_id=self.project.id)
    assert ght

    GroupHash.objects.create(
        project=self.project,
        group=group2,
        hash=a_hash,
    )

    # tombstone time is returned as expected
    assert snuba.get_project_issues([self.project], [group2.id]) == \
        [(group2.id, [('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
                       ght.deleted_at.strftime("%Y-%m-%d %H:%M:%S"))])]

    # events at or before the tombstone date aren't returned
    _insert_event_for_time(ght.deleted_at)
    assert _query_for_issue(group2.id) == {}

    # only events after the tombstone date are returned
    _insert_event_for_time(ght.deleted_at + timedelta(seconds=1))
    assert _query_for_issue(group2.id) == {group2.id: 1}