Example #1
def finish_reprocessing(project_id, group_id, new_group_id):
    from django.db import transaction

    from sentry import eventstream
    from sentry.models import Activity, Group, GroupRedirect

    with transaction.atomic():
        group = Group.objects.get(id=group_id)
        new_group = Group.objects.get(id=new_group_id)

        # Any sort of success message will be shown at the *new* group ID's URL
        GroupRedirect.objects.create(
            organization_id=new_group.project.organization_id,
            group_id=new_group.id,
            previous_group_id=group_id,
        )

        # While we migrated all associated models at the beginning of
        # reprocessing, there is still the "reprocessing" activity that we need
        # to transfer manually.
        Activity.objects.filter(group_id=group_id).update(
            group_id=new_group_id)

        # All the associated models (groupassignee and eventattachments) have
        # already moved to the successor group, so the old group can be
        # deleted independently.
        group.delete()

    # Need to delay this until we have enqueued all events.
    eventstream.exclude_groups(project_id, [group_id])

    from sentry import similarity

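    # The Group row was deleted inside the transaction above, but the
    # in-memory instance still carries the ids similarity.delete needs.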
    similarity.delete(None, group)
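The snippets in this section enqueue work with .delay(...), which means these functions are registered as Celery tasks rather than called directly. A minimal sketch of what that registration looks like, assuming Sentry's instrumented_task decorator from sentry.tasks.base (the task name below is chosen for illustration):

from sentry.tasks.base import instrumented_task

@instrumented_task(name="sentry.tasks.reprocessing.finish_reprocessing")  # name is illustrative
def finish_reprocessing(project_id, group_id, new_group_id):
    ...

# Enqueued asynchronously once every event of the old group has been handled:
finish_reprocessing.delay(project_id=project_id, group_id=group_id, new_group_id=new_group_id)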
Example #2
import time

from sentry import eventstore, eventstream
from sentry.utils.query import celery_run_batch_query

# GROUP_REPROCESSING_CHUNK_SIZE and the tasks enqueued below (reprocess_event,
# tombstone_events, wait_group_reprocessed) are defined alongside this task in
# the same module.


def reprocess_group(
    project_id, group_id, query_state=None, start_time=None, max_events=None, acting_user_id=None
):
    from sentry.reprocessing2 import start_group_reprocessing

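    # start_time doubles as a first-invocation flag: only the initial call
    # kicks off reprocessing state for the group.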
    if start_time is None:
        start_time = time.time()
        start_group_reprocessing(
            project_id, group_id, max_events=max_events, acting_user_id=acting_user_id
        )

    query_state, events = celery_run_batch_query(
        filter=eventstore.Filter(project_ids=[project_id], group_ids=[group_id]),
        batch_size=GROUP_REPROCESSING_CHUNK_SIZE,
        state=query_state,
        referrer="reprocessing2.reprocess_group",
    )

    if not events:
        # Need to delay this until we have queried all events.
        eventstream.exclude_groups(project_id, [group_id])
        wait_group_reprocessed.delay(project_id=project_id, group_id=group_id)
        return

    tombstoned_event_ids = []

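    # Reprocess events while the max_events budget lasts; anything beyond the
    # budget is collected for tombstoning instead.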
    for event in events:
        if max_events is None or max_events > 0:
            reprocess_event.delay(
                project_id=project_id, event_id=event.event_id, start_time=start_time,
            )
            if max_events is not None:
                max_events -= 1
        else:
            tombstoned_event_ids.append(event.event_id)

    # len(tombstoned_event_ids) is upper-bounded by GROUP_REPROCESSING_CHUNK_SIZE
    if tombstoned_event_ids:
        tombstone_events.delay(
            project_id=project_id, group_id=group_id, event_ids=tombstoned_event_ids
        )

    reprocess_group.delay(
        project_id=project_id,
        group_id=group_id,
        query_state=query_state,
        start_time=start_time,
        max_events=max_events,
    )
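The task pages through the group's events and re-enqueues itself with the updated cursor until the query runs dry. A self-contained toy of that cursor-driven loop (illustrative names, not Sentry APIs; plain recursion stands in for Celery's .delay):

BATCH_SIZE = 3

def run_batch_query(state, data):
    # Toy stand-in for celery_run_batch_query: state is just an offset.
    offset = state or 0
    return offset + BATCH_SIZE, data[offset : offset + BATCH_SIZE]

def batched_task(data, state=None):
    state, items = run_batch_query(state, data)
    if not items:
        print("exhausted, finalize here")  # mirrors the `if not events:` branch
        return
    for item in items:
        print("processing", item)
    batched_task(data, state=state)  # with Celery: batched_task.delay(...)

batched_task(list(range(7)))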
Example #3
def finish_reprocessing(project_id, group_id):
    from django.db import transaction

    from sentry import eventstream
    from sentry.models import Activity, Group, GroupRedirect
    from sentry.reprocessing2 import buffered_delete_old_primary_hash

    with transaction.atomic():
        group = Group.objects.get(id=group_id)

        # While we migrated all associated models at the beginning of
        # reprocessing, there is still the "reprocessing" activity that we need
        # to transfer manually.
        activity = Activity.objects.get(group_id=group_id)
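        # Chained assignment: reads the successor group id stashed in the
        # activity's data payload and retargets the activity in one step.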
        new_group_id = activity.group_id = activity.data["newGroupId"]
        activity.save()

        new_group = Group.objects.get(id=new_group_id)

        # Any sort of success message will be shown at the *new* group ID's URL
        GroupRedirect.objects.create(
            organization_id=new_group.project.organization_id,
            group_id=new_group_id,
            previous_group_id=group_id,
        )

        # All the associated models (groupassignee and eventattachments) have
        # already moved to the successor group, so the old group can be
        # deleted independently.
        group.delete()

    # Tombstone unwanted events that should be dropped once the new group
    # has been generated by reprocessing.
    buffered_delete_old_primary_hash(
        project_id=project_id,
        group_id=group_id,
        force_flush_batch=True,
    )

    eventstream.exclude_groups(project_id, [group_id])

    from sentry import similarity

    similarity.delete(None, group)
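Compared with Example #1, the successor group id is no longer passed in as an argument; it is recovered from the "reprocessing" activity row, so callers only need the old ids. A toy illustration of the recovery step (plain classes instead of the ORM):

class Activity:
    def __init__(self, group_id, data):
        self.group_id = group_id
        self.data = data

activity = Activity(group_id=1, data={"newGroupId": 2})
new_group_id = activity.group_id = activity.data["newGroupId"]
assert new_group_id == 2 and activity.group_id == 2  # both targets updated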
Example #4
import time

import sentry_sdk

from sentry import eventstore, eventstream
from sentry.utils.query import celery_run_batch_query

# GROUP_REPROCESSING_CHUNK_SIZE and the handle_remaining_events task are
# defined alongside this task in the same module.


def reprocess_group(
    project_id,
    group_id,
    remaining_events="delete",
    new_group_id=None,
    query_state=None,
    start_time=None,
    max_events=None,
    acting_user_id=None,
):
    sentry_sdk.set_tag("project", project_id)
    from sentry.reprocessing2 import (
        CannotReprocess,
        logger,
        mark_event_reprocessed,
        reprocess_event,
        start_group_reprocessing,
    )

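    # As before, a missing start_time marks the first invocation; here
    # start_group_reprocessing also returns the id of the successor group,
    # which is then threaded through every recursive call.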
    if start_time is None:
        assert new_group_id is None
        start_time = time.time()
        new_group_id = start_group_reprocessing(
            project_id,
            group_id,
            max_events=max_events,
            acting_user_id=acting_user_id,
            remaining_events=remaining_events,
        )

    assert new_group_id is not None

    query_state, events = celery_run_batch_query(
        filter=eventstore.Filter(project_ids=[project_id], group_ids=[group_id]),
        batch_size=GROUP_REPROCESSING_CHUNK_SIZE,
        state=query_state,
        referrer="reprocessing2.reprocess_group",
    )

    if not events:
        # Need to delay this until we have enqueued all events and stopped
        # iterating over the batch query. Taking care of it in
        # finish_reprocessing would not work: for small max_events,
        # finish_reprocessing may run before the last reprocess_group
        # iteration.
        eventstream.exclude_groups(project_id, [group_id])
        return

    remaining_event_ids = []
    remaining_events_min_datetime = None
    remaining_events_max_datetime = None
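    # Track the time window covered by this batch so handle_remaining_events
    # below can scope its work to it (from_timestamp/to_timestamp).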

    for event in events:
        if max_events is None or max_events > 0:
            with sentry_sdk.start_span(op="reprocess_event"):
                try:
                    reprocess_event(
                        project_id=project_id,
                        event_id=event.event_id,
                        start_time=start_time,
                    )
                except CannotReprocess as e:
                    logger.error(f"reprocessing2.{e}")
                except Exception:
                    sentry_sdk.capture_exception()
                else:
                    if max_events is not None:
                        max_events -= 1

                    continue

            # In case of errors while kicking off reprocessing, mark the event
            # as reprocessed so that the progress bar advances and the
            # finish_reprocessing task is still spawned correctly.
            mark_event_reprocessed(group_id=group_id, project_id=project_id)

        # In case of errors while kicking off reprocessing, or once the
        # max_events budget is exhausted, fall through to the default action.

        if remaining_events_min_datetime is None or remaining_events_min_datetime > event.datetime:
            remaining_events_min_datetime = event.datetime
        if remaining_events_max_datetime is None or remaining_events_max_datetime < event.datetime:
            remaining_events_max_datetime = event.datetime

        remaining_event_ids.append(event.event_id)

    # len(remaining_event_ids) is upper-bounded by GROUP_REPROCESSING_CHUNK_SIZE
    if remaining_event_ids:
        handle_remaining_events.delay(
            project_id=project_id,
            new_group_id=new_group_id,
            event_ids=remaining_event_ids,
            remaining_events=remaining_events,
            from_timestamp=remaining_events_min_datetime,
            to_timestamp=remaining_events_max_datetime,
        )

    reprocess_group.delay(
        project_id=project_id,
        group_id=group_id,
        new_group_id=new_group_id,
        query_state=query_state,
        start_time=start_time,
        max_events=max_events,
        remaining_events=remaining_events,
    )
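The max_events budget only shrinks when an event was actually handed off for reprocessing; events past the budget (and events that failed to enqueue) fall through to the remaining-events path. A self-contained toy of the budget accounting (illustrative names, not Sentry APIs; the failure path is left out):

def split_batch(event_ids, max_events):
    reprocessed, remaining = [], []
    for event_id in event_ids:
        if max_events is None or max_events > 0:
            reprocessed.append(event_id)
            if max_events is not None:
                max_events -= 1
        else:
            remaining.append(event_id)
    return reprocessed, remaining, max_events

assert split_batch(["a", "b", "c"], 2) == (["a", "b"], ["c"], 0)
assert split_batch(["a", "b"], None) == (["a", "b"], [], None)  # no cap set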