Example #1
# Imports assumed from the surrounding Sentry codebase (this function is an
# excerpt; the names below are module-level in the original source):
from sentry import eventstream, tagstore
from sentry.models import Activity, Event, Group, GroupHash, UserReport


def migrate_events(caches, project, source_id, destination_id, fingerprints,
                   events, actor_id, eventstream_state):
    # XXX: This is only actually able to create a destination group and migrate
    # the group hashes if there are events that can be migrated. How do we
    # handle this if there aren't any events? We can't create a group (there
    # isn't any data to derive the aggregates from), so we'd have to mark the
    # hash as in limbo somehow...?
    if not events:
        return (destination_id, eventstream_state)

    if destination_id is None:
        # XXX: There is a race condition here between the (wall clock) time
        # that the migration is started by the user and when we actually
        # get to this block where the new destination is created and we've
        # moved the ``GroupHash`` so that events start being associated
        # with it. During this gap, there could have been additional events
        # ingested, and if we want to handle this, we'd need to record the
        # highest event ID we've seen at the beginning of the migration,
        # then scan all events greater than that ID and migrate the ones
        # where necessary. (This still isn't even guaranteed to catch all
        # of the events due to processing latency, but it's a better shot.)
        # Create a new destination group.
        destination = Group.objects.create(project_id=project.id,
                                           short_id=project.next_short_id(),
                                           **get_group_creation_attributes(
                                               caches, events))

        destination_id = destination.id

        eventstream_state = eventstream.start_unmerge(project.id, fingerprints,
                                                      source_id,
                                                      destination_id)

        # Move the group hashes to the destination.
        GroupHash.objects.filter(
            project_id=project.id,
            hash__in=fingerprints).update(group=destination_id)

        # Create activity records for the source and destination group.
        Activity.objects.create(
            project_id=project.id,
            group_id=destination_id,
            type=Activity.UNMERGE_DESTINATION,
            user_id=actor_id,
            data={
                "fingerprints": fingerprints,
                "source_id": source_id
            },
        )

        Activity.objects.create(
            project_id=project.id,
            group_id=source_id,
            type=Activity.UNMERGE_SOURCE,
            user_id=actor_id,
            data={
                "fingerprints": fingerprints,
                "destination_id": destination_id
            },
        )
    else:
        # Update the existing destination group.
        destination = Group.objects.get(id=destination_id)
        destination.update(
            **get_group_backfill_attributes(caches, destination, events))

    # ``event.id`` is the primary key; collect them to repoint the events
    # themselves at the destination group in bulk.
    event_id_set = set(event.id for event in events)

    Event.objects.filter(project_id=project.id,
                         id__in=event_id_set).update(group_id=destination_id)

    # Keep the in-memory event objects consistent with the bulk update above,
    # since ``QuerySet.update`` does not touch already-loaded instances.
    for event in events:
        event.group = destination

    tagstore.update_group_for_events(project_id=project.id,
                                     event_ids=event_id_set,
                                     destination_id=destination_id)

    # ``event.event_id`` is the client-assigned identifier (distinct from the
    # primary key used above); ``UserReport`` rows reference events by it.
    event_event_id_set = set(event.event_id for event in events)

    UserReport.objects.filter(
        project_id=project.id,
        event_id__in=event_event_id_set).update(group=destination_id)

    return (destination.id, eventstream_state)
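
The return value is what lets a caller process events in batches: the first
call creates the destination group and the eventstream state, and later calls
reuse both. A minimal driver sketch (hypothetical; ``unmerge_in_batches`` and
``fetch_event_batches`` are not part of the excerpt, and
``eventstream.end_unmerge`` is assumed to be the counterpart of the
``start_unmerge`` call above):

def unmerge_in_batches(caches, project, source_id, fingerprints, actor_id,
                       fetch_event_batches):
    # Thread (destination_id, eventstream_state) through successive calls;
    # both start as None and are populated by the first non-empty batch.
    destination_id = None
    eventstream_state = None
    for events in fetch_event_batches():
        destination_id, eventstream_state = migrate_events(
            caches, project, source_id, destination_id, fingerprints,
            events, actor_id, eventstream_state,
        )
    if eventstream_state is not None:
        eventstream.end_unmerge(eventstream_state)  # assumed counterpart API
    return destination_id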
Example #2
# Imports assumed from the surrounding Sentry codebase (excerpt; these names
# are module-level in the original source):
from sentry import eventstream, tagstore
from sentry.models import (Activity, Event, EventMapping, Group, GroupHash,
                           UserReport)


def migrate_events(caches, project, source_id, destination_id,
                   fingerprints, events, actor_id, eventstream_state):
    # XXX: This is only actually able to create a destination group and migrate
    # the group hashes if there are events that can be migrated. How do we
    # handle this if there aren't any events? We can't create a group (there
    # isn't any data to derive the aggregates from), so we'd have to mark the
    # hash as in limbo somehow...?
    if not events:
        return (destination_id, eventstream_state)

    if destination_id is None:
        # XXX: There is a race condition here between the (wall clock) time
        # that the migration is started by the user and when we actually
        # get to this block where the new destination is created and we've
        # moved the ``GroupHash`` so that events start being associated
        # with it. During this gap, there could have been additional events
        # ingested, and if we want to handle this, we'd need to record the
        # highest event ID we've seen at the beginning of the migration,
        # then scan all events greater than that ID and migrate the ones
        # where necessary. (This still isn't even guaranteed to catch all
        # of the events due to processing latency, but it's a better shot.)
        # Create a new destination group.
        destination = Group.objects.create(
            project_id=project.id,
            short_id=project.next_short_id(),
            **get_group_creation_attributes(caches, events)
        )

        destination_id = destination.id

        eventstream_state = eventstream.start_unmerge(
            project.id,
            fingerprints,
            source_id,
            destination_id
        )

        # Move the group hashes to the destination.
        GroupHash.objects.filter(
            project_id=project.id,
            hash__in=fingerprints,
        ).update(group=destination_id)

        # Create activity records for the source and destination group.
        Activity.objects.create(
            project_id=project.id,
            group_id=destination_id,
            type=Activity.UNMERGE_DESTINATION,
            user_id=actor_id,
            data={
                'fingerprints': fingerprints,
                'source_id': source_id,
            },
        )

        Activity.objects.create(
            project_id=project.id,
            group_id=source_id,
            type=Activity.UNMERGE_SOURCE,
            user_id=actor_id,
            data={
                'fingerprints': fingerprints,
                'destination_id': destination_id,
            },
        )
    else:
        # Update the existing destination group.
        destination = Group.objects.get(id=destination_id)
        destination.update(**get_group_backfill_attributes(caches, destination, events))

    # ``event.id`` is the primary key; collect them to repoint the events at
    # the destination group in bulk.
    event_id_set = set(event.id for event in events)

    Event.objects.filter(
        project_id=project.id,
        id__in=event_id_set,
    ).update(group_id=destination_id)

    # Keep the in-memory event objects consistent with the bulk update above.
    for event in events:
        event.group = destination

    tagstore.update_group_for_events(
        project_id=project.id,
        event_ids=event_id_set,
        destination_id=destination_id
    )

    # ``event.event_id`` is the client-assigned identifier; ``EventMapping``
    # and ``UserReport`` rows reference events by it.
    event_event_id_set = set(event.event_id for event in events)

    EventMapping.objects.filter(
        project_id=project.id,
        event_id__in=event_event_id_set,
    ).update(group_id=destination_id)

    UserReport.objects.filter(
        project_id=project.id,
        event_id__in=event_event_id_set,
    ).update(group=destination_id)

    return (destination.id, eventstream_state)
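
Example #2 matches Example #1 apart from formatting, with one behavioral
addition: the ``EventMapping`` backfill, which repoints the per-event
``event_id``-to-group mapping at the destination alongside the ``UserReport``
rows.

The race noted in the XXX comment could be narrowed roughly as the comment
itself suggests: record the highest event primary key before the ``GroupHash``
rows are moved, then re-scan events above that watermark afterwards. A minimal
sketch under those assumptions (``record_high_watermark``,
``catch_up_stragglers``, and the fingerprint helper ``primary_hash_for`` are
all hypothetical, not part of the excerpt):

from django.db.models import Max

def record_high_watermark(project):
    # Capture the highest event primary key *before* moving the GroupHash
    # rows, so later-ingested events can be re-scanned afterwards.
    return Event.objects.filter(
        project_id=project.id).aggregate(Max('id'))['id__max'] or 0

def catch_up_stragglers(project, source_id, destination_id, fingerprints,
                        high_watermark):
    # Events above the watermark may have landed on the source group during
    # the window before the GroupHash rows were repointed.
    stragglers = Event.objects.filter(project_id=project.id,
                                      group_id=source_id,
                                      id__gt=high_watermark)
    to_move = [event.id for event in stragglers
               if primary_hash_for(event) in fingerprints]  # hypothetical
    if to_move:
        Event.objects.filter(id__in=to_move).update(group_id=destination_id)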