Example #1
def test_concurrent_events_go_into_new_group(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    burst_task_runner,
    default_user,
):
    """
    Assert that both unmodified and concurrently inserted events go into "the
    new group", i.e. the successor of the reprocessed (old) group that
    inherited the group hashes.
    """

    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1
        return data

    event_id = process_and_save({"message": "hello world"})

    event = eventstore.get_event_by_id(default_project.id, event_id)
    original_short_id = event.group.short_id
    assert original_short_id
    original_issue_id = event.group.id

    original_assignee = GroupAssignee.objects.create(
        group_id=original_issue_id, project=default_project, user=default_user
    )

    with burst_task_runner() as burst_reprocess:
        reprocess_group(default_project.id, event.group_id)

    assert not is_group_finished(event.group_id)

    event_id2 = process_and_save({"message": "hello world"})
    event2 = eventstore.get_event_by_id(default_project.id, event_id2)
    assert event2.event_id != event.event_id
    assert event2.group_id != event.group_id

    burst_reprocess(max_jobs=100)

    event3 = eventstore.get_event_by_id(default_project.id, event_id)
    assert event3.event_id == event.event_id
    assert event3.group_id != event.group_id

    assert is_group_finished(event.group_id)

    assert event2.group_id == event3.group_id
    assert event.get_hashes() == event2.get_hashes() == event3.get_hashes()

    group = event3.group

    assert group.short_id == original_short_id
    assert GroupAssignee.objects.get(group=group) == original_assignee
    activity = Activity.objects.get(group=group, type=Activity.REPROCESS)
    assert activity.ident == str(original_issue_id)
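These test snippets are quoted without their imports. A minimal, partial sketch of what they rely on, assuming the module layout of the Sentry codebase they come from (exact paths may differ between versions; fixtures such as default_project, process_and_save, and burst_task_runner are provided by Sentry's pytest plugins, and monkeypatch is pytest's own):

# Module paths are assumptions based on the identifiers used below; adjust to your Sentry version.
import uuid

import six

from sentry import eventstore
from sentry.attachments import attachment_cache
from sentry.models import (
    Activity,
    EventAttachment,
    File,
    Group,
    GroupAssignee,
    GroupRedirect,
    UserReport,
)
from sentry.reprocessing2 import is_group_finished
from sentry.tasks.reprocessing2 import reprocess_group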
Example #2
def test_nodestore_missing(
    default_project, reset_snuba, process_and_save, burst_task_runner, monkeypatch, remaining_events
):
    logs = []
    monkeypatch.setattr("sentry.reprocessing2.logger.error", logs.append)

    event_id = process_and_save({"message": "hello world", "platform": "python"})
    event = eventstore.get_event_by_id(default_project.id, event_id)
    old_group = event.group

    with burst_task_runner() as burst:
        reprocess_group(
            default_project.id, event.group_id, max_events=1, remaining_events=remaining_events
        )

    burst(max_jobs=100)

    assert is_group_finished(event.group_id)

    new_event = eventstore.get_event_by_id(default_project.id, event_id)

    if remaining_events == "delete":
        assert new_event is None
    else:
        assert not new_event.data.get("errors")
        assert new_event.group_id != event.group_id

        assert new_event.group.times_seen == 1

        assert not Group.objects.filter(id=old_group.id).exists()
        assert (
            GroupRedirect.objects.get(previous_group_id=old_group.id).group_id == new_event.group_id
        )

    assert logs == ["reprocessing2.unprocessed_event.not_found"]
Example #3
def test_max_events(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    task_runner,
    monkeypatch,
):
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1
        return data

    event_id = process_and_save({"message": "hello world"})

    event = eventstore.get_event_by_id(default_project.id, event_id)

    # Make sure it never gets called
    monkeypatch.setattr("sentry.tasks.reprocessing2.reprocess_event", None)

    with task_runner():
        reprocess_group(default_project.id, event.group_id, max_events=0)

    assert is_group_finished(event.group_id)
Example #4
def test_max_events(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    burst_task_runner,
    monkeypatch,
    remaining_events,
    max_events,
):
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1
        return data

    event_ids = [
        process_and_save({"message": "hello world"}, seconds_ago=i + 1) for i in reversed(range(5))
    ]

    old_events = {
        event_id: eventstore.get_event_by_id(default_project.id, event_id) for event_id in event_ids
    }

    (group_id,) = {e.group_id for e in old_events.values()}

    with burst_task_runner() as burst:
        reprocess_group(
            default_project.id,
            group_id,
            max_events=max_events,
            remaining_events=remaining_events,
        )

    burst(max_jobs=100)

    for i, event_id in enumerate(event_ids):
        event = eventstore.get_event_by_id(default_project.id, event_id)
        if max_events is not None and i < (len(event_ids) - max_events):
            if remaining_events == "delete":
                assert event is None
            elif remaining_events == "keep":
                assert event.group_id != group_id
                assert dict(event.data) == dict(old_events[event_id].data)
            else:
                raise ValueError(remaining_events)
        else:
            assert event.group_id != group_id
            assert int(event.data["contexts"]["reprocessing"]["original_issue_id"]) == group_id
            assert dict(event.data) != dict(old_events[event_id].data)

    if remaining_events == "delete":
        assert event.group.times_seen == (max_events or 5)
    elif remaining_events == "keep":
        assert event.group.times_seen == 5
    else:
        raise ValueError(remaining_events)

    assert is_group_finished(group_id)
Example #5
def wait_group_reprocessed(project_id, group_id):
    from sentry.reprocessing2 import is_group_finished

    if is_group_finished(group_id):
        # Every event in the group has been reprocessed; kick off cleanup of the old group.
        delete_old_group.delay(project_id=project_id, group_id=group_id)
    else:
        # Reprocessing is still running; poll again in five minutes.
        wait_group_reprocessed.apply_async(
            kwargs={"project_id": project_id, "group_id": group_id}, countdown=60 * 5
        )
Example #6
def test_concurrent_events_go_into_new_group(default_project, reset_snuba,
                                             register_event_preprocessor,
                                             process_and_save,
                                             burst_task_runner):
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1
        return data

    event_id = process_and_save({"message": "hello world"})

    event = eventstore.get_event_by_id(default_project.id, event_id)

    with burst_task_runner() as burst_reprocess:
        reprocess_group(default_project.id, event.group_id)

    assert not is_group_finished(event.group_id)

    event_id2 = process_and_save({"message": "hello world"})
    event2 = eventstore.get_event_by_id(default_project.id, event_id2)
    assert event2.event_id != event.event_id
    assert event2.group_id != event.group_id

    burst_reprocess()

    (event3, ) = eventstore.get_events(
        eventstore.Filter(
            project_ids=[default_project.id],
            conditions=[["tags[original_event_id]", "=", event_id]],
        ))

    assert is_group_finished(event.group_id)

    assert event2.group_id == event3.group_id
    assert event.get_hashes() == event2.get_hashes() == event3.get_hashes()
Example #7
def test_max_events(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    burst_task_runner,
    monkeypatch,
):
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1
        return data

    event_ids = [
        process_and_save({"message": "hello world"}, seconds_ago=i + 1)
        for i in reversed(range(20))
    ]

    (group_id, ) = {
        eventstore.get_event_by_id(default_project.id, event_id).group_id
        for event_id in event_ids
    }

    with burst_task_runner() as burst:
        reprocess_group(default_project.id,
                        group_id,
                        max_events=len(event_ids) // 2)

    burst(max_jobs=100)

    for i, event_id in enumerate(event_ids):
        event = eventstore.get_event_by_id(default_project.id, event_id)
        if i < len(event_ids) / 2:
            assert event is None
        else:
            assert event.group_id != group_id
            assert int(event.data["contexts"]["reprocessing"]["original_issue_id"]) == group_id

    assert is_group_finished(group_id)
Example #8
def test_nodestore_missing(
    default_project,
    reset_snuba,
    process_and_save,
    burst_task_runner,
    monkeypatch,
):
    event_id = process_and_save({"message": "hello world"})
    event = eventstore.get_event_by_id(default_project.id, event_id)

    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id, max_events=1)

    burst(max_jobs=100)

    new_event = eventstore.get_event_by_id(default_project.id, event_id)
    assert not new_event.data.get("errors")
    assert new_event.group_id != event.group_id

    assert is_group_finished(event.group_id)
Example #9
def test_basic(
    task_runner,
    default_project,
    change_groups,
    reset_snuba,
    process_and_save,
    register_event_preprocessor,
    burst_task_runner,
):
    # Replace this with an int and nonlocal when we have Python 3
    abs_count = []

    @register_event_preprocessor
    def event_preprocessor(data):
        tags = data.setdefault("tags", [])
        assert all(not x or x[0] != "processing_counter" for x in tags)
        tags.append(("processing_counter", "x{}".format(len(abs_count))))
        abs_count.append(None)

        if change_groups:
            data["fingerprint"] = [uuid.uuid4().hex]
        else:
            data["fingerprint"] = ["foo"]

        return data

    event_id = process_and_save(
        {"tags": [["key1", "value"], None, ["key2", "value"]]})

    def get_event_by_processing_counter(n):
        return list(
            eventstore.get_events(
                eventstore.Filter(
                    project_ids=[default_project.id],
                    conditions=[["tags[processing_counter]", "=", n]],
                )))

    event = eventstore.get_event_by_id(default_project.id, event_id)
    assert event.get_tag("processing_counter") == "x0"
    assert not event.data.get("errors")

    assert get_event_by_processing_counter("x0")[0].event_id == event.event_id

    old_event = event

    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id)

    burst(max_jobs=100)

    (event, ) = get_event_by_processing_counter("x1")

    # Assert original data is used
    assert event.get_tag("processing_counter") == "x1"
    assert not event.data.get("errors")

    if change_groups:
        assert event.get_hashes() != old_event.get_hashes()
    else:
        assert event.get_hashes() == old_event.get_hashes()

    assert event.group_id != old_event.group_id

    assert event.event_id == old_event.event_id
    assert int(event.data["contexts"]["reprocessing"]["original_issue_id"]) == old_event.group_id

    assert not Group.objects.filter(id=old_event.group_id).exists()

    assert is_group_finished(old_event.group_id)
Example #10
def test_attachments_and_userfeedback(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    burst_task_runner,
    monkeypatch,
):
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1

        cache_key = cache_key_for_event(data)
        attachments = attachment_cache.get(cache_key)
        extra.setdefault("attachments", []).append(
            [attachment.type for attachment in attachments])

        return data

    event_id_to_delete = process_and_save({"message": "hello world"},
                                          seconds_ago=5)
    event_to_delete = eventstore.get_event_by_id(default_project.id,
                                                 event_id_to_delete)

    event_id = process_and_save({"message": "hello world"})
    event = eventstore.get_event_by_id(default_project.id, event_id)

    for evt in (event, event_to_delete):
        for type in ("event.attachment", "event.minidump"):
            file = File.objects.create(name="foo", type=type)
            file.putfile(six.BytesIO(b"hello world"))
            EventAttachment.objects.create(
                event_id=evt.event_id,
                group_id=evt.group_id,
                project_id=default_project.id,
                file_id=file.id,
                type=file.type,
                name="foo",
            )

        UserReport.objects.create(
            project_id=default_project.id,
            event_id=evt.event_id,
            name="User",
        )

    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id, max_events=1)

    burst(max_jobs=100)

    new_event = eventstore.get_event_by_id(default_project.id, event_id)
    assert new_event.group_id != event.group_id

    assert new_event.data["extra"]["attachments"] == [["event.attachment", "event.minidump"]]

    att, mdmp = EventAttachment.objects.filter(
        project_id=default_project.id).order_by("type")
    assert att.group_id == mdmp.group_id == new_event.group_id
    assert att.event_id == mdmp.event_id == event_id
    assert att.type == "event.attachment"
    assert mdmp.type == "event.minidump"

    (rep, ) = UserReport.objects.filter(project_id=default_project.id)
    assert rep.group_id == new_event.group_id
    assert rep.event_id == event_id

    assert is_group_finished(event.group_id)
Example #11
def test_attachments_and_userfeedback(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    burst_task_runner,
    monkeypatch,
):
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1

        cache_key = cache_key_for_event(data)
        attachments = attachment_cache.get(cache_key)
        extra.setdefault("attachments", []).append([attachment.type for attachment in attachments])

        return data

    # Required so that the minidump is loaded into the attachment cache
    MINIDUMP_PLACEHOLDER = {
        "platform": "native",
        "exception": {"values": [{"mechanism": {"type": "minidump"}, "type": "test bogus"}]},
    }

    event_id_to_delete = process_and_save(
        {"message": "hello world", **MINIDUMP_PLACEHOLDER}, seconds_ago=5
    )
    event_to_delete = eventstore.get_event_by_id(default_project.id, event_id_to_delete)

    event_id = process_and_save(
        {"message": "hello world", "platform": "native", **MINIDUMP_PLACEHOLDER}
    )
    event = eventstore.get_event_by_id(default_project.id, event_id)

    for evt in (event, event_to_delete):
        for type in ("event.attachment", "event.minidump"):
            _create_event_attachment(evt, type)

        _create_user_report(evt)

    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id, max_events=1)

    burst(max_jobs=100)

    new_event = eventstore.get_event_by_id(default_project.id, event_id)
    assert new_event.group_id != event.group_id

    assert new_event.data["extra"]["attachments"] == [["event.minidump"]]

    att, mdmp = EventAttachment.objects.filter(project_id=default_project.id).order_by("type")
    assert att.group_id == mdmp.group_id == new_event.group_id
    assert att.event_id == mdmp.event_id == event_id
    assert att.type == "event.attachment"
    assert mdmp.type == "event.minidump"

    (rep,) = UserReport.objects.filter(project_id=default_project.id)
    assert rep.group_id == new_event.group_id
    assert rep.event_id == event_id

    assert is_group_finished(event.group_id)
Example #12
def test_basic(
    task_runner,
    default_project,
    change_groups,
    reset_snuba,
    process_and_save,
    register_event_preprocessor,
    burst_task_runner,
    monkeypatch,
):
    from sentry import eventstream

    tombstone_calls = 0
    old_tombstone_fn = eventstream.tombstone_events_unsafe

    def tombstone_called(*args, **kwargs):
        nonlocal tombstone_calls
        tombstone_calls += 1
        old_tombstone_fn(*args, **kwargs)

    monkeypatch.setattr("sentry.eventstream.tombstone_events_unsafe",
                        tombstone_called)

    # Count preprocessor invocations; Python 3 is available here (see the f-string
    # below), so a plain int with nonlocal replaces the old list-append workaround.
    abs_count = 0

    @register_event_preprocessor
    def event_preprocessor(data):
        nonlocal abs_count
        tags = data.setdefault("tags", [])
        assert all(not x or x[0] != "processing_counter" for x in tags)
        tags.append(("processing_counter", f"x{abs_count}"))
        abs_count += 1

        if change_groups:
            data["fingerprint"] = [uuid.uuid4().hex]
        else:
            data["fingerprint"] = ["foo"]

        return data

    event_id = process_and_save(
        {"tags": [["key1", "value"], None, ["key2", "value"]]})

    def get_event_by_processing_counter(n):
        return list(
            eventstore.get_events(
                eventstore.Filter(
                    project_ids=[default_project.id],
                    conditions=[["tags[processing_counter]", "=", n]],
                )))

    event = eventstore.get_event_by_id(default_project.id, event_id)
    assert event.get_tag("processing_counter") == "x0"
    assert not event.data.get("errors")

    assert get_event_by_processing_counter("x0")[0].event_id == event.event_id

    old_event = event

    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id)

    burst(max_jobs=100)

    (event, ) = get_event_by_processing_counter("x1")

    # Assert original data is used
    assert event.get_tag("processing_counter") == "x1"
    assert not event.data.get("errors")

    if change_groups:
        assert event.get_hashes() != old_event.get_hashes()
    else:
        assert event.get_hashes() == old_event.get_hashes()

    assert event.group_id != old_event.group_id

    assert event.event_id == old_event.event_id
    assert int(event.data["contexts"]["reprocessing"]["original_issue_id"]) == old_event.group_id

    assert not Group.objects.filter(id=old_event.group_id).exists()

    assert is_group_finished(old_event.group_id)

    # Old event is actually getting tombstoned
    assert not get_event_by_processing_counter("x0")
    if change_groups:
        assert tombstone_calls == 1
    else:
        assert tombstone_calls == 0