Example no. 1
import uuid

from sentry import eventstore
from sentry.models import Group
from sentry.reprocessing2 import is_group_finished  # assumed import path
from sentry.tasks.reprocessing2 import reprocess_group  # assumed import path


def test_basic(
    task_runner,
    default_project,
    change_groups,
    reset_snuba,
    process_and_save,
    register_event_preprocessor,
    burst_task_runner,
    monkeypatch,
):
    from sentry import eventstream

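    # Count calls to the (unsafe) tombstone helper so we can later assert
    # whether old events were excluded from the eventstream.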
    tombstone_calls = 0
    old_tombstone_fn = eventstream.tombstone_events_unsafe

    def tombstone_called(*args, **kwargs):
        nonlocal tombstone_calls
        tombstone_calls += 1
        old_tombstone_fn(*args, **kwargs)

    monkeypatch.setattr("sentry.eventstream.tombstone_events_unsafe",
                        tombstone_called)

    abs_count = 0

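    # Tag every (re)processed event with an incrementing counter so the
    # original run (x0) can be told apart from the reprocessed run (x1).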
    @register_event_preprocessor
    def event_preprocessor(data):
        nonlocal abs_count
        tags = data.setdefault("tags", [])
        assert all(not x or x[0] != "processing_counter" for x in tags)
        tags.append(("processing_counter", f"x{abs_count}"))
        abs_count += 1

        if change_groups:
            data["fingerprint"] = [uuid.uuid4().hex]
        else:
            data["fingerprint"] = ["foo"]

        return data

    event_id = process_and_save(
        {"tags": [["key1", "value"], None, ["key2", "value"]]})

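    # Helper: look events up in Snuba via the tag written by the preprocessor.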
    def get_event_by_processing_counter(n):
        return list(
            eventstore.get_events(
                eventstore.Filter(
                    project_ids=[default_project.id],
                    conditions=[["tags[processing_counter]", "=", n]],
                )))

    event = eventstore.get_event_by_id(default_project.id, event_id)
    assert event.get_tag("processing_counter") == "x0"
    assert not event.data.get("errors")

    assert get_event_by_processing_counter("x0")[0].event_id == event.event_id

    old_event = event

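    # Reprocess the whole group, then drain all tasks spawned by the burst runner.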
    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id)

    burst(max_jobs=100)

    (event, ) = get_event_by_processing_counter("x1")

    # Assert original data is used
    assert event.get_tag("processing_counter") == "x1"
    assert not event.data.get("errors")

    if change_groups:
        assert event.get_hashes() != old_event.get_hashes()
    else:
        assert event.get_hashes() == old_event.get_hashes()

    assert event.group_id != old_event.group_id

    assert event.event_id == old_event.event_id
    assert int(event.data["contexts"]["reprocessing"]
               ["original_issue_id"]) == old_event.group_id

    assert not Group.objects.filter(id=old_event.group_id).exists()

    assert is_group_finished(old_event.group_id)

    # Old event is actually getting tombstoned
    assert not get_event_by_processing_counter("x0")
    if change_groups:
        assert tombstone_calls == 1
    else:
        assert tombstone_calls == 0
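
The change_groups fixture drives the two variants of this test, but its definition is not shown on this page. A minimal sketch of how it might be parametrized in the test module's conftest.py (an assumption, not the project's actual fixture):

import pytest

@pytest.fixture(params=[True, False], ids=["new_group", "same_group"])
def change_groups(request):
    # True: the preprocessor assigns a random fingerprint, so reprocessed
    # events land in a brand-new group; False: the fingerprint stays stable.
    return request.param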
Example no. 2
from sentry import eventstore
from sentry.reprocessing2 import is_group_finished  # assumed import path
from sentry.tasks.reprocessing2 import reprocess_group  # assumed import path


def test_max_events(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    burst_task_runner,
    monkeypatch,
    remaining_events,
    max_events,
):
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1
        return data

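    # Save five events, oldest first, so list order matches chronological order.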
    event_ids = [
        process_and_save({"message": "hello world"}, seconds_ago=i + 1)
        for i in reversed(range(5))
    ]

    old_events = {
        event_id: eventstore.get_event_by_id(default_project.id, event_id)
        for event_id in event_ids
    }

    (group_id, ) = {e.group_id for e in old_events.values()}

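    # Reprocess at most max_events of the group's events; what happens to the
    # rest is controlled by remaining_events ("delete" or "keep").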
    with burst_task_runner() as burst:
        reprocess_group(
            default_project.id,
            group_id,
            max_events=max_events,
            remaining_events=remaining_events,
        )

    burst(max_jobs=100)

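    # The oldest events beyond the max_events cutoff are either deleted or kept
    # with unchanged data; the remaining ones are reprocessed into a new group.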
    for i, event_id in enumerate(event_ids):
        event = eventstore.get_event_by_id(default_project.id, event_id)
        if max_events is not None and i < (len(event_ids) - max_events):
            if remaining_events == "delete":
                assert event is None
            elif remaining_events == "keep":
                assert event.group_id != group_id
                assert dict(event.data) == dict(old_events[event_id].data)
            else:
                raise ValueError(remaining_events)
        else:
            assert event.group_id != group_id
            assert int(event.data["contexts"]["reprocessing"]
                       ["original_issue_id"]) == group_id
            assert dict(event.data) != dict(old_events[event_id].data)

    if remaining_events == "delete":
        assert event.group.times_seen == (max_events or 5)
    elif remaining_events == "keep":
        assert event.group.times_seen == 5
    else:
        raise ValueError(remaining_events)

    assert is_group_finished(group_id)
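
As above, max_events and remaining_events arrive as parametrized fixtures that are not shown here. A minimal sketch of how they might be defined (assumed values, not the project's actual conftest):

import pytest

@pytest.fixture(params=[2, None], ids=["limited", "unlimited"])
def max_events(request):
    # None means "reprocess everything"; an int caps reprocessing at the
    # N most recent events of the group.
    return request.param

@pytest.fixture(params=["delete", "keep"])
def remaining_events(request):
    # What to do with events that fall outside the max_events window.
    return request.param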
Example no. 3
from io import BytesIO

from sentry import eventstore
from sentry.attachments import attachment_cache  # assumed import path
from sentry.models import EventAttachment, File, UserReport
from sentry.reprocessing2 import is_group_finished  # assumed import path
from sentry.tasks.reprocessing2 import reprocess_group  # assumed import path
from sentry.utils.cache import cache_key_for_event  # assumed import path


def test_attachments_and_userfeedback(
    default_project,
    reset_snuba,
    register_event_preprocessor,
    process_and_save,
    burst_task_runner,
    monkeypatch,
):
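    # The preprocessor records which attachment types were visible in the
    # attachment cache while the event was being (re)processed.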
    @register_event_preprocessor
    def event_preprocessor(data):
        extra = data.setdefault("extra", {})
        extra.setdefault("processing_counter", 0)
        extra["processing_counter"] += 1

        cache_key = cache_key_for_event(data)
        attachments = attachment_cache.get(cache_key)
        extra.setdefault("attachments", []).append(
            [attachment.type for attachment in attachments])

        return data

    # Required so that the minidump is loaded into the attachment cache.
    MINIDUMP_PLACEHOLDER = {
        "platform": "native",
        "exception": {
            "values": [{
                "mechanism": {
                    "type": "minidump"
                },
                "type": "test bogus"
            }]
        },
    }

    event_id_to_delete = process_and_save(
        {
            "message": "hello world",
            **MINIDUMP_PLACEHOLDER
        }, seconds_ago=5)
    event_to_delete = eventstore.get_event_by_id(default_project.id,
                                                 event_id_to_delete)

    event_id = process_and_save({
        "message": "hello world",
        "platform": "native",
        **MINIDUMP_PLACEHOLDER
    })
    event = eventstore.get_event_by_id(default_project.id, event_id)

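    # Give both events two file attachments (a plain attachment and a minidump)
    # plus a user report, all pointing at the original group.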
    for evt in (event, event_to_delete):
        for attachment_type in ("event.attachment", "event.minidump"):
            file = File.objects.create(name="foo", type=attachment_type)
            file.putfile(BytesIO(b"hello world"))
            EventAttachment.objects.create(
                event_id=evt.event_id,
                group_id=evt.group_id,
                project_id=default_project.id,
                file_id=file.id,
                type=file.type,
                name="foo",
            )

        UserReport.objects.create(
            project_id=default_project.id,
            event_id=evt.event_id,
            name="User",
        )

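    # Reprocess only the newest event; the older one falls outside max_events
    # and is deleted together with its attachments and user report.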
    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id, max_events=1)

    burst(max_jobs=100)

    new_event = eventstore.get_event_by_id(default_project.id, event_id)
    assert new_event.group_id != event.group_id

    assert new_event.data["extra"]["attachments"] == [["event.minidump"]]

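    # Attachments and the user report must have been re-pointed at the new group.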
    att, mdmp = EventAttachment.objects.filter(
        project_id=default_project.id).order_by("type")
    assert att.group_id == mdmp.group_id == new_event.group_id
    assert att.event_id == mdmp.event_id == event_id
    assert att.type == "event.attachment"
    assert mdmp.type == "event.minidump"

    (rep, ) = UserReport.objects.filter(project_id=default_project.id)
    assert rep.group_id == new_event.group_id
    assert rep.event_id == event_id

    assert is_group_finished(event.group_id)
Example no. 4
import uuid

from sentry import eventstore
from sentry.models import Group
from sentry.reprocessing2 import is_group_finished  # assumed import path
from sentry.tasks.reprocessing2 import reprocess_group  # assumed import path


def test_basic(
    task_runner,
    default_project,
    change_groups,
    reset_snuba,
    process_and_save,
    register_event_preprocessor,
    burst_task_runner,
):
    abs_count = 0

    @register_event_preprocessor
    def event_preprocessor(data):
        nonlocal abs_count
        tags = data.setdefault("tags", [])
        assert all(not x or x[0] != "processing_counter" for x in tags)
        tags.append(("processing_counter", f"x{abs_count}"))
        abs_count += 1

        if change_groups:
            data["fingerprint"] = [uuid.uuid4().hex]
        else:
            data["fingerprint"] = ["foo"]

        return data

    event_id = process_and_save(
        {"tags": [["key1", "value"], None, ["key2", "value"]]})

    def get_event_by_processing_counter(n):
        return list(
            eventstore.get_events(
                eventstore.Filter(
                    project_ids=[default_project.id],
                    conditions=[["tags[processing_counter]", "=", n]],
                )))

    event = eventstore.get_event_by_id(default_project.id, event_id)
    assert event.get_tag("processing_counter") == "x0"
    assert not event.data.get("errors")

    assert get_event_by_processing_counter("x0")[0].event_id == event.event_id

    old_event = event

    with burst_task_runner() as burst:
        reprocess_group(default_project.id, event.group_id)

    burst(max_jobs=100)

    (event, ) = get_event_by_processing_counter("x1")

    # Assert original data is used
    assert event.get_tag("processing_counter") == "x1"
    assert not event.data.get("errors")

    if change_groups:
        assert event.get_hashes() != old_event.get_hashes()
    else:
        assert event.get_hashes() == old_event.get_hashes()

    assert event.group_id != old_event.group_id

    assert event.event_id == old_event.event_id
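    # This older variant records the original group id as an event tag rather
    # than in the reprocessing context.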
    assert int(event.get_tag("original_group_id")) == old_event.group_id

    assert not Group.objects.filter(id=old_event.group_id).exists()

    assert is_group_finished(old_event.group_id)