Example #1
    def test_tags_hash_map(self) -> None:
        """
        Adds an event and ensures the tags_hash_map is properly populated,
        including escaping of the tag keys.
        """
        self.event = get_raw_event()
        self.event["data"]["tags"].append(["test_tag1", "value1"])
        self.event["data"]["tags"].append(["test_tag=2", "value2"])  # Requires escaping
        self.write_unprocessed_events([self.event])

        clickhouse = (
            get_storage(StorageKey.EVENTS)
            .get_cluster()
            .get_query_connection(ClickhouseClientSettings.QUERY)
        )

        hashed = clickhouse.execute(
            "SELECT cityHash64('test_tag1=value1'), cityHash64('test_tag\\\\=2=value2')"
        )
        tag1, tag2 = hashed[0]

        event = clickhouse.execute(
            (
                f"SELECT event_id FROM sentry_local WHERE has(_tags_hash_map, {tag1}) "
                f"AND has(_tags_hash_map, {tag2})"
            )
        )
        assert len(event) == 1
        assert event[0][0] == self.event["data"]["id"]
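The escaping rule this test exercises can be read directly off the two cityHash64 literals: a "=" inside a tag key is backslash-escaped (and a backslash presumably doubled) before the "key=value" concatenation that gets hashed. A minimal sketch of that rule; the function name is hypothetical, not Snuba's actual implementation:

    def tag_hash_map_entry(key: str, value: str) -> str:
        # Inferred from the test above: escape "\" and "=" in the tag key,
        # then concatenate as "key=value" for cityHash64().
        escaped_key = key.replace("\\", "\\\\").replace("=", "\\=")
        return f"{escaped_key}={value}"

    assert tag_hash_map_entry("test_tag1", "value1") == "test_tag1=value1"
    assert tag_hash_map_entry("test_tag=2", "value2") == "test_tag\\=2=value2"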
Example #2
    def setup_method(self, test_method):
        self.metadata = KafkaMessageMetadata(0, 0, datetime.now())
        self.event = get_raw_event()

        self.processor = (
            get_writable_storage(StorageKey.EVENTS)
            .get_table_writer()
            .get_stream_loader()
            .get_processor()
        )
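With this fixture in place, a test would typically feed a raw event through the processor. The (2, "insert", event) tuple below mirrors the wire format that example #15 serializes onto Kafka; treat this as a hedged usage sketch rather than an exact test body:

    # Assumes the processor exposes process_message(message, metadata),
    # as the stream loader's processors do in these tests.
    processed = self.processor.process_message(
        (2, "insert", self.event), self.metadata
    )
    assert processed is not None  # a batch of processed insert rows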
Example #3
    def test_tags_hash_map(self) -> None:
        """
        Adds an event and ensures the tags_hash_map is properly populated,
        including escaping of the tag keys.
        """
        self.event = get_raw_event()
        self.event["data"]["tags"].append(["test_tag1", "value1"])
        self.event["data"]["tags"].append(["test_tag=2",
                                           "value2"])  # Requires escaping
        storage = get_writable_storage(StorageKey.ERRORS)
        schema = storage.get_schema()
        assert isinstance(schema, TableSchema)
        table_name = schema.get_table_name()
        write_unprocessed_events(storage, [self.event])

        clickhouse = storage.get_cluster().get_query_connection(
            ClickhouseClientSettings.QUERY)

        hashed = clickhouse.execute(
            "SELECT cityHash64('test_tag1=value1'), cityHash64('test_tag\\\\=2=value2')"
        )
        tag1, tag2 = hashed[0]

        event = clickhouse.execute((
            f"SELECT replaceAll(toString(event_id), '-', '') FROM {table_name} WHERE has(_tags_hash_map, {tag1}) "
            f"AND has(_tags_hash_map, {tag2})"))
        assert len(event) == 1
        assert event[0][0] == self.event["data"]["id"]
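The replaceAll(toString(event_id), '-', '') projection exists because the errors table stores event_id as a UUID, while the raw event fixture carries a dashless 32-character hex string. A quick standard-library illustration (the literal id is made up for the example):

    import uuid

    raw_id = "00e24a150d7f4ee4bb3dc1b4d6eb4a90"  # hypothetical fixture id
    assert str(uuid.UUID(raw_id)).replace("-", "") == raw_id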
Example #4
    def setup_method(self, test_method):
        super().setup_method(test_method)
        self.app.post = partial(self.app.post, headers={"referer": "test"})
        self.trace_id = uuid.UUID("7400045b-25c4-43b8-8591-4600aa83ad04")
        self.event = get_raw_event()
        self.project_id = self.event["project_id"]
        write_unprocessed_events(get_writable_storage(StorageKey.EVENTS),
                                 [self.event])
        write_unprocessed_events(
            get_writable_storage(StorageKey.TRANSACTIONS),
            [get_raw_transaction()],
        )
Example #5
    def setup_method(self):
        from snuba.web.views import application

        assert application.testing is True

        self.app = application.test_client()
        self.app.post = partial(self.app.post, headers={"referer": "test"})
        self.storage = get_storage(StorageKey.EVENTS)

        self.replacer = replacer.ReplacerWorker(
            self.storage, DummyMetricsBackend(strict=True))

        self.project_id = 1
        self.event = get_raw_event()
Example #6
    def setup_method(self) -> None:
        from snuba.web.views import application

        assert application.testing is True

        self.app = application.test_client()
        self.storage = get_writable_storage(StorageKey.ERRORS)

        self.replacer = replacer.ReplacerWorker(
            self.storage,
            CONSUMER_GROUP,
            DummyMetricsBackend(strict=True),
        )

        self.project_id = 1
        self.event = get_raw_event()
Example #7
    def setup_method(self, test_method):
        super().setup_method(test_method)
        self.app.post = partial(self.app.post, headers={"referer": "test"})
        self.trace_id = uuid.UUID("7400045b-25c4-43b8-8591-4600aa83ad04")
        self.event = get_raw_event()
        self.project_id = self.event["project_id"]
        self.skew = timedelta(minutes=180)
        self.base_time = datetime.utcnow().replace(
            minute=0, second=0, microsecond=0,
            tzinfo=pytz.utc) - timedelta(minutes=180)
        write_unprocessed_events(get_writable_storage(StorageKey.EVENTS),
                                 [self.event])
        write_unprocessed_events(
            get_writable_storage(StorageKey.TRANSACTIONS),
            [get_raw_transaction()],
        )
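The base_time and skew pair set up here presumably follows the usual pattern in these fixtures: queries are issued over the window [base_time - skew, base_time + skew] so the freshly written event falls inside it. A hedged sketch of how a test built on this fixture would derive its bounds:

    # Hypothetical helper variables, not part of the fixture itself.
    from_date = (self.base_time - self.skew).isoformat()
    to_date = (self.base_time + self.skew).isoformat()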
Example #8
    def setup_method(self, test_method):
        super(TestReplacer, self).setup_method(test_method, "events_migration")

        from snuba.web.views import application

        assert application.testing is True

        self.app = application.test_client()
        self.app.post = partial(self.app.post, headers={"referer": "test"})

        self.replacer = replacer.ReplacerWorker(
            self.dataset.get_writable_storage(),
            DummyMetricsBackend(strict=True))

        self.project_id = 1
        self.event = get_raw_event()
Example #9
    def setup_method(self):
        from snuba.web.views import application

        assert application.testing is True

        self.app = application.test_client()
        self.app.post = partial(self.app.post, headers={"referer": "test"})

        self.storage = get_writable_storage(StorageKey.ERRORS)
        self.replacer = replacer.ReplacerWorker(
            self.storage, DummyMetricsBackend(strict=True))

        self.project_id = 1
        self.event = get_raw_event()
        settings.ERRORS_ROLLOUT_ALL = True
        settings.ERRORS_ROLLOUT_WRITABLE_STORAGE = True
Example #10
def test_backfill_errors() -> None:
    backfill_migration_id = "0014_backfill_errors"
    runner = Runner()
    runner.run_migration(MigrationKey(MigrationGroup.SYSTEM,
                                      "0001_migrations"))

    run_prior_migrations(MigrationGroup.EVENTS, backfill_migration_id, runner)

    errors_storage = get_writable_storage(StorageKey.ERRORS)
    clickhouse = errors_storage.get_cluster().get_query_connection(
        ClickhouseClientSettings.QUERY)
    errors_table_name = (
        errors_storage.get_table_writer().get_schema().get_table_name()
    )

    raw_events = []
    for i in range(10):
        event = get_raw_event()
        raw_events.append(event)

    events_storage = get_writable_storage(StorageKey.EVENTS)

    write_unprocessed_events(events_storage, raw_events)

    assert get_count_from_storage(errors_table_name, clickhouse) == 0

    # Run 0014_backfill_errors
    runner.run_migration(MigrationKey(MigrationGroup.EVENTS,
                                      backfill_migration_id),
                         force=True)

    assert get_count_from_storage(errors_table_name, clickhouse) == 10

    outcome = perform_select_query(["contexts.key", "contexts.value"],
                                   errors_table_name, None, str(1), clickhouse)

    assert outcome[0] == (
        [
            "device.model_id",
            "geo.city",
            "geo.country_code",
            "geo.region",
            "os.kernel_version",
        ],
        ["Galaxy", "San Francisco", "US", "CA", "1.1.1"],
    )
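get_count_from_storage is presumably the same bare SELECT count() probe that example #13 inlines as get_errors_count; a sketch under that assumption:

    def get_count_from_storage(table_name, clickhouse) -> int:
        # Assumed equivalent to the inline helper in example #13.
        return clickhouse.execute(f"SELECT count() FROM {table_name}")[0][0]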
Example #11
    def setup_method(self, test_method: Callable[..., Any]) -> None:
        super().setup_method(test_method)
        self.trace_id = uuid.UUID("7400045b-25c4-43b8-8591-4600aa83ad04")
        self.event = get_raw_event()
        self.project_id = self.event["project_id"]
        self.org_id = self.event["organization_id"]
        self.skew = timedelta(minutes=180)
        self.base_time = datetime.utcnow().replace(
            minute=0, second=0, microsecond=0) - timedelta(minutes=180)
        events_storage = get_entity(EntityKey.EVENTS).get_writable_storage()
        assert events_storage is not None
        write_unprocessed_events(events_storage, [self.event])
        self.next_time = datetime.utcnow().replace(
            minute=0, second=0, microsecond=0) + timedelta(minutes=180)
        write_unprocessed_events(
            get_writable_storage(StorageKey.TRANSACTIONS),
            [get_raw_transaction()],
        )
Example #12
    def setup_method(self, test_method):
        super().setup_method(test_method)
        self.app.post = partial(self.app.post, headers={"referer": "test"})
        self.event = get_raw_event()
        self.project_id = self.event["project_id"]
        self.base_time = datetime.utcnow().replace(
            second=0, microsecond=0, tzinfo=pytz.utc
        ) - timedelta(minutes=90)
        self.next_time = self.base_time + timedelta(minutes=95)

        self.events_storage = get_entity(EntityKey.EVENTS).get_writable_storage()
        write_unprocessed_events(self.events_storage, [self.event])

        groups = [
            {
                "offset": 0,
                "project_id": self.project_id,
                "id": self.event["group_id"],
                "record_deleted": 0,
                "status": 0,
            }
        ]

        groups_storage = get_entity(EntityKey.GROUPEDMESSAGES).get_writable_storage()
        groups_storage.get_table_writer().get_batch_writer(
            metrics=DummyMetricsBackend(strict=True)
        ).write([json.dumps(group).encode("utf-8") for group in groups])

        assignees = [
            {
                "offset": 0,
                "project_id": self.project_id,
                "group_id": self.event["group_id"],
                "record_deleted": 0,
                "user_id": 100,
            }
        ]

        assignees_storage = get_entity(EntityKey.GROUPASSIGNEE).get_writable_storage()
        assignees_storage.get_table_writer().get_batch_writer(
            metrics=DummyMetricsBackend(strict=True)
        ).write([json.dumps(assignee).encode("utf-8") for assignee in assignees])
Example #13
def test_backfill_errors() -> None:
    errors_storage = get_writable_storage(StorageKey.ERRORS)
    clickhouse = errors_storage.get_cluster().get_query_connection(
        ClickhouseClientSettings.QUERY)
    errors_table_name = (
        errors_storage.get_table_writer().get_schema().get_table_name()
    )

    def get_errors_count() -> int:
        return clickhouse.execute(
            f"SELECT count() from {errors_table_name}")[0][0]

    raw_events = []
    for i in range(10):
        event = get_raw_event()
        raw_events.append(event)

    events_storage = get_writable_storage(StorageKey.EVENTS)

    write_unprocessed_events(events_storage, raw_events)

    assert get_errors_count() == 0

    backfill_errors()

    assert get_errors_count() == 10

    assert clickhouse.execute(
        f"SELECT contexts.key, contexts.value from {errors_table_name} LIMIT 1;"
    )[0] == (
        (
            "device.model_id",
            "geo.city",
            "geo.country_code",
            "geo.region",
            "os.kernel_version",
        ),
        ("Galaxy", "San Francisco", "US", "CA", "1.1.1"),
    )
Example #14
    def setup_method(self) -> None:
        from snuba.web.views import application

        assert application.testing is True

        self.app = application.test_client()

        self.storage = get_writable_storage(StorageKey.ERRORS)
        self.replacer = replacer.ReplacerWorker(
            self.storage,
            CONSUMER_GROUP,
            DummyMetricsBackend(strict=True),
        )

        # The total query time range spans from 24h before to 24h after now,
        # to account for local machine time zones.
        self.from_time = datetime.now().replace(
            minute=0, second=0, microsecond=0) - timedelta(days=1)

        self.to_time = self.from_time + timedelta(days=2)

        self.project_id = 1
        self.event = get_raw_event()
Example #15
    process_message_multistorage,
    process_message_multistorage_identical_storages,
)
from snuba.datasets.storages import StorageKey
from tests.fixtures import get_raw_event, get_raw_transaction

test_data = [
    pytest.param(
        Message(
            Partition(Topic("errors"), 1),
            1,
            MultistorageKafkaPayload(
                [StorageKey.ERRORS, StorageKey.ERRORS_V2],
                KafkaPayload(
                    None,
                    json.dumps((2, "insert", get_raw_event())).encode("utf-8"),
                    [],
                ),
            ),
            datetime.now(),
        ),
        True,
        id="both errors storage",
    ),
    pytest.param(
        Message(
            Partition(Topic("errors"), 1),
            1,
            MultistorageKafkaPayload(
                [StorageKey.TRANSACTIONS, StorageKey.TRANSACTIONS_V2],
                KafkaPayload(
Example #16
    def setup_method(self, test_method):
        super().setup_method(test_method, "events")
        self.metadata = KafkaMessageMetadata(0, 0, datetime.now())
        self.event = get_raw_event()
Example #17
    def setup_method(self, test_method):
        self.dataset = get_dataset("events")
        self.metadata = KafkaMessageMetadata(0, 0, datetime.now())
        self.event = get_raw_event()
Example #18
 def setup_method(self, test_method, dataset_name="events"):
     super(BaseEventsTest, self).setup_method(test_method, dataset_name)
     self.table = enforce_table_writer(self.dataset).get_schema().get_table_name()
     self.event = InsertEvent(get_raw_event())