Example 1
    def teardown_method(self, test_method):
        if self.dataset_name:
            for statement in self.dataset.get_dataset_schemas().get_drop_statements():
                self.clickhouse.execute(statement)

            redis_client.flushdb()
Example 2
    def test_query_time_flags_project_and_groups(self) -> None:
        """
        Tests that errors_replacer.set_project_needs_final() and
        errors_replacer.set_project_exclude_groups() work together as
        expected.

        The ReplacementTypes are arbitrary; they just need to show up
        in the getter appropriately once set.
        """
        redis_client.flushdb()
        project_ids = [7, 8, 9]

        errors_replacer.set_project_needs_final(7, ReplacerState.ERRORS,
                                                ReplacementType.EXCLUDE_GROUPS)
        errors_replacer.set_project_exclude_groups(7, [1, 2],
                                                   ReplacerState.ERRORS,
                                                   ReplacementType.START_MERGE)
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert (
            flags.needs_final,
            flags.group_ids_to_exclude,
            flags.replacement_types,
        ) == (
            True,
            {1, 2},
            # exclude_groups from project setter, start_merge from group setter
            {ReplacementType.EXCLUDE_GROUPS, ReplacementType.START_MERGE},
        )
Example 3
    def drop(*, dataset: Dataset):
        for statement in dataset.get_dataset_schemas().get_drop_statements():
            clickhouse_rw.execute(statement.statement)

        ensure_table_exists(dataset, force=True)
        redis_client.flushdb()
        return ("ok", 200, {"Content-Type": "text/plain"})
Example 4
    def setup_method(self, test_method):
        assert settings.TESTING, "settings.TESTING is False, try `SNUBA_SETTINGS=test` or `make test`"

        from fixtures import raw_event

        timestamp = datetime.utcnow()
        raw_event['datetime'] = (
            timestamp - timedelta(seconds=2)).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        raw_event['received'] = int(
            calendar.timegm((timestamp - timedelta(seconds=1)).timetuple()))
        self.event = self.wrap_raw_event(raw_event)

        self.database = 'default'
        self.table = settings.CLICKHOUSE_TABLE

        self.clickhouse = ClickhousePool()

        self.clickhouse.execute("DROP TABLE IF EXISTS %s" % self.table)
        self.clickhouse.execute(
            get_table_definition(
                name=self.table,
                engine=get_test_engine(),
            ))

        redis_client.flushdb()
Example 5
def run_migrations() -> Iterator[None]:
    from snuba.migrations.runner import Runner

    Runner().run_all(force=True)

    yield

    for storage_key in STORAGES:
        storage = get_storage(storage_key)
        cluster = storage.get_cluster()
        database = cluster.get_database()

        schema = storage.get_schema()
        if isinstance(schema, WritableTableSchema):
            table_name = schema.get_local_table_name()

            nodes = [
                *cluster.get_local_nodes(), *cluster.get_distributed_nodes()
            ]
            for node in nodes:
                connection = cluster.get_node_connection(
                    ClickhouseClientSettings.MIGRATE, node)
                connection.execute(
                    f"TRUNCATE TABLE IF EXISTS {database}.{table_name}")

    redis_client.flushdb()
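run_migrations is a generator: everything before the yield runs as setup (applying all migrations), and everything after runs as teardown (truncating every writable table on all nodes and flushing Redis). A minimal sketch of wiring it up, assuming pytest is in use and that the generator is not already decorated as a fixture in the source:

import pytest

@pytest.fixture(autouse=True)
def clean_state() -> Iterator[None]:
    # Delegate to the generator above so the setup/teardown ordering
    # around the yield is preserved for every test.
    yield from run_migrations()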
Example 6
def backend() -> Iterator[Cache[bytes]]:
    codec: PassthroughCodec[bytes] = PassthroughCodec()
    backend: Cache[bytes] = RedisCache(redis_client, "test", codec,
                                       ThreadPoolExecutor())
    try:
        yield backend
    finally:
        redis_client.flushdb()
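A fixture like this hands the cache to each test and flushes Redis afterwards, so keys cannot leak between tests. A hypothetical test built on it, assuming the get_readthrough signature used in Example 14 (key, producer function, the noop callback, timeout in seconds):

def test_get_readthrough(backend: Cache[bytes]) -> None:
    # First call misses the cache, runs the producer, and stores the result.
    assert backend.get_readthrough("key", lambda: b"value", noop, 10) == b"value"
    # Second call is served from the cache; the new producer never runs.
    assert backend.get_readthrough("key", lambda: b"other", noop, 10) == b"value"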
Example 7
    def drop(dataset_name):
        dataset = get_dataset(dataset_name)
        for statement in dataset.get_dataset_schemas().get_drop_statements():
            clickhouse_rw.execute(statement)

        ensure_table_exists(dataset, force=True)
        redis_client.flushdb()
        return ('ok', 200, {'Content-Type': 'text/plain'})
Example 8
    def _clear_redis_and_force_merge(self) -> None:
        redis_client.flushdb()
        cluster = self.storage.get_cluster()
        clickhouse = cluster.get_query_connection(
            ClickhouseClientSettings.OPTIMIZE)
        run_optimize(clickhouse,
                     self.storage,
                     cluster.get_database(),
                     ignore_cutoff=True)
Example 9
    def setup_method(self, test_method, dataset_name: Optional[str] = None):
        assert (
            settings.TESTING
        ), "settings.TESTING is False, try `SNUBA_SETTINGS=test` or `make test`"

        self.database = os.environ.get("CLICKHOUSE_DATABASE", "default")
        self.dataset_name = dataset_name

        if dataset_name is not None:
            self.__dataset_manager = dataset_manager(dataset_name)
            self.dataset = self.__dataset_manager.__enter__()
        else:
            self.__dataset_manager = None
            self.dataset = None

        redis_client.flushdb()
Example 10
    def test_query_time_flags_project(self) -> None:
        """
        Tests errors_replacer.set_project_needs_final()

        The ReplacementTypes are arbitrary; they just need to show up
        in the getter appropriately once set.
        """
        redis_client.flushdb()
        project_ids = [1, 2, 3]
        assert ProjectsQueryFlags.load_from_redis(
            project_ids, ReplacerState.ERRORS) == ProjectsQueryFlags(
                False, set(), set(), None)

        errors_replacer.set_project_needs_final(100, ReplacerState.ERRORS,
                                                ReplacementType.EXCLUDE_GROUPS)
        assert ProjectsQueryFlags.load_from_redis(
            project_ids, ReplacerState.ERRORS) == ProjectsQueryFlags(
                False, set(), set(), None)

        errors_replacer.set_project_needs_final(1, ReplacerState.ERRORS,
                                                ReplacementType.EXCLUDE_GROUPS)
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert (
            flags.needs_final,
            flags.group_ids_to_exclude,
            flags.replacement_types,
        ) == (
            True,
            set(),
            {ReplacementType.EXCLUDE_GROUPS},
        )

        errors_replacer.set_project_needs_final(2, ReplacerState.ERRORS,
                                                ReplacementType.EXCLUDE_GROUPS)
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert (
            flags.needs_final,
            flags.group_ids_to_exclude,
            flags.replacement_types,
        ) == (
            True,
            set(),
            {ReplacementType.EXCLUDE_GROUPS},
        )
Example 11
    def setup_method(self, test_method, dataset_name=None):
        assert (
            settings.TESTING
        ), "settings.TESTING is False, try `SNUBA_SETTINGS=test` or `make test`"

        self.database = "default"
        self.dataset_name = dataset_name

        if self.dataset_name:
            self.dataset = get_dataset(self.dataset_name)
            self.clickhouse = clickhouse_rw

            for statement in self.dataset.get_dataset_schemas().get_drop_statements():
                self.clickhouse.execute(statement.statement)

            for statement in self.dataset.get_dataset_schemas().get_create_statements():
                self.clickhouse.execute(statement.statement)

        redis_client.flushdb()
Example 12
    def teardown_method(self, test_method):
        if self.__dataset_manager:
            self.__dataset_manager.__exit__(None, None, None)

        redis_client.flushdb()
Example 13
    def drop(*, dataset: Dataset) -> Tuple[str, int, Mapping[str, str]]:
        truncate_dataset(dataset)
        redis_client.flushdb()

        return ("ok", 200, {"Content-Type": "text/plain"})
Example 14
def test_notify_queue_ttl() -> None:
    # Tests that waiting clients can be notified of the cache status
    # even with network delays. This test will break if the notify
    # queue TTL is set below 200ms.

    pop_calls = 0
    num_waiters = 9

    class DelayedRedisClient:
        def __init__(self, redis_client):
            self._client = redis_client

        def __getattr__(self, attr: str):
            # Simulate the queue pop taking longer than expected. The
            # notification queue TTL is 60 seconds, so this delay should
            # not hit a timeout (unless the TTL or how it is used has
            # changed drastically).
            if attr == "blpop":
                nonlocal pop_calls
                pop_calls += 1
                time.sleep(0.5)
            return getattr(self._client, attr)

    codec = PassthroughCodec()

    delayed_backend: Cache[bytes] = RedisCache(
        cast(RedisClientType, DelayedRedisClient(redis_client)),
        "test",
        codec,
        ThreadPoolExecutor(),
    )
    key = "key"
    try:

        def normal_function() -> bytes:
            # this sleep makes sure that all waiting clients
            # are put into the waiting queue
            time.sleep(0.5)
            return b"hello-cached"

        def normal_function_uncached() -> bytes:
            return b"hello-not-cached"

        def cached_query() -> bytes:
            return delayed_backend.get_readthrough(key, normal_function, noop, 10)

        def uncached_query() -> bytes:
            return delayed_backend.get_readthrough(
                key, normal_function_uncached, noop, 10
            )

        setter = execute(cached_query)
        waiters = []
        time.sleep(0.1)
        for _ in range(num_waiters):
            waiters.append(execute(uncached_query))

        # make sure that all clients actually did hit the cache
        assert setter.result() == b"hello-cached"
        for w in waiters:
            assert w.result() == b"hello-cached"
        # make sure that all the waiters actually did hit the notification queue
        # and didn't just get a direct cache hit
        assert pop_calls == num_waiters
    finally:
        redis_client.flushdb()
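The execute helper that launches the setter and the waiters is not shown in this example; a minimal sketch, assuming it simply schedules a zero-argument callable on a thread pool and returns a Future:

from concurrent.futures import Future, ThreadPoolExecutor
from typing import Callable, TypeVar

T = TypeVar("T")

_pool = ThreadPoolExecutor()

def execute(function: Callable[[], T]) -> Future[T]:
    # Run the callable on a background thread so several cache
    # readers can block on Redis concurrently.
    return _pool.submit(function)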
Example 15
    def test_query_time_flags_groups(self) -> None:
        """
        Tests errors_replacer.set_project_exclude_groups()

        The ReplacementTypes are arbitrary; they just need to show up
        in the getter appropriately once set.
        """
        redis_client.flushdb()
        project_ids = [4, 5, 6]
        errors_replacer.set_project_exclude_groups(
            4, [1, 2], ReplacerState.ERRORS, ReplacementType.EXCLUDE_GROUPS)
        errors_replacer.set_project_exclude_groups(5, [3, 4],
                                                   ReplacerState.ERRORS,
                                                   ReplacementType.START_MERGE)
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert (
            flags.needs_final,
            flags.group_ids_to_exclude,
            flags.replacement_types,
        ) == (
            False,
            {1, 2, 3, 4},
            {ReplacementType.EXCLUDE_GROUPS, ReplacementType.START_MERGE},
        )

        errors_replacer.set_project_exclude_groups(
            4, [1, 2], ReplacerState.ERRORS, ReplacementType.EXCLUDE_GROUPS)
        errors_replacer.set_project_exclude_groups(
            5, [3, 4], ReplacerState.ERRORS, ReplacementType.EXCLUDE_GROUPS)
        errors_replacer.set_project_exclude_groups(
            6, [5, 6], ReplacerState.ERRORS, ReplacementType.START_UNMERGE)

        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert (
            flags.needs_final,
            flags.group_ids_to_exclude,
            flags.replacement_types,
        ) == (
            False,
            {1, 2, 3, 4, 5, 6},
            {
                ReplacementType.EXCLUDE_GROUPS,
                # start_merge should still show up from the earlier
                # setter on project id 5
                ReplacementType.START_MERGE,
                ReplacementType.START_UNMERGE,
            },
        )
        flags = ProjectsQueryFlags.load_from_redis([4, 5],
                                                   ReplacerState.ERRORS)
        assert (
            flags.needs_final,
            flags.group_ids_to_exclude,
            flags.replacement_types,
        ) == (
            False,
            {1, 2, 3, 4},
            {ReplacementType.EXCLUDE_GROUPS, ReplacementType.START_MERGE},
        )
        flags = ProjectsQueryFlags.load_from_redis([4], ReplacerState.ERRORS)
        assert (
            flags.needs_final,
            flags.group_ids_to_exclude,
            flags.replacement_types,
        ) == (
            False,
            {1, 2},
            {ReplacementType.EXCLUDE_GROUPS},
        )
Example 16
    def test_latest_replacement_time_by_projects(self) -> None:
        project_ids = [1, 2, 3]
        p = redis_client.pipeline()

        exclude_groups_keys = [
            errors_replacer.ProjectsQueryFlags.
            _build_project_exclude_groups_key_and_type_key(
                project_id, ReplacerState.ERRORS) for project_id in project_ids
        ]

        project_needs_final_keys = [
            errors_replacer.ProjectsQueryFlags.
            _build_project_needs_final_key_and_type_key(
                project_id, ReplacerState.ERRORS) for project_id in project_ids
        ]

        now = datetime.now()

        # No replacements or needs final
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert flags.latest_replacement_time is None

        # All projects need final
        time_offset = 0
        for project_needs_final_key, _ in project_needs_final_keys:
            p.set(project_needs_final_key, now.timestamp() + time_offset)
            time_offset += 10
        p.execute()
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        expected_time = now + timedelta(seconds=20)
        assert (flags.latest_replacement_time is not None and abs(
            (flags.latest_replacement_time - expected_time).total_seconds()) <
                1)
        redis_client.flushdb()

        # Some projects need final
        time_offset = 0
        for project_needs_final_key, _ in project_needs_final_keys[1:]:
            p.set(project_needs_final_key, now.timestamp() + time_offset)
            time_offset += 10
        p.execute()
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        expected_time = now + timedelta(seconds=10)
        assert (flags.latest_replacement_time is not None and abs(
            (flags.latest_replacement_time - expected_time).total_seconds()) <
                1)
        redis_client.flushdb()

        # One exclude group per project
        group_id_data_asc: MutableMapping[str, float] = {"1": now.timestamp()}
        for exclude_groups_key, _ in exclude_groups_keys:
            group_id_data_asc["1"] += 10
            to_insert: Mapping[str | bytes, bytes | int | float | str] = {
                "1": group_id_data_asc["1"],
            }  # annotated to satisfy zadd's expected mapping type
            p.zadd(exclude_groups_key, to_insert)
        p.execute()
        expected_time = now + timedelta(seconds=30)
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert (flags.latest_replacement_time is not None and abs(
            (flags.latest_replacement_time - expected_time).total_seconds()) <
                1)
        redis_client.flushdb()

        # Multiple exclude groups per project
        group_id_data_multiple: MutableMapping[str, float] = {
            "1": (now + timedelta(seconds=10)).timestamp(),
            "2": now.timestamp(),
        }
        for exclude_groups_key, _ in exclude_groups_keys:
            group_id_data_multiple["1"] -= 10
            group_id_data_multiple["2"] -= 10
            to_insert = {
                "1": group_id_data_multiple["1"],
                "2": group_id_data_multiple["2"],
            }  # same explicit mapping type for zadd as above
            p.zadd(exclude_groups_key, to_insert)
        p.execute()
        expected_time = now
        flags = ProjectsQueryFlags.load_from_redis(project_ids,
                                                   ReplacerState.ERRORS)
        assert (flags.latest_replacement_time is not None and abs(
            (flags.latest_replacement_time - expected_time).total_seconds()) <
                1)
        redis_client.flushdb()
Example 17
    def drop(*, dataset: Dataset) -> RespTuple:
        truncate_dataset(dataset)
        redis_client.flushdb()

        return ("ok", 200, {"Content-Type": "text/plain"})
Example 18
def teardown_function() -> None:
    # pytest calls a module-level teardown_function after each test in
    # the module, so every test leaves an empty Redis database behind.
    redis_client.flushdb()
Example 19
    def teardown_method(self, test_method):
        self.clickhouse.execute("DROP TABLE IF EXISTS %s" % self.table)

        redis_client.flushdb()