Example #1
    def test_dedupe(self):
        try:
            state.set_config("use_query_id", 1)
            state.set_config("use_cache", 1)
            uniq_name = uuid.uuid4().hex[:8]

            def do_request(result_container):
                result = json.loads(
                    self.app.post(
                        "/query",
                        data=json.dumps(
                            {
                                "project": 1,
                                "granularity": 3600,
                                "aggregations": [
                                    ["count()", "", uniq_name],
                                    ["sleep(0.01)", "", "sleep"],
                                ],
                            }
                        ),
                    ).data
                )
                result_container.append(result)

            # t0 and t1 are exact duplicate queries submitted concurrently. One
            # of them will execute normally; the other should be held back by
            # the deduper until it can use the cached result from the first.
            results = [[] for _ in range(3)]
            t0 = Thread(target=do_request, args=(results[0],))
            t1 = Thread(target=do_request, args=(results[1],))
            t0.start()
            t1.start()
            t0.join()
            t1.join()

            # A subsequent request will not be marked as a duplicate: we waited
            # for the first two to finish, so nothing identical is in flight,
            # and the cached result it reads is still fresh.
            do_request(results[2])

            results = [r.pop() for r in results]
            # The results should all have the same data
            datas = [r["data"] for r in results]
            assert datas[0] == [{uniq_name: 0, "sleep": 0}]
            assert all(d == datas[0] for d in datas)

            stats = [r["stats"] for r in results]
            # We don't know in which order the two concurrent requests ran, but
            # exactly one of them must have been served from the cache.
            assert stats[0]["cache_hit"] in (True, False)
            assert stats[1]["cache_hit"] in (True, False)
            assert stats[0]["cache_hit"] != stats[1]["cache_hit"]
            # and the cached one should be the one marked as dupe
            assert stats[0]["cache_hit"] == stats[0]["is_duplicate"]
            assert stats[1]["cache_hit"] == stats[1]["is_duplicate"]

            assert stats[2]["is_duplicate"] is False

        finally:
            state.delete_config("use_query_id")
            state.delete_config("use_cache")
Example #2
def test_query_copying_disallowed() -> None:
    set_config(
        "pipeline-delegator-disallow-query-copy",
        "subscription;subscriptions_executor",
    )

    assert _is_query_copying_disallowed("subscription") is True
    assert _is_query_copying_disallowed("tsdb-modelid:4") is False
    assert _is_query_copying_disallowed("subscriptions_executor") is True

    delete_config("pipeline-delegator-disallow-query-copy")
Example #3
def delete(*, key: str) -> None:
    "Delete a single key."

    try:
        rv = state.get_raw_configs()[key]
    except KeyError:
        raise click.ClickException(f"Key {key!r} not found.")

    click.echo(human_fmt({key: rv}))
    click.confirm("\nAre you sure you want to delete this?", abort=True)

    state.delete_config(key, user=get_user())
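A hedged sketch of driving this command from a test with Click's CliRunner. It assumes delete is wired up with @click.command() and a --key option (the decorators are not shown in the snippet above):

from click.testing import CliRunner

def test_delete_aborts_without_confirmation():
    runner = CliRunner()
    # Answer "n" to the confirmation prompt; click.confirm(abort=True)
    # raises Abort, so the key is never deleted.
    result = runner.invoke(delete, ["--key", "some_key"], input="n\n")
    assert result.exit_code != 0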
Example #4
def test_cache_wait_timeout() -> None:
    pool = ClickhousePool("localhost", 9000, "", "", "")
    default_reader = NativeDriverReader(None, pool)
    tiger_errors_reader = NativeDriverReader("tiger_errors", pool)
    tiger_transactions_reader = NativeDriverReader("tiger_transactions", pool)

    query_settings = {"max_execution_time": 30}
    assert _get_cache_wait_timeout(query_settings, default_reader) == 30
    assert _get_cache_wait_timeout(query_settings, tiger_errors_reader) == 30
    assert (
        _get_cache_wait_timeout(query_settings, tiger_transactions_reader) == 30
    )

    state.set_config("tiger-cache-wait-time", 60)
    assert _get_cache_wait_timeout(query_settings, default_reader) == 30
    assert _get_cache_wait_timeout(query_settings, tiger_errors_reader) == 60
    assert (
        _get_cache_wait_timeout(query_settings, tiger_transactions_reader) == 60
    )

    state.delete_config("tiger-cache-wait-time")
Example #5
    def teardown_method(self, test_method: Any) -> None:
        # Reset rate limits
        state.delete_config("global_concurrent_limit")
        state.delete_config("global_per_second_limit")
        state.delete_config("project_concurrent_limit")
        state.delete_config("project_concurrent_limit_1")
        state.delete_config("project_per_second_limit")
        state.delete_config("date_align_seconds")
Example #6
    def teardown_method(self, test_method):
        # Reset rate limits
        state.delete_config('global_concurrent_limit')
        state.delete_config('global_per_second_limit')
        state.delete_config('project_concurrent_limit')
        state.delete_config('project_concurrent_limit_1')
        state.delete_config('project_per_second_limit')
        state.delete_config('date_align_seconds')
Example #7
def teardown_function(_: Callable[..., Any]) -> None:
    state.delete_config("use_fallback_host_in_native_connection_pool")
    state.delete_config(f"fallback_hosts:{CLUSTER_HOST}:{CLUSTER_PORT}")
Example #8
def config(config_key: str) -> Response:
    if request.method == "DELETE":
        user = request.headers.get(USER_HEADER_KEY)

        # Get the old value for notifications
        old = state.get_uncached_config(config_key)

        state.delete_config(config_key, user=user)

        if request.args.get("keepDescription") is None:
            state.delete_config_description(config_key, user=user)

        notification_client.notify(
            RuntimeConfigAction.REMOVED,
            {"option": config_key, "old": old, "new": None},
            user,
        )

        return Response("", 200)

    else:
        # PUT currently only supports editing an existing config when the old
        # and new types match. It does not yet support passing force to
        # set_config to override the type check.

        user = request.headers.get(USER_HEADER_KEY)
        data = json.loads(request.data)

        # Get the previous value for notifications
        old = state.get_uncached_config(config_key)

        try:
            new_value = data["value"]
            new_desc = data["description"]

            assert isinstance(config_key, str), "Invalid key"
            assert isinstance(new_value, str), "Invalid value"
            assert config_key != "", "Key cannot be empty string"

            state.set_config(
                config_key,
                new_value,
                user=user,
            )
            state.set_config_description(config_key, new_desc, user=user)

        except (KeyError, AssertionError) as exc:
            return Response(
                json.dumps({"error": f"Invalid config: {str(exc)}"}),
                400,
                {"Content-Type": "application/json"},
            )
        except state.MismatchedTypeException:
            return Response(
                json.dumps({"error": "Mismatched type"}),
                400,
                {"Content-Type": "application/json"},
            )

        # Value was updated successfully, refetch and return it
        evaluated_value = state.get_uncached_config(config_key)
        assert evaluated_value is not None
        evaluated_type = get_config_type_from_value(evaluated_value)

        # Send notification
        notification_client.notify(
            RuntimeConfigAction.UPDATED,
            {"option": config_key, "old": old, "new": evaluated_value},
            user=user,
        )

        config = {
            "key": config_key,
            "value": str(evaluated_value),
            "description": state.get_config_description(config_key),
            "type": evaluated_type,
        }

        return Response(
            json.dumps(config), 200, {"Content-Type": "application/json"}
        )
Example #9
def teardown_common() -> None:
    # Reset rate limits
    state.delete_config("global_concurrent_limit")
    state.delete_config("global_per_second_limit")
    state.delete_config("project_concurrent_limit")
    state.delete_config("project_concurrent_limit_1")
    state.delete_config("project_per_second_limit")
    state.delete_config("date_align_seconds")