Example No. 1
    def test_config(self):
        state.set_config('foo', 1)
        state.set_configs({'bar': 2, 'baz': 3})
        assert state.get_config('foo') == 1
        assert state.get_config('bar') == 2
        assert state.get_config('noexist', 4) == 4
        all_configs = state.get_all_configs()
        assert all(all_configs[k] == v
                   for k, v in [('foo', 1), ('bar', 2), ('baz', 3)])
        assert state.get_configs([('foo', 100), ('bar', 200), ('noexist', 300),
                                  ('noexist-2', None)]) == [1, 2, 300, None]

        state.set_configs({'bar': 'quux'})
        all_configs = state.get_all_configs()
        assert all(all_configs[k] == v
                   for k, v in [('foo', 1), ('bar', 'quux'), ('baz', 3)])
Example No. 2
    def test_config(self):
        state.set_config("foo", 1)
        state.set_configs({"bar": 2, "baz": 3})
        assert state.get_config("foo") == 1
        assert state.get_config("bar") == 2
        assert state.get_config("noexist", 4) == 4
        all_configs = state.get_all_configs()
        assert all(all_configs[k] == v
                   for k, v in [("foo", 1), ("bar", 2), ("baz", 3)])
        assert state.get_configs([("foo", 100), ("bar", 200), ("noexist", 300),
                                  ("noexist-2", None)]) == [1, 2, 300, None]

        state.set_configs({"bar": "quux"})
        all_configs = state.get_all_configs()
        assert all(all_configs[k] == v
                   for k, v in [("foo", 1), ("bar", "quux"), ("baz", 3)])
Example No. 3
def raw_query(request: Request, sql, client, timer, stats=None):
    """
    Submit a raw SQL query to clickhouse and do some post-processing on it to
    fix some of the formatting issues in the result JSON
    """
    from snuba.clickhouse.native import NativeDriverReader

    project_ids = to_list(request.extensions['project']['project'])
    # TODO rate limit on every project in the list?
    project_id = project_ids[0] if project_ids else 0
    stats = stats or {}
    grl, gcl, prl, pcl, use_cache, use_deduper, uc_max = state.get_configs([
        ('global_per_second_limit', None),
        ('global_concurrent_limit', 1000),
        ('project_per_second_limit', 1000),
        ('project_concurrent_limit', 1000),
        ('use_cache', 0),
        ('use_deduper', 1),
        ('uncompressed_cache_max_cols', 5),
    ])

    # Specific projects can have their rate limits overridden
    prl, pcl = state.get_configs([
        ('project_per_second_limit_{}'.format(project_id), prl),
        ('project_concurrent_limit_{}'.format(project_id), pcl),
    ])

    all_confs = state.get_all_configs()
    query_settings = {
        k.split('/', 1)[1]: v
        for k, v in all_confs.items() if k.startswith('query_settings/')
    }

    # Experiment, if we are going to grab more than X columns worth of data,
    # don't use uncompressed_cache in clickhouse, or result cache in snuba.
    if len(all_referenced_columns(request.query.get_body())) > uc_max:
        query_settings['use_uncompressed_cache'] = 0
        use_cache = 0

    timer.mark('get_configs')

    query_id = md5(force_bytes(sql)).hexdigest()
    with state.deduper(query_id if use_deduper else None) as is_dupe:
        timer.mark('dedupe_wait')

        result = state.get_result(query_id) if use_cache else None
        timer.mark('cache_get')

        stats.update({
            'is_duplicate': is_dupe,
            'query_id': query_id,
            'use_cache': bool(use_cache),
            'cache_hit': bool(result)
        })

        if result:
            status = 200
        else:
            with state.rate_limit('global', grl,
                                  gcl) as (g_allowed, g_rate, g_concurr):
                metrics.gauge('query.global_concurrent', g_concurr)
                stats.update({
                    'global_rate': g_rate,
                    'global_concurrent': g_concurr
                })

                with state.rate_limit(project_id, prl,
                                      pcl) as (p_allowed, p_rate, p_concurr):
                    stats.update({
                        'project_rate': p_rate,
                        'project_concurrent': p_concurr
                    })
                    timer.mark('rate_limit')

                    if g_allowed and p_allowed:

                        # Experiment, reduce max threads by 1 for each extra concurrent query
                        # that a project has running beyond the first one
                        if 'max_threads' in query_settings and p_concurr > 1:
                            maxt = query_settings['max_threads']
                            query_settings['max_threads'] = max(
                                1, maxt - p_concurr + 1)

                        # Force query to use the first shard replica, which
                        # should have synchronously received any cluster writes
                        # before this query is run.
                        consistent = request.extensions['performance'].get(
                            'consistent', False)
                        stats['consistent'] = consistent
                        if consistent:
                            query_settings['load_balancing'] = 'in_order'
                            query_settings['max_threads'] = 1

                        try:
                            result = NativeDriverReader(client).execute(
                                sql,
                                query_settings,
                                # All queries should already be deduplicated at this point
                                # But the query_id will let us know if they aren't
                                query_id=query_id if use_deduper else None,
                                with_totals=request.query.get_body().get(
                                    'totals', False),
                            )
                            status = 200

                            logger.debug(sql)
                            timer.mark('execute')
                            stats.update({
                                'result_rows': len(result['data']),
                                'result_cols': len(result['meta']),
                            })

                            if use_cache:
                                state.set_result(query_id, result)
                                timer.mark('cache_set')

                        except BaseException as ex:
                            error = str(ex)
                            status = 500
                            logger.exception("Error running query: %s\n%s",
                                             sql, error)
                            if isinstance(ex, ClickHouseError):
                                result = {
                                    'error': {
                                        'type': 'clickhouse',
                                        'code': ex.code,
                                        'message': error,
                                    }
                                }
                            else:
                                result = {
                                    'error': {
                                        'type': 'unknown',
                                        'message': error,
                                    }
                                }

                    else:
                        status = 429
                        Reason = namedtuple('reason', 'scope name val limit')
                        reasons = [
                            Reason('global', 'concurrent', g_concurr, gcl),
                            Reason('global', 'per-second', g_rate, grl),
                            Reason('project', 'concurrent', p_concurr, pcl),
                            Reason('project', 'per-second', p_rate, prl)
                        ]
                        reason = next(
                            (r for r in reasons
                             if r.limit is not None and r.val > r.limit), None)
                        result = {
                            'error': {
                                'type': 'ratelimit',
                                'message': 'rate limit exceeded',
                                'detail': reason and
                                    '{r.scope} {r.name} of {r.val:.0f} exceeds limit of {r.limit:.0f}'.format(r=reason),
                            }
                        }

    stats.update(query_settings)

    if settings.RECORD_QUERIES:
        # send to redis
        state.record_query({
            'request': request.body,
            'sql': sql,
            'timing': timer,
            'stats': stats,
            'status': status,
        })

        # send to datadog
        tags = [
            'status:{}'.format(status),
            'referrer:{}'.format(stats.get('referrer', 'none')),
            'final:{}'.format(stats.get('final', False))
        ]
        mark_tags = ['final:{}'.format(stats.get('final', False))]
        timer.send_metrics_to(metrics, tags=tags, mark_tags=mark_tags)

    result['timing'] = timer

    if settings.STATS_IN_RESPONSE or request.extensions['performance'].get(
            'debug', False):
        result['stats'] = stats
        result['sql'] = sql

    return (result, status)
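
The per-project override lookup near the top of the example (project_per_second_limit_<id> and project_concurrent_limit_<id>, falling back to the global values) is a recurring pattern in these versions of raw_query. A small sketch of the same lookup factored into a helper, assuming the same state module as the example; the helper name is illustrative and not part of snuba:

# Illustrative helper (not part of snuba): resolve per-project rate limits,
# falling back to the globally configured values when no override is set.
def project_rate_limits(project_id, default_per_second, default_concurrent):
    per_second, concurrent = state.get_configs([
        ('project_per_second_limit_{}'.format(project_id), default_per_second),
        ('project_concurrent_limit_{}'.format(project_id), default_concurrent),
    ])
    return per_second, concurrent

# Usage mirroring the example:
#   prl, pcl = project_rate_limits(project_id, prl, pcl)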
Example No. 4
def raw_query(
    # TODO: Passing the whole clickhouse query here is needed as long
    # as the execute method depends on it. Otherwise we can make this
    # file rely either entirely on clickhouse query or entirely on
    # the formatter.
    clickhouse_query: Union[Query, CompositeQuery[Table]],
    request_settings: RequestSettings,
    formatted_query: FormattedQuery,
    reader: Reader,
    timer: Timer,
    query_metadata: SnubaQueryMetadata,
    stats: MutableMapping[str, Any],
    trace_id: Optional[str] = None,
) -> QueryResult:
    """
    Submits a raw SQL query to the DB and does some post-processing on it to
    fix some of the formatting issues in the result JSON.
    This function is not supposed to depend on anything higher level than the clickhouse
    query. If this function ends up depending on the dataset, something is wrong.
    """
    all_confs = state.get_all_configs()
    query_settings: MutableMapping[str, Any] = {
        k.split("/", 1)[1]: v
        for k, v in all_confs.items() if k.startswith("query_settings/")
    }

    timer.mark("get_configs")

    sql = formatted_query.get_sql()

    update_with_status = partial(
        update_query_metadata_and_stats,
        clickhouse_query,
        sql,
        timer,
        stats,
        query_metadata,
        query_settings,
        trace_id,
    )

    execute_query_strategy = (
        execute_query_with_readthrough_caching
        if state.get_config("use_readthrough_query_cache", 1)
        else execute_query_with_caching
    )

    try:
        result = execute_query_strategy(
            clickhouse_query,
            request_settings,
            formatted_query,
            reader,
            timer,
            stats,
            query_settings,
        )
    except Exception as cause:
        if isinstance(cause, RateLimitExceeded):
            stats = update_with_status(QueryStatus.RATE_LIMITED)
        else:
            with configure_scope() as scope:
                if isinstance(cause, ClickhouseError):
                    scope.fingerprint = ["{{default}}", str(cause.code)]
                logger.exception("Error running query: %s\n%s", sql, cause)
            stats = update_with_status(QueryStatus.ERROR)
        raise QueryException({"stats": stats, "sql": sql}) from cause
    else:
        stats = update_with_status(QueryStatus.SUCCESS)
        return QueryResult(result, {"stats": stats, "sql": sql})
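
The functools.partial call in this version pre-binds every argument of update_query_metadata_and_stats except the final status, so each exit path (success, rate-limited, error) only has to supply the outcome. A self-contained sketch of that binding pattern with a stand-in recorder function; the names here are illustrative, not snuba's:

# Stand-in for update_query_metadata_and_stats, used only to illustrate
# how partial defers the status argument to each exit path.
from functools import partial

def record_outcome(sql, timer_marks, stats, status):
    return {**stats, 'status': status, 'sql': sql, 'marks': timer_marks}

update_with_status = partial(record_outcome, "SELECT 1", ['get_configs'], {'referrer': 'test'})

print(update_with_status('success'))       # only the outcome differs per call site
print(update_with_status('rate-limited'))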
Example No. 5
def raw_query(body, sql, client, timer, stats=None):
    """
    Submit a raw SQL query to clickhouse and do some post-processing on it to
    fix some of the formatting issues in the result JSON
    """
    project_ids = to_list(body['project'])
    project_id = project_ids[0] if project_ids else 0  # TODO rate limit on every project in the list?
    stats = stats or {}
    grl, gcl, prl, pcl, use_cache = state.get_configs([
        ('global_per_second_limit', 1000),
        ('global_concurrent_limit', 1000),
        ('project_per_second_limit', 1000),
        ('project_concurrent_limit', 1000),
        ('use_cache', 0),
    ])

    # Specific projects can have their rate limits overridden
    prl, pcl = state.get_configs([
        ('project_per_second_limit_{}'.format(project_id), prl),
        ('project_concurrent_limit_{}'.format(project_id), pcl),
    ])

    all_confs = state.get_all_configs()
    query_settings = {
        k.split('/', 1)[1]: v
        for k, v in six.iteritems(all_confs)
        if k.startswith('query_settings/')
    }

    timer.mark('get_configs')

    query_id = md5(force_bytes(sql)).hexdigest()
    with state.deduper(query_id) as is_dupe:
        timer.mark('dedupe_wait')

        result = state.get_result(query_id) if use_cache else None
        timer.mark('cache_get')

        stats.update({
            'is_duplicate': is_dupe,
            'query_id': query_id,
            'use_cache': bool(use_cache),
            'cache_hit': bool(result),
        })

        if result:
            status = 200
        else:
            with state.rate_limit('global', grl, gcl) as (g_allowed, g_rate, g_concurr):
                metrics.gauge('query.global_concurrent', g_concurr)
                stats.update({'global_rate': g_rate, 'global_concurrent': g_concurr})

                with state.rate_limit(project_id, prl, pcl) as (p_allowed, p_rate, p_concurr):
                    stats.update({'project_rate': p_rate, 'project_concurrent': p_concurr})
                    timer.mark('rate_limit')

                    if g_allowed and p_allowed:

                        # Experiment, reduce max threads by 1 for each extra concurrent query
                        # that a project has running beyond the first one
                        if 'max_threads' in query_settings and p_concurr > 1:
                            maxt = query_settings['max_threads']
                            query_settings['max_threads'] = max(1, maxt - p_concurr + 1)

                        # Force query to use the first shard replica, which
                        # should have synchronously received any cluster writes
                        # before this query is run.
                        consistent = body.get('consistent', False)
                        stats['consistent'] = consistent
                        if consistent:
                            query_settings['load_balancing'] = 'in_order'
                            query_settings['max_threads'] = 1

                        try:
                            data, meta = client.execute(
                                sql,
                                with_column_types=True,
                                settings=query_settings,
                                # All queries should already be deduplicated at this point
                                # But the query_id will let us know if they aren't
                                query_id=query_id
                            )
                            data, meta = scrub_ch_data(data, meta)
                            status = 200
                            if body.get('totals', False):
                                assert len(data) > 0
                                data, totals = data[:-1], data[-1]
                                result = {'data': data, 'meta': meta, 'totals': totals}
                            else:
                                result = {'data': data, 'meta': meta}

                            logger.debug(sql)
                            timer.mark('execute')
                            stats.update({
                                'result_rows': len(data),
                                'result_cols': len(meta),
                            })

                            if use_cache:
                                state.set_result(query_id, result)
                                timer.mark('cache_set')

                        except BaseException as ex:
                            error = six.text_type(ex)
                            status = 500
                            logger.exception("Error running query: %s\n%s", sql, error)
                            if isinstance(ex, ClickHouseError):
                                result = {'error': {
                                    'type': 'clickhouse',
                                    'code': ex.code,
                                    'message': error,
                                }}
                            else:
                                result = {'error': {
                                    'type': 'unknown',
                                    'message': error,
                                }}

                    else:
                        status = 429
                        reasons = [
                            ('global', 'concurrent', g_concurr, gcl),
                            ('global', 'per-second', g_rate, grl),
                            ('project', 'concurrent', p_concurr, pcl),
                            ('project', 'per-second', p_rate, prl)
                        ]
                        reason = next((r for r in reasons if r[2] > r[3]), None)
                        result = {'error': {
                            'type': 'ratelimit',
                            'message': 'rate limit exceeded',
                            'detail': reason and '{} {} of {:.0f} exceeds limit of {:.0f}'.format(*reason)
                        }}

    stats.update(query_settings)

    if settings.RECORD_QUERIES:
        # send to redis
        state.record_query({
            'request': body,
            'sql': sql,
            'timing': timer,
            'stats': stats,
            'status': status,
        })

        # send to datadog
        tags = [
            'status:{}'.format(status),
            'referrer:{}'.format(stats.get('referrer', 'none')),
            'final:{}'.format(stats.get('final', False))
        ]
        mark_tags = [
            'final:{}'.format(stats.get('final', False))
        ]
        timer.send_metrics_to(metrics, tags=tags, mark_tags=mark_tags)

    result['timing'] = timer

    if settings.STATS_IN_RESPONSE or body.get('debug', False):
        result['stats'] = stats
        result['sql'] = sql

    return (result, status)
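
Several of these versions shrink ClickHouse's max_threads setting by one thread for every extra concurrent query a project already has running, never going below one. A short worked sketch of that arithmetic (the function name is illustrative):

# Worked example of the max_threads reduction used in raw_query:
# one thread is given up for each concurrent project query beyond the first.
def reduced_max_threads(configured_max_threads, project_concurrent):
    return max(1, configured_max_threads - project_concurrent + 1)

assert reduced_max_threads(8, 1) == 8   # a single query keeps the full allowance
assert reduced_max_threads(8, 3) == 6   # two extra concurrent queries give up two threads
assert reduced_max_threads(8, 20) == 1  # never drops below one thread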
Example No. 6
def raw_query(
    request: Request,
    query: DictClickhouseQuery,
    reader: Reader[ClickhouseQuery],
    timer: Timer,
    stats: Optional[MutableMapping[str, Any]] = None,
) -> ClickhouseQueryResult:
    """
    Submit a raw SQL query to clickhouse and do some post-processing on it to
    fix some of the formatting issues in the result JSON
    """

    stats = stats or {}
    use_cache, use_deduper, uc_max = state.get_configs(
        [("use_cache", 0), ("use_deduper", 1), ("uncompressed_cache_max_cols", 5)]
    )

    all_confs = state.get_all_configs()
    query_settings = {
        k.split("/", 1)[1]: v
        for k, v in all_confs.items()
        if k.startswith("query_settings/")
    }

    # Experiment, if we are going to grab more than X columns worth of data,
    # don't use uncompressed_cache in clickhouse, or result cache in snuba.
    if len(request.query.get_all_referenced_columns()) > uc_max:
        query_settings["use_uncompressed_cache"] = 0
        use_cache = 0

    timer.mark("get_configs")

    sql = query.format_sql()
    query_id = md5(force_bytes(sql)).hexdigest()
    with state.deduper(query_id if use_deduper else None) as is_dupe:
        timer.mark("dedupe_wait")

        result = cache.get(query_id) if use_cache else None
        timer.mark("cache_get")

        stats.update(
            {
                "is_duplicate": is_dupe,
                "query_id": query_id,
                "use_cache": bool(use_cache),
                "cache_hit": bool(result),
            }
        )

        if not result:
            try:
                with RateLimitAggregator(
                    request.settings.get_rate_limit_params()
                ) as rate_limit_stats_container:
                    stats.update(rate_limit_stats_container.to_dict())
                    timer.mark("rate_limit")

                    project_rate_limit_stats = rate_limit_stats_container.get_stats(
                        PROJECT_RATE_LIMIT_NAME
                    )

                    if (
                        "max_threads" in query_settings
                        and project_rate_limit_stats is not None
                        and project_rate_limit_stats.concurrent > 1
                    ):
                        maxt = query_settings["max_threads"]
                        query_settings["max_threads"] = max(
                            1, maxt - project_rate_limit_stats.concurrent + 1
                        )

                    # Force query to use the first shard replica, which
                    # should have synchronously received any cluster writes
                    # before this query is run.
                    consistent = request.settings.get_consistent()
                    stats["consistent"] = consistent
                    if consistent:
                        query_settings["load_balancing"] = "in_order"
                        query_settings["max_threads"] = 1

                    try:
                        result = reader.execute(
                            query,
                            query_settings,
                            # All queries should already be deduplicated at this point
                            # But the query_id will let us know if they aren't
                            query_id=query_id if use_deduper else None,
                            with_totals=request.query.has_totals(),
                        )

                        timer.mark("execute")
                        stats.update(
                            {
                                "result_rows": len(result["data"]),
                                "result_cols": len(result["meta"]),
                            }
                        )

                        if use_cache:
                            cache.set(query_id, result)
                            timer.mark("cache_set")

                    except BaseException as ex:
                        error = str(ex)
                        logger.exception("Error running query: %s\n%s", sql, error)
                        stats = log_query_and_update_stats(
                            request, sql, timer, stats, "error", query_settings
                        )
                        meta = {}
                        if isinstance(ex, ClickHouseError):
                            err_type = "clickhouse"
                            meta["code"] = ex.code
                        else:
                            err_type = "unknown"
                        raise RawQueryException(
                            err_type=err_type,
                            message=error,
                            stats=stats,
                            sql=sql,
                            **meta,
                        )
            except RateLimitExceeded as ex:
                stats = log_query_and_update_stats(
                    request, sql, timer, stats, "rate-limited", query_settings
                )
                raise RawQueryException(
                    err_type="rate-limited",
                    message="rate limit exceeded",
                    stats=stats,
                    sql=sql,
                    detail=str(ex),
                )

    stats = log_query_and_update_stats(
        request, sql, timer, stats, "success", query_settings
    )

    if settings.STATS_IN_RESPONSE or request.settings.get_debug():
        result["stats"] = stats
        result["sql"] = sql

    return result
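
Unlike the earlier versions that return a (result, status) tuple, this one reports failures by raising RawQueryException and leaves the HTTP mapping to the caller. A hypothetical sketch of such a caller, using a stand-in exception class rather than snuba's RawQueryException; the attribute names and the 429/500 mapping are assumptions that mirror the status codes used in the earlier examples:

# Hypothetical caller-side handling; QueryError is a stand-in exception,
# not snuba's RawQueryException. The status mapping mirrors the 429/500
# codes used in the earlier raw_query versions.
class QueryError(Exception):
    def __init__(self, err_type, message, stats, sql, **meta):
        super().__init__(message)
        self.err_type = err_type
        self.message = message
        self.stats = stats
        self.sql = sql
        self.meta = meta

def run_query(execute):
    try:
        return execute(), 200
    except QueryError as e:
        status = 429 if e.err_type == 'rate-limited' else 500
        payload = {'error': {'type': e.err_type, 'message': e.message, **e.meta},
                   'stats': e.stats, 'sql': e.sql}
        return payload, status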
Example No. 7
def raw_query(
    request: Request,
    query: ClickhouseQuery,
    client: ClickhousePool,
    timer: Timer,
    stats=None,
) -> QueryResult:
    """
    Submit a raw SQL query to clickhouse and do some post-processing on it to
    fix some of the formatting issues in the result JSON
    """
    from snuba.clickhouse.native import NativeDriverReader

    stats = stats or {}
    use_cache, use_deduper, uc_max = state.get_configs([
        ('use_cache', 0),
        ('use_deduper', 1),
        ('uncompressed_cache_max_cols', 5),
    ])

    all_confs = state.get_all_configs()
    query_settings = {
        k.split('/', 1)[1]: v
        for k, v in all_confs.items() if k.startswith('query_settings/')
    }

    # Experiment, if we are going to grab more than X columns worth of data,
    # don't use uncompressed_cache in clickhouse, or result cache in snuba.
    if len(all_referenced_columns(request.query)) > uc_max:
        query_settings['use_uncompressed_cache'] = 0
        use_cache = 0

    timer.mark('get_configs')

    sql = query.format_sql()
    query_id = md5(force_bytes(sql)).hexdigest()
    with state.deduper(query_id if use_deduper else None) as is_dupe:
        timer.mark('dedupe_wait')

        result = state.get_result(query_id) if use_cache else None
        timer.mark('cache_get')

        stats.update({
            'is_duplicate': is_dupe,
            'query_id': query_id,
            'use_cache': bool(use_cache),
            'cache_hit': bool(result)
        })

        if result:
            status = 200
        else:
            try:
                with RateLimitAggregator(
                        request.settings.get_rate_limit_params()
                ) as rate_limit_stats_container:
                    stats.update(rate_limit_stats_container.to_dict())
                    timer.mark('rate_limit')

                    project_rate_limit_stats = rate_limit_stats_container.get_stats(
                        PROJECT_RATE_LIMIT_NAME)

                    if 'max_threads' in query_settings and \
                            project_rate_limit_stats is not None and \
                            project_rate_limit_stats.concurrent > 1:
                        maxt = query_settings['max_threads']
                        query_settings['max_threads'] = max(
                            1, maxt - project_rate_limit_stats.concurrent + 1)

                    # Force query to use the first shard replica, which
                    # should have synchronously received any cluster writes
                    # before this query is run.
                    consistent = request.settings.get_consistent()
                    stats['consistent'] = consistent
                    if consistent:
                        query_settings['load_balancing'] = 'in_order'
                        query_settings['max_threads'] = 1

                    try:
                        result = NativeDriverReader(client).execute(
                            query,
                            query_settings,
                            # All queries should already be deduplicated at this point
                            # But the query_id will let us know if they aren't
                            query_id=query_id if use_deduper else None,
                            with_totals=request.query.has_totals(),
                        )
                        status = 200

                        logger.debug(sql)
                        timer.mark('execute')
                        stats.update({
                            'result_rows': len(result['data']),
                            'result_cols': len(result['meta']),
                        })

                        if use_cache:
                            state.set_result(query_id, result)
                            timer.mark('cache_set')

                    except BaseException as ex:
                        error = str(ex)
                        status = 500
                        logger.exception("Error running query: %s\n%s", sql,
                                         error)
                        if isinstance(ex, ClickHouseError):
                            result = {
                                'error': {
                                    'type': 'clickhouse',
                                    'code': ex.code,
                                    'message': error,
                                }
                            }
                        else:
                            result = {
                                'error': {
                                    'type': 'unknown',
                                    'message': error,
                                }
                            }

            except RateLimitExceeded as ex:
                error = str(ex)
                status = 429
                result = {
                    'error': {
                        'type': 'ratelimit',
                        'message': 'rate limit exceeded',
                        'detail': error
                    }
                }

    stats.update(query_settings)

    if settings.RECORD_QUERIES:
        # send to redis
        state.record_query({
            'request': request.body,
            'sql': sql,
            'timing': timer,
            'stats': stats,
            'status': status,
        })

        timer.send_metrics_to(metrics,
                              tags={
                                  'status': str(status),
                                  'referrer': stats.get('referrer', 'none'),
                                  'final': str(stats.get('final', False)),
                              },
                              mark_tags={
                                  'final': str(stats.get('final', False)),
                              })

    result['timing'] = timer

    if settings.STATS_IN_RESPONSE or request.settings.get_debug():
        result['stats'] = stats
        result['sql'] = sql

    return QueryResult(result, status)
Example No. 8
def raw_query(
    request: Request,
    query: ClickhouseQuery,
    reader: Reader,
    timer: Timer,
    query_metadata: SnubaQueryMetadata,
    stats: MutableMapping[str, Any],
    trace_id: Optional[str] = None,
) -> RawQueryResult:
    """
    Submits a raw SQL query to the DB and does some post-processing on it to
    fix some of the formatting issues in the result JSON.
    This function is not supposed to depend on anything higher level than the storage
    query (ClickhouseQuery as of now). If this function ends up depending on the
    dataset, something is wrong.

    TODO: As soon as we have a StorageQuery abstraction remove all the references
    to the original query from the request.
    """

    use_cache, use_deduper, uc_max = state.get_configs([
        ("use_cache", settings.USE_RESULT_CACHE),
        ("use_deduper", 1),
        ("uncompressed_cache_max_cols", 5),
    ])

    all_confs = state.get_all_configs()
    query_settings: MutableMapping[str, Any] = {
        k.split("/", 1)[1]: v
        for k, v in all_confs.items() if k.startswith("query_settings/")
    }

    # Experiment, if we are going to grab more than X columns worth of data,
    # don't use uncompressed_cache in clickhouse, or result cache in snuba.
    if len(request.query.get_all_referenced_columns()) > uc_max:
        query_settings["use_uncompressed_cache"] = 0
        use_cache = 0

    timer.mark("get_configs")

    sql = query.format_sql()
    query_id = md5(force_bytes(sql)).hexdigest()
    with state.deduper(query_id if use_deduper else None) as is_dupe:
        timer.mark("dedupe_wait")

        result = cache.get(query_id) if use_cache else None
        timer.mark("cache_get")

        stats.update({
            "is_duplicate": is_dupe,
            "query_id": query_id,
            "use_cache": bool(use_cache),
            "cache_hit": bool(result),
        })

        update_with_status = partial(
            update_query_metadata_and_stats,
            request,
            sql,
            timer,
            stats,
            query_metadata,
            query_settings,
            trace_id,
        )

        if not result:
            try:
                with RateLimitAggregator(
                        request.settings.get_rate_limit_params()
                ) as rate_limit_stats_container:
                    stats.update(rate_limit_stats_container.to_dict())
                    timer.mark("rate_limit")

                    project_rate_limit_stats = rate_limit_stats_container.get_stats(
                        PROJECT_RATE_LIMIT_NAME)

                    if ("max_threads" in query_settings
                            and project_rate_limit_stats is not None
                            and project_rate_limit_stats.concurrent > 1):
                        maxt = query_settings["max_threads"]
                        query_settings["max_threads"] = max(
                            1, maxt - project_rate_limit_stats.concurrent + 1)

                    # Force query to use the first shard replica, which
                    # should have synchronously received any cluster writes
                    # before this query is run.
                    consistent = request.settings.get_consistent()
                    stats["consistent"] = consistent
                    if consistent:
                        query_settings["load_balancing"] = "in_order"
                        query_settings["max_threads"] = 1

                    try:
                        result = reader.execute(
                            query,
                            query_settings,
                            # All queries should already be deduplicated at this point
                            # But the query_id will let us know if they aren't
                            query_id=query_id if use_deduper else None,
                            with_totals=request.query.has_totals(),
                        )

                        timer.mark("execute")
                        stats.update({
                            "result_rows": len(result["data"]),
                            "result_cols": len(result["meta"]),
                        })

                        if use_cache:
                            cache.set(query_id, result)
                            timer.mark("cache_set")

                    except BaseException as ex:
                        error = str(ex)
                        logger.exception("Error running query: %s\n%s", sql,
                                         error)
                        stats = update_with_status("error")
                        meta = {}
                        if isinstance(ex, ClickhouseError):
                            err_type = "clickhouse"
                            meta["code"] = ex.code
                        else:
                            err_type = "unknown"
                        raise RawQueryException(
                            err_type=err_type,
                            message=error,
                            stats=stats,
                            sql=sql,
                            **meta,
                        )
            except RateLimitExceeded as ex:
                stats = update_with_status("rate-limited")
                raise RawQueryException(
                    err_type="rate-limited",
                    message="rate limit exceeded",
                    stats=stats,
                    sql=sql,
                    detail=str(ex),
                )

    stats = update_with_status("success")

    return RawQueryResult(result, {"stats": stats, "sql": sql})
Example No. 9
def raw_query(
    # TODO: Passing the whole clickhouse query here is needed as long
    # as the execute method depends on it. Otherwise we can make this
    # file rely either entirely on clickhouse query or entirely on
    # the formatter.
    clickhouse_query: Union[Query, CompositeQuery[Table]],
    query_settings: QuerySettings,
    formatted_query: FormattedQuery,
    reader: Reader,
    timer: Timer,
    query_metadata: SnubaQueryMetadata,
    stats: MutableMapping[str, Any],
    trace_id: Optional[str] = None,
    robust: bool = False,
) -> QueryResult:
    """
    Submits a raw SQL query to the DB and does some post-processing on it to
    fix some of the formatting issues in the result JSON.
    This function is not supposed to depend on anything higher level than the clickhouse
    query. If this function ends up depending on the dataset, something is wrong.
    """
    all_confs = state.get_all_configs()
    clickhouse_query_settings: MutableMapping[str, Any] = {
        k.split("/", 1)[1]: v
        for k, v in all_confs.items()
        if k.startswith("query_settings/")
    }

    timer.mark("get_configs")

    sql = formatted_query.get_sql()

    update_with_status = partial(
        update_query_metadata_and_stats,
        clickhouse_query,
        sql,
        timer,
        stats,
        query_metadata,
        clickhouse_query_settings,
        trace_id,
    )

    execute_query_strategy = (
        execute_query_with_readthrough_caching
        if state.get_config("use_readthrough_query_cache", 1)
        else execute_query_with_caching
    )

    try:
        result = execute_query_strategy(
            clickhouse_query,
            query_settings,
            formatted_query,
            reader,
            timer,
            stats,
            clickhouse_query_settings,
            robust=robust,
        )
    except Exception as cause:
        if isinstance(cause, RateLimitExceeded):
            stats = update_with_status(QueryStatus.RATE_LIMITED)
        else:
            error_code = None
            with configure_scope() as scope:
                if isinstance(cause, ClickhouseError):
                    error_code = cause.code
                    scope.fingerprint = ["{{default}}", str(cause.code)]
                    if scope.span:
                        if cause.code == errors.ErrorCodes.TOO_SLOW:
                            sentry_sdk.set_tag("timeout", "predicted")
                        elif cause.code == errors.ErrorCodes.TIMEOUT_EXCEEDED:
                            sentry_sdk.set_tag("timeout", "query_timeout")
                        elif cause.code in (
                            errors.ErrorCodes.SOCKET_TIMEOUT,
                            errors.ErrorCodes.NETWORK_ERROR,
                        ):
                            sentry_sdk.set_tag("timeout", "network")
                elif isinstance(
                    cause,
                    (TimeoutError, ExecutionTimeoutError, TigerExecutionTimeoutError),
                ):
                    if scope.span:
                        sentry_sdk.set_tag("timeout", "cache_timeout")

                logger.exception("Error running query: %s\n%s", sql, cause)
            stats = update_with_status(QueryStatus.ERROR, error_code=error_code)
        raise QueryException(
            {
                "stats": stats,
                "sql": sql,
                "experiments": clickhouse_query.get_experiments(),
            }
        ) from cause
    else:
        stats = update_with_status(QueryStatus.SUCCESS, result["profile"])
        return QueryResult(
            result,
            {
                "stats": stats,
                "sql": sql,
                "experiments": clickhouse_query.get_experiments(),
            },
        )
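
The execute_query_strategy selection near the top of this version is a config-gated toggle: state.get_config("use_readthrough_query_cache", 1) decides at call time which execution path to use, defaulting to the read-through cache. A minimal sketch of the same toggle pattern with stand-in functions (the names are illustrative, not snuba's):

# Illustrative config-gated strategy toggle, mirroring the selection of
# execute_query_with_readthrough_caching vs execute_query_with_caching above.
def execute_with_readthrough_cache(sql):
    return 'readthrough:' + sql

def execute_with_plain_cache(sql):
    return 'plain:' + sql

def pick_strategy(get_config):
    return (execute_with_readthrough_cache
            if get_config('use_readthrough_query_cache', 1)
            else execute_with_plain_cache)

# With the flag unset, the default of 1 keeps the read-through path:
strategy = pick_strategy(lambda key, default=None: {}.get(key, default))
assert strategy('SELECT 1') == 'readthrough:SELECT 1'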