Example #1
def do_post_processing(
    self,
    project_ids: Sequence[int],
    query: Query,
    request_settings: RequestSettings,
) -> None:
    if not request_settings.get_turbo():
        final, exclude_group_ids = get_projects_query_flags(
            project_ids, self.__replacer_state_name)
        if not final and exclude_group_ids:
            # If the number of groups to exclude exceeds our limit, the query
            # should just use final instead of the exclusion set.
            max_group_ids_exclude = get_config(
                "max_group_ids_exclude",
                settings.REPLACER_MAX_GROUP_IDS_TO_EXCLUDE)
            if len(exclude_group_ids) > max_group_ids_exclude:
                query.set_final(True)
            else:
                query.add_conditions([(["assumeNotNull", ["group_id"]],
                                       "NOT IN", exclude_group_ids)])
                query.add_condition_to_ast(
                    not_in_condition(
                        None,
                        FunctionCall(None, "assumeNotNull",
                                     (Column(None, "group_id", None), )),
                        [Literal(None, p) for p in exclude_group_ids],
                    ))
        else:
            query.set_final(final)
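
The comment above encodes a size trade-off: a huge NOT IN list can cost more than re-deduplicating rows with FINAL. A minimal, self-contained sketch of that decision, with an illustrative constant standing in for the max_group_ids_exclude config value:

from typing import Sequence

MAX_GROUP_IDS_EXCLUDE = 256  # illustrative stand-in for the runtime config value

def choose_strategy(final: bool, exclude_group_ids: Sequence[int]) -> str:
    # A replacement already in flight (final=True) or an oversized exclusion
    # list both fall back to FINAL deduplication.
    if final or len(exclude_group_ids) > MAX_GROUP_IDS_EXCLUDE:
        return "final"
    if exclude_group_ids:
        return "exclude"  # cheaper path: group_id NOT IN (...)
    return "plain"

assert choose_strategy(False, [1, 2, 3]) == "exclude"
assert choose_strategy(False, list(range(1000))) == "final"
assert choose_strategy(False, []) == "plain"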
Example #2
def _apply_turbo_sampling_if_needed(
    clickhouse_query: Union[Query, CompositeQuery[Table]],
    request_settings: RequestSettings,
) -> None:
    """
    TODO: Remove this method entirely and move the sampling logic
    into a query processor.
    """
    if isinstance(clickhouse_query, Query):
        if (request_settings.get_turbo()
                and not clickhouse_query.get_from_clause().sampling_rate):
            clickhouse_query.set_from_clause(
                replace(
                    clickhouse_query.get_from_clause(),
                    sampling_rate=snuba_settings.TURBO_SAMPLE_RATE,
                ))
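
The set_from_clause/replace combination works because the Table data source is an immutable dataclass: instead of mutating sampling_rate in place, a modified copy is swapped in. A stripped-down sketch of the same pattern (the Table fields and the 0.1 rate here are illustrative, not Snuba's definitions):

from dataclasses import dataclass, replace
from typing import Optional

@dataclass(frozen=True)
class Table:
    table_name: str
    sampling_rate: Optional[float] = None

source = Table("events_dist")
sampled = replace(source, sampling_rate=0.1)  # 0.1 stands in for TURBO_SAMPLE_RATE
assert source.sampling_rate is None
assert sampled.sampling_rate == 0.1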
Example #3
    def process_query(self, query: Query,
                      request_settings: RequestSettings) -> None:
        if request_settings.get_turbo():
            return

        project_ids = get_project_ids_in_query_ast(query,
                                                   self.__project_column)

        set_final = False
        condition_to_add = None
        if project_ids:
            final, exclude_group_ids = get_projects_query_flags(
                list(project_ids),
                self.__replacer_state_name,
            )
            if final:
                metrics.increment("final", tags={"cause": "final_flag"})
            if not final and exclude_group_ids:
                # If the number of groups to exclude exceeds our limit, the query
                # should just use final instead of the exclusion set.
                max_group_ids_exclude = get_config(
                    "max_group_ids_exclude",
                    settings.REPLACER_MAX_GROUP_IDS_TO_EXCLUDE)
                if len(exclude_group_ids) > max_group_ids_exclude:
                    metrics.increment("final", tags={"cause": "max_groups"})
                    set_final = True
                else:
                    condition_to_add = (
                        ["assumeNotNull", ["group_id"]],
                        "NOT IN",
                        exclude_group_ids,
                    )
                    query.add_condition_to_ast(
                        not_in_condition(
                            None,
                            FunctionCall(None, "assumeNotNull",
                                         (Column(None, None, "group_id"), )),
                            [Literal(None, p) for p in exclude_group_ids],
                        ))
            else:
                set_final = final

        query.set_final(set_final)
        if condition_to_add:
            query.add_conditions([condition_to_add])
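
Compared with Example #1, this version separates deciding from applying: set_final and condition_to_add are computed first and the query is mutated once at the end, so FINAL is also explicitly reset to False when nothing needs excluding. A toy sketch of that decide-then-apply shape, with hypothetical names:

def decide(final: bool, exclude: list, limit: int) -> tuple:
    # Compute the outcome without touching the query object.
    set_final, condition = False, None
    if not final and exclude:
        if len(exclude) > limit:
            set_final = True  # exclusion list too big: fall back to FINAL
        else:
            condition = ("group_id", "NOT IN", exclude)
    else:
        set_final = final
    return set_final, condition

assert decide(False, [1, 2], 10) == (False, ("group_id", "NOT IN", [1, 2]))
assert decide(False, list(range(20)), 10) == (True, None)
assert decide(True, [], 10) == (True, None)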
Example #4
def do_post_processing(
        self,
        project_ids: Sequence[int],
        query: Query,
        request_settings: RequestSettings,
) -> None:
    if not request_settings.get_turbo():
        final, exclude_group_ids = get_projects_query_flags(project_ids)
        if not final and exclude_group_ids:
            # If the number of groups to exclude exceeds our limit, the query
            # should just use final instead of the exclusion set.
            max_group_ids_exclude = get_config('max_group_ids_exclude', settings.REPLACER_MAX_GROUP_IDS_TO_EXCLUDE)
            if len(exclude_group_ids) > max_group_ids_exclude:
                query.set_final(True)
            else:
                query.add_conditions([(['assumeNotNull', ['group_id']], 'NOT IN', exclude_group_ids)])
        else:
            query.set_final(final)
Example #5
def format_query(query: FormattableQuery,
                 settings: RequestSettings) -> FormattedQuery:
    """
    Formats a Clickhouse Query from the AST representation into an
    intermediate structure that can either be serialized into a string
    (for clickhouse) or extracted as a sequence (for logging and tracing).

    This is the entry point for any type of query, whether simple or
    composite.

    TODO: Remove this method entirely and move the sampling logic
    into a query processor.
    """

    if isinstance(query, Query):
        if settings.get_turbo() and not query.get_from_clause().sampling_rate:
            query.set_from_clause(
                replace(
                    query.get_from_clause(),
                    sampling_rate=snuba_settings.TURBO_SAMPLE_RATE,
                ))
    return FormattedQuery(_format_query_content(query))
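
Per the docstring, the return value is an intermediate structure rather than a raw SQL string. A hedged sketch of that idea, assuming nothing about the real FormattedQuery API beyond the two uses the docstring names (serialize for ClickHouse, flatten for logging/tracing); all names below are illustrative:

from typing import List, Tuple

class StringNode:
    """A formatted clause kept as data instead of a raw string fragment."""
    def __init__(self, value: str) -> None:
        self.value = value

class FormattedQuerySketch:
    def __init__(self, content: List[Tuple[str, StringNode]]) -> None:
        self.__content = content

    def get_sql(self) -> str:
        # Serialize into one string for ClickHouse.
        return " ".join(node.value for _, node in self.__content)

    def flatten(self) -> List[Tuple[str, str]]:
        # Extract as a (clause name, clause text) sequence for logging/tracing.
        return [(name, node.value) for name, node in self.__content]

q = FormattedQuerySketch([("select", StringNode("SELECT count()")),
                          ("from", StringNode("FROM events SAMPLE 0.1"))])
assert q.get_sql() == "SELECT count() FROM events SAMPLE 0.1"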
Example #6
    def __init__(
        self,
        dataset: Dataset,
        query: Query,
        settings: RequestSettings,
    ) -> None:
        parsing_context = ParsingContext()

        aggregate_exprs = [
            column_expr(dataset, col, query, parsing_context, alias, agg)
            for (agg, col, alias) in query.get_aggregations()
        ]
        groupby = util.to_list(query.get_groupby())
        group_exprs = [
            column_expr(dataset, gb, query, parsing_context) for gb in groupby
        ]
        column_names = query.get_selected_columns() or []
        selected_cols = [
            column_expr(dataset, util.tuplify(colname), query, parsing_context)
            for colname in column_names
        ]
        select_clause = u"SELECT {}".format(
            ", ".join(group_exprs + aggregate_exprs + selected_cols))

        from_clause = u"FROM {}".format(query.get_data_source().format_from())

        if query.get_final():
            from_clause = u"{} FINAL".format(from_clause)

        if not query.get_data_source().supports_sample():
            sample_rate = None
        else:
            if query.get_sample():
                sample_rate = query.get_sample()
            elif settings.get_turbo():
                sample_rate = snuba_settings.TURBO_SAMPLE_RATE
            else:
                sample_rate = None

        if sample_rate:
            from_clause = u"{} SAMPLE {}".format(from_clause, sample_rate)

        join_clause = ""
        if query.get_arrayjoin():
            join_clause = u"ARRAY JOIN {}".format(query.get_arrayjoin())

        where_clause = ""
        if query.get_conditions():
            where_clause = u"WHERE {}".format(
                conditions_expr(dataset, query.get_conditions(), query,
                                parsing_context))

        prewhere_clause = ""
        if query.get_prewhere():
            prewhere_clause = u"PREWHERE {}".format(
                conditions_expr(dataset, query.get_prewhere(), query,
                                parsing_context))

        group_clause = ""
        if groupby:
            group_clause = "GROUP BY ({})".format(", ".join(
                column_expr(dataset, gb, query, parsing_context)
                for gb in groupby))
            if query.has_totals():
                group_clause = "{} WITH TOTALS".format(group_clause)

        having_clause = ""
        having_conditions = query.get_having()
        if having_conditions:
            assert groupby, "found HAVING clause with no GROUP BY"
            having_clause = u"HAVING {}".format(
                conditions_expr(dataset, having_conditions, query,
                                parsing_context))

        order_clause = ""
        if query.get_orderby():
            orderby = [
                column_expr(dataset, util.tuplify(ob), query, parsing_context)
                for ob in util.to_list(query.get_orderby())
            ]
            orderby = [
                u"{} {}".format(ob.lstrip("-"),
                                "DESC" if ob.startswith("-") else "ASC")
                for ob in orderby
            ]
            order_clause = u"ORDER BY {}".format(", ".join(orderby))

        limitby_clause = ""
        if query.get_limitby() is not None:
            limitby_clause = "LIMIT {} BY {}".format(*query.get_limitby())

        limit_clause = ""
        if query.get_limit() is not None:
            limit_clause = "LIMIT {}, {}".format(query.get_offset(),
                                                 query.get_limit())

        self.__formatted_query = " ".join([
            c for c in [
                select_clause,
                from_clause,
                join_clause,
                prewhere_clause,
                where_clause,
                group_clause,
                having_clause,
                order_clause,
                limitby_clause,
                limit_clause,
            ] if c
        ])
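
The constructor builds each clause independently, leaves it as an empty string when unused, and joins only the non-empty ones at the end. Running toy inputs through that same final join shows how optional clauses drop out:

# Toy clause values; empty strings mark clauses the query did not use.
clauses = [
    "SELECT project_id, count()",
    "FROM sentry_dist FINAL SAMPLE 0.1",
    "",                                # no ARRAY JOIN
    "PREWHERE project_id IN (1, 2)",
    "WHERE timestamp >= toDateTime('2020-01-01T00:00:00')",
    "GROUP BY (project_id)",
    "",                                # no HAVING
    "ORDER BY count() DESC",
    "",                                # no LIMIT BY
    "LIMIT 0, 1000",
]
sql = " ".join(c for c in clauses if c)
# -> SELECT ... FROM ... PREWHERE ... WHERE ... GROUP BY ... ORDER BY ... LIMIT 0, 1000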
Example #7
    def __init__(
        self,
        dataset: Dataset,
        query: Query,
        settings: RequestSettings,
        prewhere_conditions: Sequence[str],
    ) -> None:
        parsing_context = ParsingContext()

        aggregate_exprs = [
            column_expr(dataset, col, query, parsing_context, alias, agg)
            for (agg, col, alias) in query.get_aggregations()
        ]
        groupby = util.to_list(query.get_groupby())
        group_exprs = [
            column_expr(dataset, gb, query, parsing_context) for gb in groupby
        ]
        column_names = query.get_selected_columns() or []
        selected_cols = [
            column_expr(dataset, util.tuplify(colname), query, parsing_context)
            for colname in column_names
        ]
        select_clause = u'SELECT {}'.format(
            ', '.join(group_exprs + aggregate_exprs + selected_cols))

        from_clause = u'FROM {}'.format(query.get_data_source().format_from())

        if query.get_final():
            from_clause = u'{} FINAL'.format(from_clause)

        if query.get_sample():
            sample_rate = query.get_sample()
        elif settings.get_turbo():
            sample_rate = snuba_settings.TURBO_SAMPLE_RATE
        else:
            sample_rate = None

        if sample_rate:
            from_clause = u'{} SAMPLE {}'.format(from_clause, sample_rate)

        join_clause = ''
        if query.get_arrayjoin():
            join_clause = u'ARRAY JOIN {}'.format(query.get_arrayjoin())

        where_clause = ''
        if query.get_conditions():
            where_clause = u'WHERE {}'.format(
                conditions_expr(dataset, query.get_conditions(), query,
                                parsing_context))

        prewhere_clause = ''
        if prewhere_conditions:
            prewhere_clause = u'PREWHERE {}'.format(
                conditions_expr(dataset, prewhere_conditions, query,
                                parsing_context))

        group_clause = ''
        if groupby:
            group_clause = 'GROUP BY ({})'.format(', '.join(
                column_expr(dataset, gb, query, parsing_context)
                for gb in groupby))
            if query.has_totals():
                group_clause = '{} WITH TOTALS'.format(group_clause)

        having_clause = ''
        having_conditions = query.get_having()
        if having_conditions:
            assert groupby, 'found HAVING clause with no GROUP BY'
            having_clause = u'HAVING {}'.format(
                conditions_expr(dataset, having_conditions, query,
                                parsing_context))

        order_clause = ''
        if query.get_orderby():
            orderby = [
                column_expr(dataset, util.tuplify(ob), query, parsing_context)
                for ob in util.to_list(query.get_orderby())
            ]
            orderby = [
                u'{} {}'.format(ob.lstrip('-'),
                                'DESC' if ob.startswith('-') else 'ASC')
                for ob in orderby
            ]
            order_clause = u'ORDER BY {}'.format(', '.join(orderby))

        limitby_clause = ''
        if query.get_limitby() is not None:
            limitby_clause = 'LIMIT {} BY {}'.format(*query.get_limitby())

        limit_clause = ''
        if query.get_limit() is not None:
            limit_clause = 'LIMIT {}, {}'.format(query.get_offset(),
                                                 query.get_limit())

        self.__formatted_query = ' '.join([
            c for c in [
                select_clause, from_clause, join_clause, prewhere_clause,
                where_clause, group_clause, having_clause, order_clause,
                limitby_clause, limit_clause
            ] if c
        ])
Example #8
def callback_func(
    storage: str,
    query: Query,
    request_settings: RequestSettings,
    referrer: str,
    results: List[Result[QueryResult]],
) -> None:
    cache_hit = False
    is_duplicate = False

    # Captures whether any of the queries involved was a cache hit or duplicate, as
    # cache hits may be a cause of inconsistency between results.
    # Doesn't attempt to distinguish between the specific scenarios (one or both
    # queries, or splits of those queries, could have hit the cache).
    if any([result.result.extra["stats"].get("cache_hit", 0) for result in results]):
        cache_hit = True
    elif any(
        [result.result.extra["stats"].get("is_duplicate", 0) for result in results]
    ):
        is_duplicate = True

    consistent = request_settings.get_consistent()

    if not results:
        metrics.increment(
            "query_result",
            tags={"storage": storage, "match": "empty", "referrer": referrer},
        )
        return

    primary_result = results.pop(0)
    primary_result_data = primary_result.result.result["data"]

    for result in results:
        result_data = result.result.result["data"]

        metrics.timing(
            "diff_ms",
            round((result.execution_time - primary_result.execution_time) * 1000),
            tags={
                "referrer": referrer,
                "cache_hit": str(cache_hit),
                "is_duplicate": str(is_duplicate),
                "consistent": str(consistent),
            },
        )

        # Do not bother diffing the actual results of sampled queries
        if request_settings.get_turbo() or query.get_sample() not in [None, 1.0]:
            return

        if result_data == primary_result_data:
            metrics.increment(
                "query_result",
                tags={
                    "storage": storage,
                    "match": "true",
                    "referrer": referrer,
                    "cache_hit": str(cache_hit),
                    "is_duplicate": str(is_duplicate),
                    "consistent": str(consistent),
                },
            )
        else:
            # Do not log cache hits to Sentry as it creates too much noise
            if cache_hit:
                continue

            reason = assign_reason_category(result_data, primary_result_data, referrer)

            metrics.increment(
                "query_result",
                tags={
                    "storage": storage,
                    "match": "false",
                    "referrer": referrer,
                    "reason": reason,
                    "cache_hit": str(cache_hit),
                    "is_duplicate": str(is_duplicate),
                    "consistent": str(consistent),
                },
            )

            if len(result_data) != len(primary_result_data):
                sentry_sdk.capture_message(
                    f"Non matching {storage} result - different length",
                    level="warning",
                    tags={
                        "referrer": referrer,
                        "storage": storage,
                        "reason": reason,
                        "cache_hit": str(cache_hit),
                        "is_duplicate": str(is_duplicate),
                        "consistent": str(consistent),
                    },
                    extras={
                        "query": format_query(query),
                        "primary_result": len(primary_result_data),
                        "other_result": len(result_data),
                    },
                )

                break

            # Avoid sending too much data to Sentry - just one row for now
            for idx in range(len(result_data)):
                if result_data[idx] != primary_result_data[idx]:
                    sentry_sdk.capture_message(
                        "Non matching result - different result",
                        level="warning",
                        tags={
                            "referrer": referrer,
                            "storage": storage,
                            "reason": reason,
                            "cache_hit": str(cache_hit),
                            "is_duplicate": str(is_duplicate),
                            "consistent": str(consistent),
                        },
                        extras={
                            "query": format_query(query),
                            "primary_result": primary_result_data[idx],
                            "other_result": result_data[idx],
                        },
                    )

                    break
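
The callback pops the first result as the primary and diffs every remaining result against it, tagging metrics either way. A minimal sketch of just that comparison skeleton (the helper name is hypothetical):

from typing import Any, List

def split_and_compare(result_sets: List[List[Any]]) -> List[bool]:
    # Mirrors the shape above: the first result set is the primary, every
    # other one is compared against it. Empty input short-circuits, as in
    # the original.
    if not result_sets:
        return []
    primary, others = result_sets[0], result_sets[1:]
    return [rows == primary for rows in others]

assert split_and_compare([[{"id": 1}], [{"id": 1}], [{"id": 2}]]) == [True, False]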