def do_query(query: ClickhouseQuery, query_settings: QuerySettings) -> QueryResult:
    nonlocal query_run_count
    query_run_count += 1
    if query_run_count == 1:
        return QueryResult(
            result={
                "data": [
                    {
                        "event_id": "a",
                        "project_id": "1",
                        "timestamp": " 2019-10-01 22:33:42",
                    },
                    {
                        "event_id": "a",
                        "project_id": "1",
                        "timestamp": " 2019-10-01 22:44:42",
                    },
                ]
            },
            extra={},
        )
    else:
        assert query.get_limit() == 2
        return QueryResult({}, {})
def __init__(self, query: Query, settings: RequestSettings,) -> None:
    # Clickhouse query structure
    # Referencing them here directly since it makes it easier
    # to process this query independently from the Clickhouse Query
    # and there is no risk in doing so since they are immutable.
    self.__selected_columns = query.get_selected_columns_from_ast()
    self.__condition = query.get_condition_from_ast()
    self.__groupby = query.get_groupby_from_ast()
    self.__having = query.get_having_from_ast()
    self.__orderby = query.get_orderby_from_ast()
    self.__data_source = query.get_data_source()
    self.__arrayjoin = query.get_arrayjoin_from_ast()
    self.__granularity = query.get_granularity()
    self.__limit = query.get_limit()
    self.__limitby = query.get_limitby()
    self.__offset = query.get_offset()

    if self.__having:
        assert self.__groupby, "found HAVING clause with no GROUP BY"

    self.__turbo = settings.get_turbo()
    self.__final = query.get_final()
    self.__sample = query.get_sample()
    self.__hastotals = query.has_totals()
    self.__prewhere = query.get_prewhere_ast()

    self.__settings = settings
    self.__sql_data_list: Optional[Sequence[Tuple[str, str]]] = None
    self.__formatted_query: Optional[str] = None
    self.__sql_data: Optional[Mapping[str, str]] = None
def test_query_parameters() -> None:
    query = Query(
        Table("my_table", ColumnSet([])),
        limitby=(100, "environment"),
        limit=100,
        offset=50,
        totals=True,
        granularity=60,
    )

    assert query.get_limitby() == (100, "environment")
    assert query.get_limit() == 100
    assert query.get_offset() == 50
    assert query.has_totals() is True
    assert query.get_granularity() == 60

    assert query.get_from_clause().table_name == "my_table"
def execute(
    self,
    query: Query,
    request_settings: RequestSettings,
    runner: SplitQueryRunner,
) -> Optional[QueryResult]:
    """
    If a query is:
        - ORDER BY timestamp DESC
        - has no grouping
        - has an offset/limit
        - has a large time range
    We know we have to reverse-sort the entire set of rows to return the small
    chunk at the end of the time range, so optimistically split the time range
    into smaller increments, and start with the last one, so that we can
    potentially avoid querying the entire range.
    """
    limit = query.get_limit()
    if limit is None or query.get_groupby():
        return None

    if query.get_offset() >= 1000:
        return None

    orderby = query.get_orderby()
    if not orderby or orderby[0] != f"-{self.__timestamp_col}":
        return None

    conditions = query.get_conditions() or []
    from_date_str = next(
        (
            condition[2]
            for condition in conditions
            if _identify_condition(condition, self.__timestamp_col, ">=")
        ),
        None,
    )
    to_date_str = next(
        (
            condition[2]
            for condition in conditions
            if _identify_condition(condition, self.__timestamp_col, "<")
        ),
        None,
    )

    from_date_ast, to_date_ast = get_time_range(query, self.__timestamp_col)

    if not from_date_str or not to_date_str:
        return None

    date_align, split_step = state.get_configs(
        [("date_align_seconds", 1), ("split_step", 3600)]  # default 1 hour
    )
    to_date = util.parse_datetime(to_date_str, date_align)
    from_date = util.parse_datetime(from_date_str, date_align)

    if from_date != from_date_ast:
        logger.warning(
            "Mismatch in start date on time splitter.",
            extra={"ast": str(from_date_ast), "legacy": str(from_date)},
            exc_info=True,
        )
        metrics.increment("mismatch.ast_from_date")

    remaining_offset = query.get_offset()

    overall_result = None
    split_end = to_date
    split_start = max(split_end - timedelta(seconds=split_step), from_date)
    total_results = 0
    while split_start < split_end and total_results < limit:
        # We need to make a copy to use during the query execution because we replace
        # the start-end conditions on the query at each iteration of this loop.
        split_query = copy.deepcopy(query)

        _replace_condition(
            split_query, self.__timestamp_col, ">=", split_start.isoformat()
        )
        _replace_ast_condition(
            split_query, self.__timestamp_col, ">=", LiteralExpr(None, split_start)
        )
        _replace_condition(
            split_query, self.__timestamp_col, "<", split_end.isoformat()
        )
        _replace_ast_condition(
            split_query, self.__timestamp_col, "<", LiteralExpr(None, split_end)
        )

        # Because it's paged, we have to ask for (limit+offset) results
        # and set offset=0 so we can then trim them ourselves.
        split_query.set_offset(0)
        split_query.set_limit(limit - total_results + remaining_offset)

        # At every iteration we only append the "data" key from the results returned by
        # the runner. The "extra" key is only populated at the first iteration of the
        # loop and never changed.
        result = runner(split_query, request_settings)

        if overall_result is None:
            overall_result = result
        else:
            overall_result.result["data"].extend(result.result["data"])

        if remaining_offset > 0 and len(overall_result.result["data"]) > 0:
            to_trim = min(remaining_offset, len(overall_result.result["data"]))
            overall_result.result["data"] = overall_result.result["data"][to_trim:]
            remaining_offset -= to_trim

        total_results = len(overall_result.result["data"])

        if total_results < limit:
            if len(result.result["data"]) == 0:
                # If we got nothing from the last query, expand the range by a static factor
                split_step = split_step * STEP_GROWTH
            else:
                # If we got some results but not all of them, estimate how big the time
                # range should be for the next query based on how many results we got for
                # our last query and its time range, and how many we have left to fetch.
                remaining = limit - total_results
                split_step = split_step * math.ceil(
                    remaining / float(len(result.result["data"]))
                )

            # Set the start and end of the next query based on the new range.
            split_end = split_start
            try:
                split_start = max(
                    split_end - timedelta(seconds=split_step), from_date
                )
            except OverflowError:
                split_start = from_date

    return overall_result
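# --- Illustration (not part of the splitter above) ---
# A minimal, standalone sketch of the window arithmetic used by the time splitter:
# the window grows by a static factor when a sub-query returns no rows, and is
# otherwise scaled by how many rows are still missing. `simulate_windows` is a
# hypothetical helper and STEP_GROWTH = 10 is an assumed value; only the
# arithmetic mirrors the loop above.
import math
from datetime import datetime, timedelta

STEP_GROWTH = 10  # assumed growth factor for empty sub-queries


def simulate_windows(from_date, to_date, limit, rows_per_query, split_step=3600):
    """Yield the (start, end) windows the splitter would query, newest first."""
    total = 0
    attempt = 0
    split_end = to_date
    split_start = max(split_end - timedelta(seconds=split_step), from_date)
    while split_start < split_end and total < limit:
        yield split_start, split_end
        got = rows_per_query[attempt] if attempt < len(rows_per_query) else 0
        attempt += 1
        total += got
        if total < limit:
            if got == 0:
                # Nothing came back: widen the window by a static factor.
                split_step = split_step * STEP_GROWTH
            else:
                # Scale the window by how many rows are still needed.
                split_step = split_step * math.ceil((limit - total) / float(got))
            split_end = split_start
            split_start = max(split_end - timedelta(seconds=split_step), from_date)


# Example: with limit=100 and sub-queries returning 0, 40 and 60 rows, the loop
# walks backwards through three windows, widening after each underfilled one
# (and clamping the last window at from_date).
for start, end in simulate_windows(
    from_date=datetime(2019, 10, 1),
    to_date=datetime(2019, 10, 2),
    limit=100,
    rows_per_query=[0, 40, 60],
):
    print(start, "->", end)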
def execute(
    self,
    query: Query,
    request_settings: RequestSettings,
    runner: SplitQueryRunner,
) -> Optional[QueryResult]:
    """
    Split query in 2 steps if a large number of columns is being selected.
        - First query only selects event_id, project_id and timestamp.
        - Second query selects all fields for only those events.
        - Shrink the date range.
    """
    limit = query.get_limit()
    if (
        limit is None
        or limit == 0
        or query.get_groupby()
        or query.get_aggregations()
        or not query.get_selected_columns()
    ):
        return None

    if limit > settings.COLUMN_SPLIT_MAX_LIMIT:
        metrics.increment("column_splitter.query_above_limit")
        return None

    # Do not split if there is already a = or IN condition on an ID column
    id_column_matcher = FunctionCall(
        Or([String(ConditionFunctions.EQ), String(ConditionFunctions.IN)]),
        (
            Column(None, String(self.__id_column)),
            AnyExpression(),
        ),
    )

    for expr in query.get_condition_from_ast() or []:
        match = id_column_matcher.match(expr)
        if match:
            return None

    # We need to count the number of table/column name pairs,
    # not the number of distinct Column objects in the query,
    # so as to avoid counting aliased columns multiple times.
    total_columns = {
        (col.table_name, col.column_name)
        for col in query.get_all_ast_referenced_columns()
    }

    minimal_query = copy.deepcopy(query)
    minimal_query.set_selected_columns(
        [self.__id_column, self.__project_column, self.__timestamp_column]
    )
    # TODO: provide the table alias name to this splitter if we ever use it
    # in joins.
    minimal_query.set_ast_selected_columns(
        [
            SelectedExpression(
                self.__id_column, ColumnExpr(None, None, self.__id_column)
            ),
            SelectedExpression(
                self.__project_column, ColumnExpr(None, None, self.__project_column)
            ),
            SelectedExpression(
                self.__timestamp_column,
                ColumnExpr(None, None, self.__timestamp_column),
            ),
        ]
    )

    for exp in minimal_query.get_all_expressions():
        if exp.alias in (
            self.__id_column,
            self.__project_column,
            self.__timestamp_column,
        ) and not (isinstance(exp, ColumnExpr) and exp.column_name == exp.alias):
            logger.warning(
                "Potential alias shadowing due to column splitter",
                extra={"expression": exp},
                exc_info=True,
            )

    minimal_columns = {
        (col.table_name, col.column_name)
        for col in minimal_query.get_all_ast_referenced_columns()
    }
    if len(total_columns) <= len(minimal_columns):
        return None

    # Ensures the AST minimal query is actually runnable on its own.
    if not minimal_query.validate_aliases():
        return None

    legacy_references = set(minimal_query.get_all_referenced_columns())
    ast_column_names = {
        c.column_name for c in minimal_query.get_all_ast_referenced_columns()
    }
    # Ensures the legacy minimal query (which does not expand alias references)
    # does not contain alias references we removed when creating minimal_query.
    if legacy_references - ast_column_names:
        metrics.increment("columns.skip_invalid_legacy_query")
        return None

    result = runner(minimal_query, request_settings)
    del minimal_query

    if not result.result["data"]:
        return None

    # Making a copy just in case runner returned None (which would drive the execution
    # strategy to ignore the result of this splitter and try the next one).
    query = copy.deepcopy(query)

    event_ids = list(
        set([event[self.__id_column] for event in result.result["data"]])
    )
    if len(event_ids) > settings.COLUMN_SPLIT_MAX_RESULTS:
        # We may be running a query that is beyond the ClickHouse maximum query size,
        # so we cowardly abandon.
        metrics.increment("column_splitter.intermediate_results_beyond_limit")
        return None

    query.add_conditions([(self.__id_column, "IN", event_ids)])
    query.add_condition_to_ast(
        in_condition(
            None,
            ColumnExpr(None, None, self.__id_column),
            [LiteralExpr(None, e_id) for e_id in event_ids],
        )
    )
    query.set_offset(0)
    # TODO: This is technically wrong. Event ids are unique per project, not globally.
    # So, if the minimal query only returned the same event_id from two projects, we
    # would be underestimating the limit here.
    query.set_limit(len(event_ids))

    project_ids = list(
        set([event[self.__project_column] for event in result.result["data"]])
    )
    _replace_condition(
        query,
        self.__project_column,
        "IN",
        project_ids,
    )
    _replace_ast_condition(
        query,
        self.__project_column,
        "IN",
        literals_tuple(None, [LiteralExpr(None, p_id) for p_id in project_ids]),
    )

    timestamps = [event[self.__timestamp_column] for event in result.result["data"]]
    _replace_condition(
        query,
        self.__timestamp_column,
        ">=",
        util.parse_datetime(min(timestamps)).isoformat(),
    )
    _replace_ast_condition(
        query,
        self.__timestamp_column,
        ">=",
        LiteralExpr(None, util.parse_datetime(min(timestamps))),
    )
    # We add 1 second since this gets translated to ('timestamp', '<', to_date)
    # and events are stored with a granularity of 1 second.
    _replace_condition(
        query,
        self.__timestamp_column,
        "<",
        (util.parse_datetime(max(timestamps)) + timedelta(seconds=1)).isoformat(),
    )
    _replace_ast_condition(
        query,
        self.__timestamp_column,
        "<",
        LiteralExpr(
            None,
            (util.parse_datetime(max(timestamps)) + timedelta(seconds=1)),
        ),
    )

    return runner(query, request_settings)
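# --- Illustration (not part of the splitter above) ---
# A standalone sketch of the narrowing step performed after the minimal
# (phase-one) query: given its result rows, derive the IN lists and the
# [min, max + 1s) timestamp bounds that constrain the full (phase-two) query.
# `narrow_conditions` is a hypothetical helper, the row shape is assumed, and
# plain datetime.fromisoformat stands in for util.parse_datetime; the real
# splitter applies these bounds through the condition helpers above.
from datetime import datetime, timedelta


def narrow_conditions(
    rows, id_col="event_id", project_col="project_id", ts_col="timestamp"
):
    event_ids = sorted({row[id_col] for row in rows})
    project_ids = sorted({row[project_col] for row in rows})
    timestamps = [datetime.fromisoformat(row[ts_col].strip()) for row in rows]
    return {
        id_col: ("IN", event_ids),
        project_col: ("IN", project_ids),
        # The upper bound is exclusive and events are stored with one-second
        # granularity, hence the +1 second.
        ts_col: (
            (">=", min(timestamps).isoformat()),
            ("<", (max(timestamps) + timedelta(seconds=1)).isoformat()),
        ),
    }


# With the two rows returned by do_query above, the second query is narrowed to
# event_id IN ['a'], project_id IN ['1'] and timestamp in [22:33:42, 22:44:43).
print(narrow_conditions([
    {"event_id": "a", "project_id": "1", "timestamp": "2019-10-01 22:33:42"},
    {"event_id": "a", "project_id": "1", "timestamp": "2019-10-01 22:44:42"},
]))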
def execute(
    self,
    query: Query,
    request_settings: RequestSettings,
    runner: SplitQueryRunner,
) -> Optional[QueryResult]:
    """
    If a query is:
        - ORDER BY timestamp DESC
        - has no grouping
        - has an offset/limit
        - has a large time range
    We know we have to reverse-sort the entire set of rows to return the small
    chunk at the end of the time range, so optimistically split the time range
    into smaller increments, and start with the last one, so that we can
    potentially avoid querying the entire range.
    """
    limit = query.get_limit()
    if limit is None or query.get_groupby_from_ast():
        return None

    if query.get_offset() >= 1000:
        return None

    orderby = query.get_orderby_from_ast()
    if (
        not orderby
        or orderby[0].direction != OrderByDirection.DESC
        or not isinstance(orderby[0].expression, ColumnExpr)
        or not orderby[0].expression.column_name == self.__timestamp_col
    ):
        return None

    from_date_ast, to_date_ast = get_time_range(query, self.__timestamp_col)
    if from_date_ast is None or to_date_ast is None:
        return None

    date_align, split_step = state.get_configs(
        [("date_align_seconds", 1), ("split_step", 3600)]  # default 1 hour
    )
    assert isinstance(split_step, int)
    remaining_offset = query.get_offset()

    overall_result: Optional[QueryResult] = None
    split_end = to_date_ast
    split_start = max(split_end - timedelta(seconds=split_step), from_date_ast)
    total_results = 0
    while split_start < split_end and total_results < limit:
        # We need to make a copy to use during the query execution because we replace
        # the start-end conditions on the query at each iteration of this loop.
        split_query = copy.deepcopy(query)

        _replace_ast_condition(
            split_query, self.__timestamp_col, ">=", LiteralExpr(None, split_start)
        )
        _replace_ast_condition(
            split_query, self.__timestamp_col, "<", LiteralExpr(None, split_end)
        )

        # Because it's paged, we have to ask for (limit+offset) results
        # and set offset=0 so we can then trim them ourselves.
        split_query.set_offset(0)
        split_query.set_limit(limit - total_results + remaining_offset)

        # At every iteration we only append the "data" key from the results returned by
        # the runner. The "extra" key is only populated at the first iteration of the
        # loop and never changed.
        result = runner(split_query, request_settings)

        if overall_result is None:
            overall_result = result
        else:
            overall_result.result["data"].extend(result.result["data"])

        if remaining_offset > 0 and len(overall_result.result["data"]) > 0:
            to_trim = min(remaining_offset, len(overall_result.result["data"]))
            overall_result.result["data"] = overall_result.result["data"][to_trim:]
            remaining_offset -= to_trim

        total_results = len(overall_result.result["data"])

        if total_results < limit:
            if len(result.result["data"]) == 0:
                # If we got nothing from the last query, expand the range by a static factor
                split_step = split_step * STEP_GROWTH
            else:
                # If we got some results but not all of them, estimate how big the time
                # range should be for the next query based on how many results we got for
                # our last query and its time range, and how many we have left to fetch.
                remaining = limit - total_results
                split_step = split_step * math.ceil(
                    remaining / float(len(result.result["data"]))
                )

            # Set the start and end of the next query based on the new range.
            split_end = split_start
            try:
                split_start = max(
                    split_end - timedelta(seconds=split_step), from_date_ast
                )
            except OverflowError:
                split_start = from_date_ast

    return overall_result
def execute(
    self,
    query: Query,
    query_settings: QuerySettings,
    runner: SplitQueryRunner,
) -> Optional[QueryResult]:
    """
    Split query in 2 steps if a large number of columns is being selected.
        - First query only selects event_id, project_id and timestamp.
        - Second query selects all fields for only those events.
        - Shrink the date range.
    """
    limit = query.get_limit()
    if (
        limit is None
        or limit == 0
        or query.get_groupby()
        or not query.get_selected_columns()
    ):
        return None

    if limit > settings.COLUMN_SPLIT_MAX_LIMIT:
        metrics.increment("column_splitter.query_above_limit")
        return None

    # Do not split if there is already a = or IN condition on an ID column
    id_column_matcher = FunctionCall(
        Or([String(ConditionFunctions.EQ), String(ConditionFunctions.IN)]),
        (
            Column(None, String(self.__id_column)),
            AnyExpression(),
        ),
    )

    for expr in query.get_condition() or []:
        match = id_column_matcher.match(expr)
        if match:
            return None

    # We need to count the number of table/column name pairs,
    # not the number of distinct Column objects in the query,
    # so as to avoid counting aliased columns multiple times.
    selected_columns = {
        (col.table_name, col.column_name)
        for col in query.get_columns_referenced_in_select()
    }
    if len(selected_columns) < settings.COLUMN_SPLIT_MIN_COLS:
        metrics.increment("column_splitter.main_query_min_threshold")
        return None

    minimal_query = copy.deepcopy(query)
    # TODO: provide the table alias name to this splitter if we ever use it
    # in joins.
    minimal_query.set_ast_selected_columns(
        [
            SelectedExpression(
                self.__id_column,
                ColumnExpr(self.__id_column, None, self.__id_column),
            ),
            SelectedExpression(
                self.__project_column,
                ColumnExpr(self.__project_column, None, self.__project_column),
            ),
            SelectedExpression(
                self.__timestamp_column,
                ColumnExpr(self.__timestamp_column, None, self.__timestamp_column),
            ),
        ]
    )

    for exp in minimal_query.get_all_expressions():
        if exp.alias in (
            self.__id_column,
            self.__project_column,
            self.__timestamp_column,
        ) and not (isinstance(exp, ColumnExpr) and exp.column_name == exp.alias):
            logger.warning(
                "Potential alias shadowing due to column splitter",
                extra={"expression": exp},
                exc_info=True,
            )

    # Ensures the AST minimal query is actually runnable on its own.
    if not minimal_query.validate_aliases():
        return None

    # There is a Clickhouse bug where functions in the ORDER BY clause fail on
    # distributed tables if they are not also in the SELECT. For that specific
    # case, skip the query splitter.
    for orderby in minimal_query.get_orderby():
        if isinstance(
            orderby.expression, (FunctionCallExpr, CurriedFunctionCallExpr)
        ):
            metrics.increment("column_splitter.orderby_has_a_function")
            return None

    result = runner(minimal_query, query_settings)
    del minimal_query

    if not result.result["data"]:
        metrics.increment("column_splitter.no_data_from_minimal_query")
        return None

    # Making a copy just in case runner returned None (which would drive the execution
    # strategy to ignore the result of this splitter and try the next one).
    query = copy.deepcopy(query)

    event_ids = list(
        set([event[self.__id_column] for event in result.result["data"]])
    )
    if len(event_ids) > settings.COLUMN_SPLIT_MAX_RESULTS:
        # We may be running a query that is beyond the ClickHouse maximum query size,
        # so we cowardly abandon.
        metrics.increment("column_splitter.intermediate_results_beyond_limit")
        return None

    query.add_condition_to_ast(
        in_condition(
            ColumnExpr(None, None, self.__id_column),
            [LiteralExpr(None, e_id) for e_id in event_ids],
        )
    )
    query.set_offset(0)
    query.set_limit(len(result.result["data"]))

    project_ids = list(
        set([event[self.__project_column] for event in result.result["data"]])
    )
    _replace_ast_condition(
        query,
        self.__project_column,
        "IN",
        literals_tuple(None, [LiteralExpr(None, p_id) for p_id in project_ids]),
    )

    timestamps = [event[self.__timestamp_column] for event in result.result["data"]]
    _replace_ast_condition(
        query,
        self.__timestamp_column,
        ">=",
        LiteralExpr(None, util.parse_datetime(min(timestamps))),
    )
    # We add 1 second since this gets translated to ('timestamp', '<', to_date)
    # and events are stored with a granularity of 1 second.
    _replace_ast_condition(
        query,
        self.__timestamp_column,
        "<",
        LiteralExpr(
            None,
            (util.parse_datetime(max(timestamps)) + timedelta(seconds=1)),
        ),
    )

    return runner(query, query_settings)