Example #1
from typing import Any, MutableMapping

# Snuba module paths below are assumptions for the version these snippets target.
from snuba.clickhouse.query import Query as ClickhouseQuery
from snuba.datasets.factory import get_dataset
from snuba.query.parser import parse_query
from snuba.query.processors.arrayjoin_keyvalue_optimizer import ArrayJoinKeyValueOptimizer
from snuba.request import Request
from snuba.request.request_settings import HTTPRequestSettings

def parse_and_process(query_body: MutableMapping[str, Any]) -> ClickhouseQuery:
    dataset = get_dataset("transactions")
    query = parse_query(query_body, dataset)
    request = Request("a", query, HTTPRequestSettings(), {}, "r")
    # Apply the dataset-level processors to the logical query.
    for p in dataset.get_query_processors():
        p.process_query(query, request.settings)
    # Build the ClickHouse plan, then run the optimizer on the physical query.
    plan = dataset.get_query_plan_builder().build_plan(request)
    ArrayJoinKeyValueOptimizer("tags").process_query(plan.query, request.settings)
    return plan.query
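
A minimal invocation sketch for the function above; the legacy JSON body format and the field values here are assumptions for illustration, not part of the original snippet:

# Hypothetical query body in Snuba's legacy JSON format.
body = {
    "selected_columns": ["tags_key", "tags_value"],
    "conditions": [["project_id", "=", 1]],
}
clickhouse_query = parse_and_process(body)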
Example #2
# Additional entity-pipeline imports; module paths are again assumptions.
from snuba.datasets.entities.factory import get_entity
from snuba.datasets.plans.single_storage import SingleStorageQueryPlanBuilder

def parse_and_process(query_body: MutableMapping[str, Any]) -> ClickhouseQuery:
    dataset = get_dataset("transactions")
    query = parse_query(query_body, dataset)
    request = Request("a", query_body, query, HTTPRequestSettings(), "r")
    # Processors now come from the entity resolved from the query's
    # FROM clause rather than from the dataset.
    entity = get_entity(query.get_from_clause().key)
    for p in entity.get_query_processors():
        p.process_query(query, request.settings)

    # The optimizer runs on the logical query before plan building.
    ArrayJoinKeyValueOptimizer("tags").process_query(query, request.settings)

    # transaction_translator comes from the transactions entity definition.
    query_plan = SingleStorageQueryPlanBuilder(
        storage=entity.get_writable_storage(),
        mappers=transaction_translator,
    ).build_and_rank_plans(query, request.settings)[0]

    return query_plan.query
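
ArrayJoinKeyValueOptimizer targets queries that arrayJoin both tags.key and tags.value. As a plain-Python analogy of the rewrite (a sketch of the semantics, not Snuba's implementation): the two independent arrayJoins, which would produce a cartesian product, are replaced by a single arrayJoin over zipped (key, value) pairs, with any condition on the key pushed in front of it.

def zipped_tag_pairs(tags_key, tags_value, wanted_keys=None):
    # One pass over zipped (key, value) pairs stands in for the single
    # remaining arrayJoin.
    pairs = zip(tags_key, tags_value)
    if wanted_keys is not None:
        # A filter on tags_key is applied before the "arrayJoin".
        pairs = (p for p in pairs if p[0] in wanted_keys)
    return list(pairs)

assert zipped_tag_pairs(["a", "b"], ["1", "2"], {"a"}) == [("a", "1")]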
Example #3
    # during create statement
    # (https://github.com/ClickHouse/ClickHouse/issues/12586), so the
    # materialization is added with a migration.
    skipped_cols_on_creation={"_tags_hash_map"},
)


storage = WritableTableStorage(
    storage_key=StorageKey.TRANSACTIONS,
    storage_set_key=StorageSetKey.TRANSACTIONS,
    schema=schema,
    query_processors=[
        NestedFieldConditionOptimizer(
            "contexts",
            "_contexts_flattened",
            {"start_ts", "finish_ts"},
            BEGINNING_OF_TIME,
        ),
        MappingOptimizer("tags", "_tags_hash_map", "tags_hash_map_enabled"),
        TransactionColumnProcessor(),
        ArrayJoinKeyValueOptimizer("tags"),
        ArrayJoinKeyValueOptimizer("measurements"),
        PrewhereProcessor(),
    ],
    stream_loader=KafkaStreamLoader(
        processor=TransactionsMessageProcessor(), default_topic="events",
    ),
    query_splitters=[TimeSplitQueryStrategy(timestamp_col="finish_ts")],
    writer_options={"insert_allow_materialized_columns": 1},
)
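
Storage-level processors like those above run in declaration order against the physical ClickHouse query. A minimal sketch of how they are applied, assuming the storage exposes its processors via get_query_processors(); clickhouse_query and request_settings are hypothetical stand-ins for the plan's query and the request settings:

for processor in storage.get_query_processors():
    processor.process_query(clickhouse_query, request_settings)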
Example #4
 storage_set_key=StorageSetKey.DISCOVER,
 schema=schema,
 query_processors=[
     MappingColumnPromoter(
         mapping_specs={
             "tags": {
                 "environment": "environment",
                 "sentry:release": "release",
                 "sentry:dist": "dist",
                 "sentry:user": "******",
             },
             "contexts": {"trace.trace_id": "trace_id"},
         }
     ),
     MappingOptimizer("tags", "_tags_hash_map", "tags_hash_map_enabled"),
     ArrayJoinKeyValueOptimizer("tags"),
     UUIDColumnProcessor(set(["event_id", "trace_id"])),
     EventsBooleanContextsProcessor(),
     PrewhereProcessor(
         [
             "event_id",
             "release",
             "message",
             "transaction_name",
             "environment",
             "project_id",
         ]
     ),
 ],
 query_splitters=[
     ColumnSplitQueryStrategy(
Example #5
         mapping_specs={
             "tags": {
                 "environment": "environment",
                 "sentry:release": "release",
                 "sentry:dist": "dist",
                 "sentry:user": "******",
             },
             "contexts": {"trace.trace_id": "trace_id", "trace.span_id": "span_id"},
         }
     ),
     UUIDColumnProcessor(set(["event_id", "trace_id"])),
     HexIntColumnProcessor({"span_id"}),
     EventsBooleanContextsProcessor(),
     MappingOptimizer("tags", "_tags_hash_map", "tags_hash_map_enabled"),
     EmptyTagConditionProcessor(),
     ArrayJoinKeyValueOptimizer("tags"),
     ArrayJoinKeyValueOptimizer("measurements"),
     ArrayJoinKeyValueOptimizer("span_op_breakdowns"),
     PrewhereProcessor(
         [
             "event_id",
             "trace_id",
             "span_id",
             "transaction_name",
             "transaction",
             "title",
         ]
     ),
     TableRateLimit(),
 ],
 stream_loader=build_kafka_stream_loader_from_settings(
Example #6
    Column("_tags_hash", Array(UInt(64), SchemaModifiers(readonly=True))),
]

sets_storage = ReadableTableStorage(
    storage_key=StorageKey.METRICS_SETS,
    storage_set_key=StorageSetKey.METRICS,
    schema=TableSchema(
        local_table_name="metrics_sets_local",
        dist_table_name="metrics_sets_dist",
        storage_set_key=StorageSetKey.METRICS,
        columns=ColumnSet([
            *aggregated_columns,
            Column("value", AggregateFunction("uniqCombined64", [UInt(64)])),
        ]),
    ),
    query_processors=[ArrayJoinKeyValueOptimizer("tags")],
)

counters_storage = ReadableTableStorage(
    storage_key=StorageKey.METRICS_COUNTERS,
    storage_set_key=StorageSetKey.METRICS,
    schema=TableSchema(
        local_table_name="metrics_counters_local",
        dist_table_name="metrics_counters_dist",
        storage_set_key=StorageSetKey.METRICS,
        columns=ColumnSet([
            *aggregated_columns,
            Column("value", AggregateFunction("sum", [Float(64)])),
        ]),
    ),
    query_processors=[ArrayJoinKeyValueOptimizer("tags")],
)
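
The sets and counters storages differ only in their table names and value aggregate. A hypothetical helper (not part of Snuba) could factor the shared shape:

def metrics_storage(storage_key, table_base, value_type):
    # Hypothetical factory mirroring the two definitions above.
    return ReadableTableStorage(
        storage_key=storage_key,
        storage_set_key=StorageSetKey.METRICS,
        schema=TableSchema(
            local_table_name=f"{table_base}_local",
            dist_table_name=f"{table_base}_dist",
            storage_set_key=StorageSetKey.METRICS,
            columns=ColumnSet(
                [*aggregated_columns, Column("value", value_type)]
            ),
        ),
        query_processors=[ArrayJoinKeyValueOptimizer("tags")],
    )

# e.g. metrics_storage(StorageKey.METRICS_COUNTERS, "metrics_counters",
#                      AggregateFunction("sum", [Float(64)]))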