def __backwards_migrations(
    self, table_name: str
) -> Sequence[operations.Operation]:
    status_type = Enum([("success", 0), ("error", 1), ("rate-limited", 2)])
    return [
        operations.ModifyColumn(
            StorageSetKey.QUERYLOG,
            table_name,
            Column("status", status_type),
        ),
        operations.ModifyColumn(
            StorageSetKey.QUERYLOG,
            table_name,
            Column("clickhouse_queries.status", Array(status_type)),
        ),
    ]
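A backwards method like this reverts whatever the paired forwards method changed on the same columns. As a rough sketch of what that counterpart could look like, assuming the method name mirrors the backwards one and using a purely hypothetical extra enum variant ("project-rate-limited" = 3 is illustrative, not taken from the original migration):

def __forwards_migrations(
    self, table_name: str
) -> Sequence[operations.Operation]:
    # Hypothetical expanded status enum; the added variant is an assumption.
    new_status_type = Enum(
        [("success", 0), ("error", 1), ("rate-limited", 2), ("project-rate-limited", 3)]
    )
    return [
        operations.ModifyColumn(
            StorageSetKey.QUERYLOG,
            table_name,
            Column("status", new_status_type),
        ),
        operations.ModifyColumn(
            StorageSetKey.QUERYLOG,
            table_name,
            Column("clickhouse_queries.status", Array(new_status_type)),
        ),
    ]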
Example #2
from typing import Sequence

from snuba.clickhouse.columns import Column, DateTime, Enum, String, UInt
from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations
from snuba.migrations.columns import MigrationModifiers as Modifiers
from snuba.migrations.context import Context
from snuba.migrations.status import Status
from snuba.migrations.table_engines import Distributed, ReplacingMergeTree

columns: Sequence[Column[Modifiers]] = [
    Column("group", String()),
    Column("migration_id", String()),
    Column("timestamp", DateTime()),
    Column("status", Enum([("completed", 0), ("in_progress", 1), ("not_started", 2)]),),
    Column("version", UInt(64, Modifiers(default="1"))),
]


class Migration(migration.Migration):
    """
    This migration extends Migration instead of MultiStepMigration since it is
    responsible for bootstrapping the migration system itself. It skips setting
    the in progress status in the forwards method and the not started status in
    the backwards method. Since the migration table doesn't exist yet, we can't
    write any statuses until this migration is completed.
    """

    blocking = False

    def __forwards_local(self) -> Sequence[operations.Operation]:
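        # Sketch only: the listing cuts this snippet off here. A plausible body,
        # assuming snuba's operations.CreateTable and the ReplacingMergeTree
        # engine imported above; the table name, ORDER BY clause and keyword
        # arguments are illustrative assumptions, not taken from the source.
        return [
            operations.CreateTable(
                storage_set=StorageSetKey.MIGRATIONS,
                table_name="migrations_local",
                columns=columns,
                engine=ReplacingMergeTree(
                    version_column="version",
                    order_by="(group, migration_id)",
                ),
            ),
        ]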
Example #3
def visit_enum(self, node: Node, visited_children: Iterable[Any]) -> ColumnType:
    # Discard the literal "Enum", the optional size, the parentheses and the
    # whitespace from the parse tree; only the (name, value) pairs are needed.
    _enum, _size, _open, _sp, pairs, _sp, _close = visited_children
    return Enum(pairs)
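The visitor just forwards the (name, value) pairs collected by the grammar to the Enum constructor, so the result is equivalent to building the type directly from pairs of the shape used throughout these examples:

# Equivalent direct construction once the pairs are already parsed
Enum([("success", 0), ("error", 1)])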
Example #4
from typing import Sequence

from snuba.clickhouse.columns import Column, DateTime, Enum, String, UInt
from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations
from snuba.migrations.columns import MigrationModifiers as Modifiers
from snuba.migrations.table_engines import Distributed, ReplacingMergeTree

columns: Sequence[Column[Modifiers]] = [
    Column("group", String()),
    Column("migration_id", String()),
    Column("timestamp", DateTime()),
    Column(
        "status",
        Enum([("completed", 0), ("in_progress", 1), ("not_started", 2)]),
    ),
    Column("version", UInt(64, Modifiers(default="1"))),
]


class Migration(migration.ClickhouseNodeMigration):
    """
    This migration is the only one that sets is_first_migration = True since it is
    responsible for bootstrapping the migration system itself. It skips setting
    the in progress status in the forwards method and the not started status in
    the backwards method. Since the migration table doesn't exist yet, we can't
    write any statuses until this migration is completed.
    """

    blocking = False
Example #5
    Array,
    Column,
    DateTime,
    Enum,
    Float,
    LowCardinality,
    Nested,
    Nullable,
    String,
    UInt,
    UUID,
)
from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations, table_engines

status_type = Enum([("success", 0), ("error", 1), ("rate-limited", 2)])

columns = [
    Column("request_id", UUID()),
    Column("request_body", String()),
    Column("referrer", LowCardinality(String())),
    Column("dataset", LowCardinality(String())),
    Column("projects", Array(UInt(64))),
    Column("organization", Nullable(UInt(64))),
    Column("timestamp", DateTime()),
    Column("duration_ms", UInt(32)),
    Column("status", status_type),
    Column(
        "clickhouse_queries",
        Nested([
            Column("sql", String()),
Example #6
    Materialized,
    Nullable,
    String,
    UInt,
    UUID,
    WithCodecs,
    WithDefault,
)
from snuba.migrations.parse_schema import _get_column

test_data = [
    # Basic types
    (("Date", "", "", ""), Date()),
    (("DateTime", "", "", ""), DateTime()),
    (("Enum8('success' = 0, 'error' = 1)", "", "", ""),
     Enum([("success", 0), ("error", 1)])),
    (("FixedString(32)", "", "", ""), FixedString(32)),
    (("Float32", "", "", ""), Float(32)),
    (("IPv4", "", "", ""), IPv4()),
    (("IPv6", "", "", ""), IPv6()),
    (("String", "", "", ""), String()),
    (("UInt32", "", "", ""), UInt(32)),
    (("UUID", "", "", ""), UUID()),
    # Aggregate functions
    (("AggregateFunction(uniq, UInt8)", "", "", ""),
     AggregateFunction("uniq", UInt(8))),
    (("AggregateFunction(countIf, UUID, UInt8)", "", "", ""),
     AggregateFunction("countIf", UUID(), UInt(8))),
    (("AggregateFunction(quantileIf(0.5, 0.9), UInt32, UInt8)", "", "", ""),
     AggregateFunction("quantileIf(0.5, 0.9)", UInt(32), UInt(8))),
    # Array
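Each tuple pairs a raw ClickHouse column description with the column type object it should parse into. A plausible way these cases are consumed, where the test name and the parametrize wiring are assumptions and only _get_column and the tuple shape come from the snippet itself:

import pytest  # assumed to be imported at the top of the test module


@pytest.mark.parametrize("input_args, expected", test_data)
def test_parse_column(input_args, expected):
    # input_args looks like (type string, default kind, default expression, codec);
    # _get_column should rebuild the equivalent column type object.
    assert _get_column(*input_args) == expected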
Example #7
    pytest.param(
        cast(
            Column[Modifier],
            AggregateFunction("uniqIf", [UInt(8), UInt(32)],
                              Modifier(nullable=True)),
        ),
        AggregateFunction("uniqIf", [UInt(8), UInt(32)]),
        cast(
            Column[Modifier],
            AggregateFunction("uniqIf", [UInt(8)], Modifier(nullable=True)),
        ),
        "Nullable(AggregateFunction(uniqIf, UInt8, UInt32))",
        id="aggregated",
    ),
    pytest.param(
        Enum([("a", 1), ("b", 2)], Modifier(nullable=True)),
        Enum([("a", 1), ("b", 2)]),
        Enum([("a", 1), ("b", 2)]),
        "Nullable(Enum('a' = 1, 'b' = 2))",
        id="enums",
    ),
]


@pytest.mark.parametrize("col_type, raw_type, different_type, for_schema",
                         TEST_CASES)
def test_methods(
    col_type: ColumnType[Modifier],
    raw_type: ColumnType[Modifier],
    different_type: ColumnType[Modifier],
    for_schema: str,