Example 1
def drop(*, dataset: Dataset):
    # Drop every ClickHouse table belonging to the dataset's storages.
    for storage in dataset.get_all_storages():
        for statement in storage.get_schemas().get_drop_statements():
            clickhouse_rw.execute(statement.statement)

    # Recreate the tables from scratch and clear any cached Redis state.
    ensure_table_exists(dataset, force=True)
    redis_client.flushdb()
    # Flask-style (body, status, headers) response tuple.
    return ("ok", 200, {"Content-Type": "text/plain"})
Example 2
from typing import MutableSequence

def run(conn: Client, dataset: Dataset) -> None:
    schemas: MutableSequence[Schema] = []

    # The writable storage, if any, contributes the schema used for writes.
    writable_storage = dataset.get_writable_storage()
    if writable_storage:
        writer = writable_storage.get_table_writer()
        schemas.append(writer.get_schema())
    # Every storage contributes its read schema.
    for storage in dataset.get_all_storages():
        schemas.append(storage.get_schemas().get_read_schema())

    # Apply the migration step to each collected schema.
    for schema in schemas:
        _run_schema(conn, schema)
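
A hedged usage sketch: run() expects a live ClickHouse connection and a dataset object. Assuming Client is clickhouse-driver's native client and get_dataset is Snuba's dataset factory (both import paths are assumptions), invoking it might look like:

from clickhouse_driver import Client  # assumed source of the Client type
from snuba.datasets.factory import get_dataset  # assumed import path

conn = Client(host="localhost", port=9000)
run(conn, get_dataset("events"))  # "events" is an illustrative dataset name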
Example 3
def truncate_dataset(dataset: Dataset) -> None:
    for storage in dataset.get_all_storages():
        cluster = storage.get_cluster()
        clickhouse = cluster.get_query_connection(ClickhouseClientSettings.MIGRATE)
        database = cluster.get_database()

        schema = storage.get_schema()

        # Skip storages without a concrete table. A bare `return` here, as in
        # the original, would wrongly abort truncation of the remaining storages.
        if not isinstance(schema, TableSchema):
            continue

        table = schema.get_local_table_name()

        clickhouse.execute(f"TRUNCATE TABLE IF EXISTS {database}.{table}")
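
TRUNCATE TABLE IF EXISTS empties each table without dropping it, so this is cheaper than drop() when only row data needs resetting, e.g. between tests. A minimal sketch of using it as a pytest fixture (the fixture, dataset name, and get_dataset import path are illustrative assumptions):

import pytest
from snuba.datasets.factory import get_dataset  # assumed import path

@pytest.fixture
def clean_events_dataset():
    dataset = get_dataset("events")  # illustrative dataset name
    truncate_dataset(dataset)  # start each test from empty tables
    return dataset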
Example 4
def ensure_table_exists(dataset: Dataset, force: bool = False) -> None:
    # Memoized per dataset: skip the work unless forced.
    if not force and _ensured.get(dataset, False):
        return

    assert local_dataset_mode(), "Cannot create table in distributed mode"

    # Deferred import of the migrations runner.
    from snuba.migrations import migrate

    # We cannot build distributed tables this way. So this only works in local
    # mode.
    for storage in dataset.get_all_storages():
        for statement in storage.get_schemas().get_create_statements():
            clickhouse_rw.execute(statement.statement)

    migrate.run(clickhouse_rw, dataset)

    _ensured[dataset] = True
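
The snippet reads and writes a module-level cache `_ensured` that is not shown. A declaration compatible with the code above (the type annotation is an assumption; only the name and usage come from the snippet):

from typing import MutableMapping

# Per-dataset memoization flag consulted by ensure_table_exists().
_ensured: MutableMapping[Dataset, bool] = {}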