Example #1
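from typing import Any, Dict, List

# "ds" is the module under test (the project's data-source helpers); its
# import path is not included in the original snippet.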
def test_make_bulk_create_objects(
    db_keys: Dict[str, str], handler_type: str, expected_type: Any, tables: List[str]
):
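    # db_keys, handler_type, expected_type, and tables are presumably supplied
    # by pytest fixtures or parametrization, which are not shown in this snippet.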
    base_config = {"username": "******", "password": "******", "database": "baz"}
    config = {**base_config, **db_keys}
    config["handler_type"] = handler_type

    source, handlers = ds.make_bulk_create_objects(
        tables=tables, schema="bar", config=config
    )
    assert len(handlers) == len(tables)
    assert source.blobHandlerType == handler_type

    table_names = []
    for handler in handlers:
        table_names.append(handler.metadata.bodataTableName)
        assert isinstance(handler, ds.Handler)
        assert isinstance(handler.metadata, expected_type)

    for table in tables:
        assert (
            ds.make_immuta_table_name(
                table=table, schema="bar", handler_type=handler_type, user_prefix=""
            )
            in table_names
        )
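For context, here is a minimal sketch of calling make_bulk_create_objects directly rather than through the test; the config keys and the "PostgreSQL" handler type are assumptions, not values from the original suite:

# Hypothetical standalone call mirroring the config built in the test above.
config = {
    "username": "user",
    "password": "secret",
    "database": "baz",
    "hostname": "db.example.com",  # stand-in for the fixture-provided db_keys
    "handler_type": "PostgreSQL",  # assumed handler type
}
source, handlers = ds.make_bulk_create_objects(
    tables=["orders", "customers"], schema="bar", config=config
)
assert len(handlers) == 2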
Example #2
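import fnmatch
import logging
from functools import partial
from typing import Any, Dict, Iterator, List, Tuple

from toolz import keyfilter  # dict-filtering helper from the toolz library

# DataSource, Handler, SchemaEvolutionMetadata, ImmutaClient, and
# make_bulk_create_objects are project-level names whose imports are not
# included in the original snippet.
LOGGER = logging.getLogger(__name__)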
def data_sources_bulk_enroll_iterator(
    client: "ImmutaClient",
    schema_table_mapping: Dict[str, List[Dict[str, str]]],
    schema_obj: Dict[str, str],
    config: Dict[str, Any],
) -> Iterator[Tuple[DataSource, List[Handler], SchemaEvolutionMetadata]]:

    LOGGER.info("Processing schema_prefix: %s", schema_obj["schema_prefix"])

    # Glob-style predicate: does a schema name match the configured prefix?
    matches_prefix = partial(fnmatch.fnmatch, pat=schema_obj["schema_prefix"])

    for schema, tables in keyfilter(matches_prefix,
                                    schema_table_mapping).items():
        LOGGER.info("Bulk creating for all tables in schema %s", schema)
        if len(tables) == 0:
            LOGGER.warning("No tables found for schema: %s", schema)
            continue
        data_source, handlers, schema_evolution = make_bulk_create_objects(
            schema=schema,
            tables=[table["tableName"] for table in tables],
            config=config,
            user_prefix=config.get("user_prefix"),
            bodata_schema_name=schema_obj.get("query_engine_target_schema",
                                              schema),
            prefix_query_engine_names_with_schema=schema_obj.get(
                "prefix_query_engine_names_with_schema", False),
            prefix_query_engine_names_with_handler=schema_obj.get(
                "prefix_query_engine_names_with_handler", False),
        )
        yield data_source, handlers, schema_evolution
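To make the schema-filtering step concrete, here is a small self-contained sketch of the keyfilter/fnmatch combination used above; the schema names are made up:

import fnmatch
from functools import partial

from toolz import keyfilter

mapping = {"analytics_prod": [{"tableName": "t1"}], "staging": []}
matches_prefix = partial(fnmatch.fnmatch, pat="analytics_*")
# keyfilter keeps only the entries whose keys satisfy the predicate.
assert keyfilter(matches_prefix, mapping) == {"analytics_prod": [{"tableName": "t1"}]}

Example #3

from typing import Any, Dict, List

# As in Example #1, "ds" is the module under test; its import path is not
# included in the original snippet.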
def test_make_bulk_create_objects(
    db_keys: Dict[str, str],
    handler_type: str,
    expected_type: Any,
    tables: List[str],
    query_engine_target_schema: str,
    prefix_query_engine_names_with_schema: bool,
    prefix_query_engine_names_with_handler: bool,
):
    base_config = {
        "username": "******",
        "password": "******",
        "database": "baz",
        "owner_profile_id": 0,
    }
    config = {**base_config, **db_keys}
    config["handler_type"] = handler_type

    source, handlers, schema_evolution = ds.make_bulk_create_objects(
        tables=tables,
        schema="bar",
        config=config,
        bodata_schema_name=query_engine_target_schema,
        prefix_query_engine_names_with_schema=prefix_query_engine_names_with_schema,
        prefix_query_engine_names_with_handler=prefix_query_engine_names_with_handler,
    )
    assert len(handlers) == len(tables)
    assert source.blobHandlerType == handler_type

    table_names = []
    for handler in handlers:
        table_names.append(handler.metadata.dataSourceName)
        assert handler.metadata.bodataSchemaName == query_engine_target_schema
        assert isinstance(handler, ds.Handler)
        assert isinstance(handler.metadata, expected_type)

    for table in tables:
        assert (ds.make_immuta_datasource_name(
            table=table,
            schema="bar",
            handler_type=handler_type,
            user_prefix="",
        ) in table_names)

    assert isinstance(schema_evolution, ds.SchemaEvolutionMetadata)
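The arguments here would again come from the test harness; a hedged sketch of a possible pytest parametrization, with made-up values:

import pytest

# Hypothetical parametrization for the test above; the concrete values are
# assumptions, not taken from the original suite.
@pytest.mark.parametrize("prefix_query_engine_names_with_handler", [True, False])
@pytest.mark.parametrize("prefix_query_engine_names_with_schema", [True, False])
@pytest.mark.parametrize(
    "db_keys, handler_type, expected_type, tables, query_engine_target_schema",
    [
        (
            {"hostname": "db.example.com", "port": 5432},  # stand-in db_keys
            "PostgreSQL",  # assumed handler type
            object,  # placeholder; the real suite passes a metadata class
            ["orders", "customers"],
            "qe_bar",
        ),
    ],
)
def test_make_bulk_create_objects(
    db_keys,
    handler_type,
    expected_type,
    tables,
    query_engine_target_schema,
    prefix_query_engine_names_with_schema,
    prefix_query_engine_names_with_handler,
):
    ...  # body as in the example above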